diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index bce0f910daf0..f72a6ef82c14 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -166,6 +166,11 @@ public interface Connection extends Abortable, Closeable { */ Admin getAdmin() throws IOException; + /** + * @return the cluster ID unique to this HBase cluster. + */ + String getClusterId() throws IOException; + @Override public void close() throws IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index 4e3e55e1d696..ac4a34281e8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -184,6 +184,11 @@ public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) return wrappedConnection.isTableAvailable(tableName, splitKeys); } + @Override + public TableState getTableState(TableName tableName) throws IOException { + return wrappedConnection.getTableState(tableName); + } + @Override public HTableDescriptor[] listTables() throws IOException { return wrappedConnection.listTables(); @@ -496,4 +501,9 @@ public RpcRetryingCallerFactory getRpcRetryingCallerFactory() { public RpcControllerFactory getRpcControllerFactory() { return wrappedConnection.getRpcControllerFactory(); } + + @Override + public String getClusterId() throws IOException { + return wrappedConnection.getClusterId(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index e8498c4c597b..7cf84eddd047 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -82,7 +82,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -import org.apache.hadoop.hbase.protobuf.generated.*; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; @@ -123,6 +123,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; @@ -632,7 +634,7 @@ static class 
HConnectionImplementation implements ClusterConnection, Closeable { /** * Cluster registry of basic info such as clusterid and meta region location. */ - Registry registry; + ConnectionRegistry registry; private final ClientBackoffPolicy backoffPolicy; @@ -821,6 +823,11 @@ public Admin getAdmin() throws IOException { return new HBaseAdmin(this); } + @Override + public String getClusterId() throws IOException { + return registry.getClusterId(); + } + @Override public MetricsConnection getConnectionMetrics() { return this.metrics; @@ -917,8 +924,8 @@ private void shutdownBatchPool(ExecutorService pool) { * @return The cluster registry implementation to use. * @throws IOException */ - private Registry setupRegistry() throws IOException { - return RegistryFactory.getRegistry(this); + private ConnectionRegistry setupRegistry() throws IOException { + return ConnectionRegistryFactory.getRegistry(this); } /** @@ -939,7 +946,7 @@ public String toString(){ protected String clusterId = null; - void retrieveClusterId() { + void retrieveClusterId() throws IOException { if (clusterId != null) return; this.clusterId = this.registry.getClusterId(); if (clusterId == null) { @@ -1005,7 +1012,7 @@ public HRegionLocation getRegionLocation(final byte[] tableName, @Override public boolean isTableEnabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, true); + return getTableState(tableName).inStates(TableState.State.ENABLED); } @Override @@ -1015,7 +1022,7 @@ public boolean isTableEnabled(byte[] tableName) throws IOException { @Override public boolean isTableDisabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, false); + return getTableState(tableName).inStates(TableState.State.DISABLED); } @Override @@ -1256,7 +1263,7 @@ private RegionLocations locateMeta(final TableName tableName, } } // Look up from zookeeper - metaLocations = this.registry.getMetaRegionLocation(); + metaLocations = this.registry.getMetaRegionLocations(); lastMetaLookupTime = EnvironmentEdgeManager.currentTime(); if (metaLocations != null && metaLocations.getRegionLocation(replicaId) != null) { @@ -1571,43 +1578,31 @@ abstract class StubMaker { * @throws KeeperException * @throws ServiceException */ - private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException { - ZooKeeperKeepAliveConnection zkw; - try { - zkw = getKeepAliveZooKeeperWatcher(); - } catch (IOException e) { - ExceptionUtil.rethrowIfInterrupt(e); - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } - try { - checkIfBaseNodeAvailable(zkw); - ServerName sn = MasterAddressTracker.getMasterAddress(zkw); - if (sn == null) { - String msg = "ZooKeeper available but no active master location found"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - if (isDeadServer(sn)) { - throw new MasterNotRunningException(sn + " is dead."); - } - // Use the security info interface name as our stub key - String key = getStubKey(getServiceName(), - sn.getHostname(), sn.getPort(), hostnamesCanChange); - connectionLock.putIfAbsent(key, key); - Object stub = null; - synchronized (connectionLock.get(key)) { - stub = stubs.get(key); - if (stub == null) { - BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); - stub = makeStub(channel); - isMasterRunning(); - stubs.put(key, stub); - } + private Object makeStubNoRetries() throws IOException, ServiceException { + ServerName sn = registry.getActiveMaster(); + 
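// The connection registry (ZooKeeper- or master-backed) resolves the active master here. +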
if (sn == null) { + String msg = "No active master location found"; + LOG.info(msg); + throw new MasterNotRunningException(msg); + } + if (isDeadServer(sn)) { + throw new MasterNotRunningException(sn + " is dead."); + } + // Use the security info interface name as our stub key + String key = getStubKey(getServiceName(), + sn.getHostname(), sn.getPort(), hostnamesCanChange); + connectionLock.putIfAbsent(key, key); + Object stub = null; + synchronized (connectionLock.get(key)) { + stub = stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); + stub = makeStub(channel); + isMasterRunning(); + stubs.put(key, stub); } - return stub; - } finally { - zkw.close(); } + return stub; } /** @@ -1625,12 +1620,9 @@ Object makeStub() throws IOException { return makeStubNoRetries(); } catch (IOException e) { exceptionCaught = e; - } catch (KeeperException e) { - exceptionCaught = e; } catch (ServiceException e) { exceptionCaught = e; } - throw new MasterNotRunningException(exceptionCaught); } else { throw new DoNotRetryIOException("Connection was closed while trying to get master"); @@ -2157,6 +2149,13 @@ public ListTableNamesByNamespaceResponse listTableNamesByNamespace( return stub.listTableNamesByNamespace(controller, request); } + @Override + public GetTableStateResponse getTableState( + RpcController controller, GetTableStateRequest request) + throws ServiceException { + return stub.getTableState(controller, request); + } + @Override public void close() { release(this.mss); @@ -2577,6 +2576,9 @@ void internalClose() { if (this.closed) { return; } + if (this.registry != null) { + this.registry.close(); + } closeMaster(); shutdownPools(); if (this.metrics != null) { @@ -2784,6 +2786,19 @@ public RpcRetryingCallerFactory getRpcRetryingCallerFactory() { public RpcControllerFactory getRpcControllerFactory() { return this.rpcControllerFactory; } + + public TableState getTableState(TableName tableName) throws IOException { + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + GetTableStateResponse resp = master.getTableState(null, + RequestConverter.buildGetTableStateRequest(tableName)); + return TableState.convert(resp.getTableState()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java similarity index 76% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 58ec3c453997..353ff6182808 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -19,42 +19,46 @@ import java.io.IOException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; /** - * Cluster registry. * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc. + * needed by cluster connections. * Internal use only. 
*/ @InterfaceAudience.Private -interface Registry { +interface ConnectionRegistry { /** * @param connection */ - void init(Connection connection); + void init(Connection connection) throws IOException; + + /** + * @return the currently active master, null if none exists. + */ + ServerName getActiveMaster() throws IOException; /** * @return Meta region location * @throws IOException */ - RegionLocations getMetaRegionLocation() throws IOException; + RegionLocations getMetaRegionLocations() throws IOException; /** * @return Cluster id. */ - String getClusterId(); + String getClusterId() throws IOException; /** - * @param enabled Return true if table is enabled + * @return Count of 'running' regionservers * @throws IOException */ - boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException; + int getCurrentNrHRS() throws IOException; /** - * @return Count of 'running' regionservers - * @throws IOException + * Cleanup state, if any. */ - int getCurrentNrHRS() throws IOException; + void close(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java similarity index 71% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index 789e2e144790..eaef3892c4c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -19,26 +19,26 @@ import java.io.IOException; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** - * Get instance of configured Registry. + * Get instance of configured Connection Registry. */ @InterfaceAudience.Private -class RegistryFactory { - static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl"; +class ConnectionRegistryFactory { /** * @return The cluster registry implementation to use. * @throws IOException */ - static Registry getRegistry(final Connection connection) + static ConnectionRegistry getRegistry(final Connection connection) throws IOException { - String registryClass = connection.getConfiguration().get(REGISTRY_IMPL_CONF_KEY, - ZooKeeperRegistry.class.getName()); - Registry registry = null; + String registryClass = connection.getConfiguration().get(HConstants.REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class.getName()); + ConnectionRegistry registry = null; try { - registry = (Registry)Class.forName(registryClass).getDeclaredConstructor().newInstance(); + registry = (ConnectionRegistry)Class.forName(registryClass).getDeclaredConstructor().newInstance(); } catch (Throwable t) { throw new IOException(t); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index e476d5f9717f..7de1dfb937dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -212,6 +212,13 @@ boolean isMasterRunning() @Deprecated boolean isTableDisabled(byte[] tableName) throws IOException; + /** + * Retrieve the TableState, representing the current state of the table.
+ * @param tableName the table to fetch state for + * @return state of the table + */ + public TableState getTableState(TableName tableName) throws IOException; + /** * @param tableName table name * @return true if all regions of the table are available, false otherwise diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java new file mode 100644 index 000000000000..08b5e9b7562a --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.Closeable; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService; + +/** + * Thread safe utility that keeps master end points used by {@link MasterRegistry} up to date. This + * uses the RPC {@link ClientMetaService#getMasters} to fetch the latest list of registered masters. + * By default the refresh happens periodically (configured via + * {@link #PERIODIC_REFRESH_INTERVAL_SECS}). The refresh can also be triggered on demand via + * {@link #refreshNow()}. To prevent a flood of on-demand refreshes we expect that any two attempts + * should be spaced at least {@link #MIN_SECS_BETWEEN_REFRESHES} seconds apart.
+ */ +@InterfaceAudience.Private +public class MasterAddressRefresher implements Closeable { + private static final Logger LOG = LoggerFactory.getLogger(MasterAddressRefresher.class); + public static final String PERIODIC_REFRESH_INTERVAL_SECS = + "hbase.client.master_registry.refresh_interval_secs"; + private static final int PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT = 300; + public static final String MIN_SECS_BETWEEN_REFRESHES = + "hbase.client.master_registry.min_secs_between_refreshes"; + private static final int MIN_SECS_BETWEEN_REFRESHES_DEFAULT = 60; + + private final ExecutorService pool; + private final MasterRegistry registry; + private final long periodicRefreshMs; + private final long timeBetweenRefreshesMs; + private final Object refreshMasters = new Object(); + + @Override + public void close() { + pool.shutdownNow(); + } + + /** + * Thread that refreshes the master end points until it is interrupted via {@link #close()}. + * Multiple callers attempting to refresh at the same time synchronize on {@link #refreshMasters}. + */ + private class RefreshThread implements Runnable { + @Override + public void run() { + long lastRpcTs = 0; + while (!Thread.interrupted()) { + try { + // Spurious wake ups are okay, worst case we make an extra RPC call to refresh. We won't + // have duplicate refreshes because once the thread is past the wait(), notify()s are + // ignored until the thread is back to the waiting state. + synchronized (refreshMasters) { + refreshMasters.wait(periodicRefreshMs); + } + long currentTs = EnvironmentEdgeManager.currentTime(); + if (lastRpcTs != 0 && currentTs - lastRpcTs <= timeBetweenRefreshesMs) { + continue; + } + lastRpcTs = currentTs; + LOG.debug("Attempting to refresh master address end points."); + Set<ServerName> newMasters = new HashSet<>(registry.getMasters()); + registry.populateMasterStubs(newMasters); + LOG.debug("Finished refreshing master end points. {}", newMasters); + } catch (InterruptedException e) { + LOG.debug("Interrupted during wait, aborting refresh-masters-thread.", e); + break; + } catch (IOException e) { + LOG.debug("Error populating latest list of masters.", e); + } + } + LOG.info("Master end point refresher loop exited."); + } + } + + MasterAddressRefresher(Configuration conf, MasterRegistry registry) { + pool = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setNameFormat("master-registry-refresh-end-points").setDaemon(true).build()); + periodicRefreshMs = TimeUnit.SECONDS.toMillis(conf.getLong(PERIODIC_REFRESH_INTERVAL_SECS, + PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT)); + timeBetweenRefreshesMs = TimeUnit.SECONDS.toMillis(conf.getLong(MIN_SECS_BETWEEN_REFRESHES, + MIN_SECS_BETWEEN_REFRESHES_DEFAULT)); + Preconditions.checkArgument(periodicRefreshMs > 0); + Preconditions.checkArgument(timeBetweenRefreshesMs < periodicRefreshMs); + this.registry = registry; + pool.submit(new RefreshThread()); + } + + /** + * Notifies the refresher thread to refresh the list of master end points. This does not + * guarantee a refresh. See class comment for details.
+ */ + void refreshNow() { + synchronized (refreshMasters) { + refreshMasters.notify(); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java new file mode 100644 index 000000000000..877049cd395f --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -0,0 +1,266 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.util.DNS.getMasterHostname; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.net.HostAndPort; +import com.google.protobuf.Message; +import com.google.protobuf.RpcController; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; +import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; +import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; +import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.security.User; + +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse; 
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse; + +/** + * Master-based registry implementation. Makes RPCs to the configured master addresses from config + * {@value org.apache.hadoop.hbase.HConstants#MASTER_ADDRS_KEY}. All the registry methods are + * blocking, unlike implementations in other branches. + */ +@InterfaceAudience.Private +public class MasterRegistry implements ConnectionRegistry { + private static final String MASTER_ADDRS_CONF_SEPARATOR = ","; + + private volatile ImmutableMap<String, ClientMetaService.Interface> masterAddr2Stub; + + // RPC client used to talk to the masters. + private RpcClient rpcClient; + private RpcControllerFactory rpcControllerFactory; + private int rpcTimeoutMs; + + protected MasterAddressRefresher masterAddressRefresher; + + @Override + public void init(Connection connection) throws IOException { + Configuration conf = connection.getConfiguration(); + rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE, + conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + // HBASE-25051: we pass cluster id as null here since we do not have a cluster id yet, we have + // to fetch this through the master registry... + // This is a problem as we will use the cluster id to determine the authentication method + rpcClient = RpcClientFactory.createClient(conf, null); + rpcControllerFactory = RpcControllerFactory.instantiate(conf); + populateMasterStubs(parseMasterAddrs(conf)); + masterAddressRefresher = new MasterAddressRefresher(conf, this); + } + + protected interface Callable<T extends Message> { + T call(ClientMetaService.Interface stub, RpcController controller) throws IOException; + } + + protected <T extends Message> T doCall(Callable<T> callable) + throws MasterRegistryFetchException { + Exception lastException = null; + Set<String> masters = masterAddr2Stub.keySet(); + List<ClientMetaService.Interface> stubs = new ArrayList<>(masterAddr2Stub.values()); + Collections.shuffle(stubs, ThreadLocalRandom.current()); + for (ClientMetaService.Interface stub: stubs) { + HBaseRpcController controller = rpcControllerFactory.newController(); + try { + T resp = callable.call(stub, controller); + if (!controller.failed()) { + return resp; + } + lastException = controller.getFailed(); + } catch (Exception e) { + lastException = e; + } + if (ClientExceptionsUtil.isConnectionException(lastException)) { + masterAddressRefresher.refreshNow(); + } + } + // rpcs to all masters failed.
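+ // Surface every master end point we tried together with the last underlying failure.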
+ throw new MasterRegistryFetchException(masters, lastException); + } + + @Override + public ServerName getActiveMaster() throws IOException { + GetMastersResponseEntry activeMaster = null; + for (GetMastersResponseEntry entry: getMastersInternal().getMasterServersList()) { + if (entry.getIsActive()) { + activeMaster = entry; + break; + } + } + if (activeMaster == null) { + throw new HBaseIOException("No active master found"); + } + return ProtobufUtil.toServerName(activeMaster.getServerName()); + } + + List<ServerName> getMasters() throws IOException { + List<ServerName> result = new ArrayList<>(); + for (GetMastersResponseEntry entry: getMastersInternal().getMasterServersList()) { + result.add(ProtobufUtil.toServerName(entry.getServerName())); + } + return result; + } + + private GetMastersResponse getMastersInternal() throws IOException { + return doCall(new Callable<GetMastersResponse>() { + @Override + public GetMastersResponse call( + ClientMetaService.Interface stub, RpcController controller) throws IOException { + BlockingRpcCallback<GetMastersResponse> cb = new BlockingRpcCallback<>(); + stub.getMasters(controller, GetMastersRequest.getDefaultInstance(), cb); + return cb.get(); + } + }); + } + + @Override + public RegionLocations getMetaRegionLocations() throws IOException { + GetMetaRegionLocationsResponse resp = doCall(new Callable<GetMetaRegionLocationsResponse>() { + @Override + public GetMetaRegionLocationsResponse call( + ClientMetaService.Interface stub, RpcController controller) throws IOException { + BlockingRpcCallback<GetMetaRegionLocationsResponse> cb = new BlockingRpcCallback<>(); + stub.getMetaRegionLocations(controller, GetMetaRegionLocationsRequest.getDefaultInstance(), + cb); + return cb.get(); + } + }); + List<HRegionLocation> result = new ArrayList<>(); + for (HBaseProtos.RegionLocation loc: resp.getMetaLocationsList()) { + result.add(ProtobufUtil.toRegionLocation(loc)); + } + return new RegionLocations(result); + } + + @Override + public String getClusterId() throws IOException { + GetClusterIdResponse resp = doCall(new Callable<GetClusterIdResponse>() { + @Override + public GetClusterIdResponse call(ClientMetaService.Interface stub, RpcController controller) + throws IOException { + BlockingRpcCallback<GetClusterIdResponse> cb = new BlockingRpcCallback<>(); + stub.getClusterId(controller, GetClusterIdRequest.getDefaultInstance(), cb); + return cb.get(); + } + }); + return resp.getClusterId(); + } + + @Override + public int getCurrentNrHRS() throws IOException { + GetNumLiveRSResponse resp = doCall(new Callable<GetNumLiveRSResponse>() { + @Override + public GetNumLiveRSResponse call(ClientMetaService.Interface stub, RpcController controller) + throws IOException { + BlockingRpcCallback<GetNumLiveRSResponse> cb = new BlockingRpcCallback<>(); + stub.getNumLiveRS(controller, GetNumLiveRSRequest.getDefaultInstance(), cb); + return cb.get(); + } + }); + return resp.getNumRegionServers(); + } + + @Override + public void close() { + if (rpcClient != null) { + rpcClient.close(); + } + } + + /** + * Parses the list of master addresses from the provided configuration. Supported format is comma + * separated host[:port] values. If no port number is specified, default master port is assumed. + * @param conf Configuration to parse from.
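+ * @return the unique set of master end points parsed from the configuration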
+ */ + @InterfaceAudience.Private + public static Set<ServerName> parseMasterAddrs(Configuration conf) throws UnknownHostException { + Set<ServerName> masterAddrs = new HashSet<>(); + String configuredMasters = getMasterAddr(conf); + for (String masterAddr : configuredMasters.split(MASTER_ADDRS_CONF_SEPARATOR)) { + HostAndPort masterHostPort = + HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT); + masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE)); + } + Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed"); + return masterAddrs; + } + + /** + * Builds the default master address end point if it is not specified in the configuration. + *

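+ * Falls back to the locally resolved master hostname and the configured {@code hbase.master.port} otherwise.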
+ * Will be called in {@code HBaseTestingUtility}. + */ + @InterfaceAudience.Private + public static String getMasterAddr(Configuration conf) throws UnknownHostException { + String masterAddrFromConf = conf.get(HConstants.MASTER_ADDRS_KEY); + if (!Strings.isNullOrEmpty(masterAddrFromConf)) { + return masterAddrFromConf; + } + String hostname = getMasterHostname(conf); + int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); + return String.format("%s:%d", hostname, port); + } + + void populateMasterStubs(Set<ServerName> masters) throws IOException { + Preconditions.checkNotNull(masters); + ImmutableMap.Builder<String, ClientMetaService.Interface> builder = ImmutableMap.builder(); + User user = User.getCurrent(); + for (ServerName masterAddr : masters) { + builder.put(masterAddr.toString(), ClientMetaService.newStub( + rpcClient.createRpcChannel(masterAddr, user, rpcTimeoutMs))); + } + masterAddr2Stub = builder.build(); + } + + @InterfaceAudience.Private + ImmutableSet<String> getParsedMasterServers() { + return masterAddr2Stub.keySet(); + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 6328d7fe2521..0bce8eb78e7f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -56,6 +56,7 @@ public class MetricsConnection implements StatisticTrackable { /** Set this key to {@code true} to enable metrics collection of client requests. */ public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable"; + private static final String CNT_BASE = "rpcCount_"; private static final String DRTN_BASE = "rpcCallDurationMs_"; private static final String REQ_BASE = "rpcCallRequestSizeBytes_"; private static final String RESP_BASE = "rpcCallResponseSizeBytes_"; @@ -303,6 +304,8 @@ private static interface NewMetric { LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap<String, Counter> cacheDroppingExceptions = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + @VisibleForTesting protected final ConcurrentMap<String, Counter> rpcCounters = + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); public MetricsConnection(final ConnectionManager.HConnectionImplementation conn) { this.scope = conn.toString(); @@ -450,8 +453,7 @@ private T getMetric(String key, ConcurrentMap map, NewMetric f } /** Update call stats for non-critical-path methods */ - private void updateRpcGeneric(MethodDescriptor method, CallStats stats) { - final String methodName = method.getService().getName() + "_" + method.getName(); + private void updateRpcGeneric(String methodName, CallStats stats) { getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory) .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) @@ -466,6 +468,9 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { if (callsPerServer > 0) { concurrentCallsPerServerHist.update(callsPerServer); } + // Update the counter that tracks RPCs by type. + final String methodName = method.getService().getName() + "_" + method.getName(); + getMetric(CNT_BASE + methodName, rpcCounters, counterFactory).inc(); // this implementation is tied directly to protobuf implementation details. would be better // if we could dispatch based on something static, ie, request Message type.
if (method.getService() == ClientService.getDescriptor()) { @@ -518,7 +523,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { } } // Fallback to dynamic registry lookup for DDL methods. - updateRpcGeneric(method, stats); + updateRpcGeneric(methodName, stats); } public void incrCacheDroppingExceptions(Object exception) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java new file mode 100644 index 000000000000..384d4e695b17 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +/** + * Represents table state. + */ +@InterfaceAudience.Private +public class TableState { + + @InterfaceAudience.Public + @InterfaceStability.Evolving + public static enum State { + ENABLED, + DISABLED, + DISABLING, + ENABLING; + + /** + * Convert from PB version of State + * + * @param state convert from + * @return POJO + */ + public static State convert(HBaseProtos.TableState.State state) { + State ret; + switch (state) { + case ENABLED: + ret = State.ENABLED; + break; + case DISABLED: + ret = State.DISABLED; + break; + case DISABLING: + ret = State.DISABLING; + break; + case ENABLING: + ret = State.ENABLING; + break; + default: + throw new IllegalStateException(state.toString()); + } + return ret; + } + + /** + * Convert to PB version of State + * + * @return PB + */ + public HBaseProtos.TableState.State convert() { + HBaseProtos.TableState.State state; + switch (this) { + case ENABLED: + state = HBaseProtos.TableState.State.ENABLED; + break; + case DISABLED: + state = HBaseProtos.TableState.State.DISABLED; + break; + case DISABLING: + state = HBaseProtos.TableState.State.DISABLING; + break; + case ENABLING: + state = HBaseProtos.TableState.State.ENABLING; + break; + default: + throw new IllegalStateException(this.toString()); + } + return state; + } + + } + + private final long timestamp; + private final TableName tableName; + private final State state; + + /** + * Create instance of TableState.
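+ * @param tableName table for which the state is created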
+ * @param state table state + * @param timestamp timestamp of the state + */ + public TableState(TableName tableName, State state, long timestamp) { + this.tableName = tableName; + this.state = state; + this.timestamp = timestamp; + } + + /** + * Create instance of TableState with current timestamp + * + * @param tableName table for which state is created + * @param state state of the table + */ + public TableState(TableName tableName, State state) { + this(tableName, state, System.currentTimeMillis()); + } + + /** + * @return table state + */ + public State getState() { + return state; + } + + /** + * Timestamp of table state + * + * @return milliseconds + */ + public long getTimestamp() { + return timestamp; + } + + /** + * Table name for state + * + * @return table name + */ + public TableName getTableName() { + return tableName; + } + + /** + * Check whether the table is in the given state + * @param state state + * @return true if satisfies + */ + public boolean inStates(State state) { + return this.state.equals(state); + } + + /** + * Check whether the table is in any of the given states + * @param states state list + * @return true if satisfies + */ + public boolean inStates(State... states) { + for (State s : states) { + if (s.equals(this.state)) { + return true; + } + } + return false; + } + + + /** + * Convert to PB version of TableState + * @return PB + */ + public HBaseProtos.TableState convert() { + return HBaseProtos.TableState.newBuilder() + .setState(this.state.convert()) + .setTable(ProtobufUtil.toProtoTableName(this.tableName)) + .setTimestamp(this.timestamp) + .build(); + } + + /** + * Convert from PB version of TableState + * @param tableState convert from + * @return POJO + */ + public static TableState convert(HBaseProtos.TableState tableState) { + TableState.State state = State.convert(tableState.getState()); + return new TableState(ProtobufUtil.toTableName(tableState.getTable()), + state, tableState.getTimestamp()); + } + + /** + * Static version of state checker + * @param state desired + * @param target equals to any of + * @return true if satisfies + */ + public static boolean isInStates(State state, State...
target) { + for (State tableState : target) { + if (state.equals(tableState)) { + return true; + } + } + return false; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java similarity index 74% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 05572b7becdb..c656da86c635 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -18,27 +18,26 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; /** * A cluster registry that stores to zookeeper. */ -class ZooKeeperRegistry implements Registry { - private static final Log LOG = LogFactory.getLog(ZooKeeperRegistry.class); +class ZKConnectionRegistry implements ConnectionRegistry { + private static final Log LOG = LogFactory.getLog(ZKConnectionRegistry.class); // Needs an instance of hci to function. Set after construct this instance. ConnectionManager.HConnectionImplementation hci; @@ -51,10 +50,19 @@ public void init(Connection connection) { } @Override - public RegionLocations getMetaRegionLocation() throws IOException { - ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); + public ServerName getActiveMaster() throws IOException { + ServerName sn; + try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) { + sn = MasterAddressTracker.getMasterAddress(zkw); + } catch (KeeperException e) { + throw new HBaseIOException(e); + } + return sn; + } - try { + @Override + public RegionLocations getMetaRegionLocations() throws IOException { + try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();) { if (LOG.isTraceEnabled()) { LOG.trace("Looking up meta region location in ZK," + " connection=" + this); } @@ -87,8 +95,6 @@ public RegionLocations getMetaRegionLocation() throws IOException { } catch (InterruptedException e) { Thread.currentThread().interrupt(); return null; - } finally { - zkw.close(); } } @@ -99,52 +105,29 @@ public String getClusterId() { if (this.clusterId != null) return this.clusterId; // No synchronized here, worse case we will retrieve it twice, that's // not an issue. 
- ZooKeeperKeepAliveConnection zkw = null; - try { - zkw = hci.getKeepAliveZooKeeperWatcher(); + try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) { this.clusterId = ZKClusterId.readClusterIdZNode(zkw); if (this.clusterId == null) { LOG.info("ClusterId read in ZooKeeper is null"); } - } catch (KeeperException e) { - LOG.warn("Can't retrieve clusterId from Zookeeper", e); - } catch (IOException e) { + } catch (KeeperException | IOException e) { LOG.warn("Can't retrieve clusterId from Zookeeper", e); - } finally { - if (zkw != null) zkw.close(); } return this.clusterId; } - @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); - try { - if (enabled) { - return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName); - } - return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName); - } catch (KeeperException e) { - throw new IOException("Enable/Disable failed", e); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } finally { - zkw.close(); - } - } - @Override public int getCurrentNrHRS() throws IOException { - ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); - try { + try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) { // We go to zk rather than to master to get count of regions to avoid // HTable having a Master dependency. See HBase-2828 return ZKUtil.getNumberOfChildren(zkw, zkw.rsZNode); } catch (KeeperException ke) { throw new IOException("Unexpected ZooKeeper exception", ke); - } finally { - zkw.close(); } } + + @Override + public void close() { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 975cf4458c22..240133ad12fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -86,6 +87,7 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; @@ -95,6 +97,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.LimitInputStream; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; @@ -158,6 +161,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; 
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; import org.apache.hadoop.hbase.quotas.ThrottleType; @@ -170,6 +174,7 @@ import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; +import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DynamicClassLoader; @@ -3657,4 +3662,104 @@ public static Set toCompactedStoreFiles(byte[] bytes) throws IOException } return Collections.emptySet(); } + + /** + * Get the Meta region state from the passed data bytes. Can handle both old and new style + * server names. + * @param data protobuf serialized data with meta server name. + * @param replicaId replica ID for this region + * @return RegionState instance corresponding to the serialized data. + * @throws DeserializationException if the data is invalid. + */ + public static RegionState parseMetaRegionStateFrom(final byte[] data, int replicaId) + throws DeserializationException { + RegionState.State state = RegionState.State.OPEN; + ServerName serverName; + if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { + try { + int prefixLen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.MetaRegionServer rl = + ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom(data, prefixLen, + data.length - prefixLen); + if (rl.hasState()) { + state = RegionState.State.convert(rl.getState()); + } + HBaseProtos.ServerName sn = rl.getServer(); + serverName = ServerName.valueOf( + sn.getHostName(), sn.getPort(), sn.getStartCode()); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException("Unable to parse meta region location"); + } + } else { + // old style of meta region location? + serverName = parseServerNameFrom(data); + } + if (serverName == null) { + state = RegionState.State.OFFLINE; + } + return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( + HRegionInfo.FIRST_META_REGIONINFO, replicaId), state, serverName); + } + + /** + * Get a ServerName from the passed in data bytes. + * @param data Data with a serialize server name in it; can handle the old style + * servername where servername was host and port. Works too with data that + * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that + * has a serialized {@link ServerName} in it. + * @return Returns null if data is null else converts passed data + * to a ServerName instance. + * @throws DeserializationException when data cannot be de-serialized as expected. + */ + public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + if (data == null || data.length <= 0) { + return null; + } + if (isPBMagicPrefix(data)) { + int prefixLen = lengthOfPBMagic(); + try { + ZooKeeperProtos.Master rss = + ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = + rss.getMaster(); + return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); + } catch (/*InvalidProtocolBufferException*/IOException e) { + // A failed parse of the znode is pretty catastrophic. 
Rather than loop + // retrying hoping the bad bytes will change, and rather than change + // the signature on this method to add an IOE which will send ripples all + // over the code base, throw a RuntimeException. This should "never" happen. + // Fail fast if it does. + throw new DeserializationException(e); + } + } + // The str returned could be old style -- pre hbase-1502 -- which was + // hostname and port separated by a colon rather than hostname, port and + // startcode delimited by a ','. + String str = Bytes.toString(data); + int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR); + if (index != -1) { + // Presume it's a ServerName serialized with versioned bytes. + return ServerName.parseVersionedServerName(data); + } + // Presume it is a hostname:port format. + String hostname = Addressing.parseHostname(str); + int port = Addressing.parsePort(str); + return ServerName.valueOf(hostname, port, -1L); + } + + public static HBaseProtos.RegionLocation toRegionLocation(HRegionLocation loc) { + HBaseProtos.RegionLocation.Builder builder = HBaseProtos.RegionLocation.newBuilder(); + builder.setRegionInfo(HRegionInfo.convert(loc.getRegionInfo())); + if (loc.getServerName() != null) { + builder.setServerName(toServerName(loc.getServerName())); + } + builder.setSeqNum(loc.getSeqNum()); + return builder.build(); + } + + public static HRegionLocation toRegionLocation(HBaseProtos.RegionLocation proto) { + org.apache.hadoop.hbase.HRegionInfo regionInfo = HRegionInfo.convert(proto.getRegionInfo()); + ServerName serverName = proto.hasServerName() ? toServerName(proto.getServerName()) : null; + return new HRegionLocation(regionInfo, serverName, proto.getSeqNum()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 63b8af260c08..31e69cc8323f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -22,6 +22,8 @@ import java.util.regex.Pattern; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ByteStringer; + import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -95,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; @@ -117,7 +120,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; -import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; @@ -1411,6
+1413,18 @@ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern patte return builder.build(); } + /** + * Creates a protocol buffer GetTableStateRequest + * + * @param tableName table to get request for + * @return a GetTableStateRequest + */ + public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) { + return GetTableStateRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .build(); + } + /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index eca54a41ba01..1091ee6132f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; @@ -42,6 +43,8 @@ public class SecurityInfo { new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); infos.put(MasterService.getDescriptor().getName(), new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + infos.put(ClientMetaService.getDescriptor().getName(), + new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 78e438c0339d..5d30bf8ecb3e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -305,7 +305,7 @@ public static void obtainTokenForJob(final Connection conn, final JobConf job, U public static void addTokenForJob(final Connection conn, final JobConf job, User user) throws IOException, InterruptedException { - Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user); + Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user); if (token == null) { token = obtainToken(conn, user); } @@ -324,7 +324,7 @@ public static void addTokenForJob(final Connection conn, final JobConf job, User */ public static void addTokenForJob(final Connection conn, User user, Job job) throws IOException, InterruptedException { - Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user); + Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user); if (token == null) { token = obtainToken(conn, user); } @@ -343,7 +343,7 @@ public static void addTokenForJob(final Connection conn, User user, Job job) */ public static boolean addTokenIfMissing(Connection conn, User user) throws IOException, InterruptedException { - Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user); + Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user); if (token == null) { token =
obtainToken(conn, user); + user.getUGI().addToken(token.getService(), token); @@ -356,19 +356,9 @@ public static boolean addTokenIfMissing(Connection conn, User user) * Get the authentication token of the user for the cluster specified in the configuration * @return null if the user does not have the token, otherwise the auth token for the cluster. */ - private static Token<AuthenticationTokenIdentifier> getAuthToken(Configuration conf, User user) - throws IOException, InterruptedException { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null); - try { - String clusterId = ZKClusterId.readClusterIdZNode(zkw); - if (clusterId == null) { - throw new IOException("Failed to get cluster ID"); - } - return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens()); - } catch (KeeperException e) { - throw new IOException(e); - } finally { - zkw.close(); - } + private static Token<AuthenticationTokenIdentifier> getAuthToken(Connection conn, User user) + throws IOException { + String clusterId = conn.getClusterId(); + return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java index 311202c71445..c34d29488570 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.zookeeper; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; @@ -67,6 +71,57 @@ public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) { super(watcher, watcher.getMasterAddressZNode(), abortable); } + /** + * @param watcher ZooKeeperWatcher instance to use for querying ZK. + * @return current list of backup masters.
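+ * The watch on the backup master znodes is renewed as a side effect of listing them.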
+ */ + public static List<ServerName> getBackupMastersAndRenewWatch( + ZooKeeperWatcher watcher) { + // Build Set of backup masters from ZK nodes + List<String> backupMasterStrings; + try { + backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren( + watcher, watcher.backupMasterAddressesZNode); + } catch (KeeperException e) { + LOG.warn(watcher.prefix("Unable to list backup servers"), e); + backupMasterStrings = null; + } + + List<ServerName> backupMasters = new ArrayList<>(); + if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) { + for (String s: backupMasterStrings) { + try { + byte [] bytes; + try { + bytes = ZKUtil.getData(watcher, ZKUtil.joinZNode( + watcher.backupMasterAddressesZNode, s)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Thread interrupted."); + } + if (bytes != null) { + ServerName sn; + try { + sn = ServerName.parseFrom(bytes); + } catch (DeserializationException e) { + LOG.warn("Failed parse, skipping registering backup server", e); + continue; + } + backupMasters.add(sn); + } + } catch (KeeperException | InterruptedIOException e) { + LOG.warn(watcher.prefix("Unable to get information about " + + "backup servers"), e); + } + } + Collections.sort(backupMasters, new Comparator<ServerName>() { + @Override + public int compare(ServerName s1, ServerName s2) { + return s1.getServerName().compareTo(s2.getServerName()); + }}); + } + return backupMasters; + } + /** * Get the address of the current master if one is available. Returns null * if no current master. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java deleted file mode 100644 index 7c21b0154268..000000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Non-instantiable class that provides helper functions to learn - * about HBase table state for code running on client side (hence, not having - * access to consensus context).
- * -  * Doesn't cache any table state, just goes directly to ZooKeeper. - * TODO: decouple this class from ZooKeeper. - */ -@InterfaceAudience.Private -public class ZKTableStateClientSideReader { - - private ZKTableStateClientSideReader() {} - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException, TableNotFoundException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isEnabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException, TableNotFoundException { - return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} - * of {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException, TableNotFoundException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || - isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException, TableNotFoundException { - Set<TableName> disabledTables = new HashSet<TableName>(); - List<String> children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - TableName tableName = - TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName); - } - return disabledTables; - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException, TableNotFoundException { - return - getTablesInStates( - zkw, - ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING); - } - - /** - * Gets a list of all the tables set as enabling in zookeeper.
- * @param zkw ZooKeeperWatcher instance to use - * @return Set of enabling tables, empty Set if none - * @throws KeeperException - * @throws InterruptedException - */ - public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException, TableNotFoundException { - return getTablesInStates(zkw, ZooKeeperProtos.Table.State.ENABLING); - } - - /** - * Gets a list of tables that are set as one of the passing in states in zookeeper. - * @param zkw ZooKeeperWatcher instance to use - * @param states the list of states that a table could be in - * @return Set of tables in one of the states, empty Set if none - * @throws KeeperException - * @throws InterruptedException - */ - private static Set<TableName> getTablesInStates( - ZooKeeperWatcher zkw, - ZooKeeperProtos.Table.State... states) - throws KeeperException, InterruptedException, TableNotFoundException { - Set<TableName> tableNameSet = new HashSet<TableName>(); - List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - TableName tableName; - ZooKeeperProtos.Table.State tableState; - for (String child: children) { - tableName = TableName.valueOf(child); - tableState = getTableState(zkw, tableName); - for (ZooKeeperProtos.Table.State state : states) { - if (tableState == state) { - tableNameSet.add(tableName); - break; - } - } - } - return tableNameSet; - } - - static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && currentState.equals(expectedState); - } - - /** - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - * @throws TableNotFoundException if tableName doesn't exist - */ - static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException, TableNotFoundException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) { - throw new TableNotFoundException(tableName); - } - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); - return builder.getState(); - } catch (IOException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 901fc71d8bdb..be05054ab4f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file @@ -18,6 +18,11 @@ */ package org.apache.hadoop.hbase.zookeeper; +import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM; +import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; +import static org.apache.hadoop.hbase.HRegionInfo.DEFAULT_REPLICA_ID; +import static org.apache.hadoop.hbase.zookeeper.ZKUtil.joinZNode; +import com.google.common.collect.ImmutableMap; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; @@ -39,7 +44,6 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -71,6 +75,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class); + public static final String META_ZNODE_PREFIX_CONF_KEY = "zookeeper.znode.metaserver"; + public static final String META_ZNODE_PREFIX = "meta-region-server"; + // Identifier for this watcher (for logging only). It is made of the prefix // passed on construction and the zookeeper sessionid. private String prefix; @@ -91,6 +98,11 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private final List<ZooKeeperListener> listeners = new CopyOnWriteArrayList<ZooKeeperListener>(); + /** + * znodes containing the locations of the servers hosting the meta replicas + */ + private final ImmutableMap<Integer, String> metaReplicaZNodes; + // Single threaded executor pool that processes event notifications from Zookeeper. Events are // processed in the order in which they arrive (pool backed by an unbounded fifo queue). We do // this to decouple the event processing from Zookeeper's ClientCnxn's EventThread context. @@ -126,6 +138,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // znode used for region transitioning and assignment public String assignmentZNode; // znode used for table disabling/enabling + @Deprecated public String tableZNode; // znode containing the unique cluster ID public String clusterIdZNode; @@ -148,6 +161,13 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // znode of indicating master maintenance mode public static String masterMaintZNode = "masterMaintenance"; + /** + * The prefix of meta znode. Does not include baseZNode. + * It's a 'prefix' because the meta replica id integer can be tagged on the end (if + * no number present, it is the 'default' replica).
+ */ + private final String metaZNodePrefix; + // Certain ZooKeeper nodes need to be world-readable public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE = new ArrayList<ACL>() { { @@ -155,7 +175,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { add(new ACL(ZooDefs.Perms.ALL,ZooDefs.Ids.AUTH_IDS)); }}; - public final static String META_ZNODE_PREFIX = "meta-region-server"; private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup"; private final Configuration conf; @@ -202,6 +221,15 @@ public ZooKeeperWatcher(Configuration conf, String identifier, PendingWatcher pendingWatcher = new PendingWatcher(); this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier); pendingWatcher.prepare(this); + ImmutableMap.Builder<Integer, String> builder = ImmutableMap.builder(); + metaZNodePrefix = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX); + String defaultMetaReplicaZNode = joinZNode(baseZNode, metaZNodePrefix); + builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode); + int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM); + for (int i = 1; i < numMetaReplicas; i++) { + builder.put(i, defaultMetaReplicaZNode + "-" + i); + } + metaReplicaZNodes = builder.build(); if (canCreateBaseZNode) { try { createBaseZNodes(); @@ -219,6 +247,13 @@ public ZooKeeperWatcher(Configuration conf, String identifier, HConstants.ZK_SYNC_BLOCKING_TIMEOUT_DEFAULT_MS); } + /** + * @return true if the znode is a meta region replica + */ + public boolean isAnyMetaReplicaZNode(String node) { + return this.metaReplicaZNodes.containsValue(node); + } + private void createBaseZNodes() throws ZooKeeperConnectionException { try { // Create all the necessary "directories" of znodes @@ -296,7 +331,7 @@ private void setZnodeAclsRecursive(String znode) throws KeeperException, Interru List<String> children = recoverableZooKeeper.getChildren(znode, false); for (String child : children) { - setZnodeAclsRecursive(ZKUtil.joinZNode(znode, child)); + setZnodeAclsRecursive(joinZNode(znode, child)); } List<ACL> acls = ZKUtil.createACL(this, znode, true); LOG.info("Setting ACLs for znode:" + znode + " , acl:" + acls); @@ -446,47 +481,47 @@ public String prefix(final String str) { private void setNodeNames(Configuration conf) { baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - metaReplicaZnodes.put(0, ZKUtil.joinZNode(baseZNode, + metaReplicaZnodes.put(0, joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"))); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, + DEFAULT_META_REPLICA_NUM); for (int i = 1; i < numMetaReplicas; i++) { - String str = ZKUtil.joinZNode(baseZNode, + String str = joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i); metaReplicaZnodes.put(i, str); } - rsZNode = ZKUtil.joinZNode(baseZNode, + rsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); - drainingZNode = ZKUtil.joinZNode(baseZNode, + drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); - masterAddressZNode = ZKUtil.joinZNode(baseZNode, + masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); - backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode, + backupMasterAddressesZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); -
clusterStateZNode = ZKUtil.joinZNode(baseZNode, + clusterStateZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running")); - assignmentZNode = ZKUtil.joinZNode(baseZNode, + assignmentZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.unassigned", "region-in-transition")); - tableZNode = ZKUtil.joinZNode(baseZNode, + tableZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table")); - clusterIdZNode = ZKUtil.joinZNode(baseZNode, + clusterIdZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid")); - splitLogZNode = ZKUtil.joinZNode(baseZNode, + splitLogZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME)); - balancerZNode = ZKUtil.joinZNode(baseZNode, + balancerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer")); - regionNormalizerZNode = ZKUtil.joinZNode(baseZNode, + regionNormalizerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); - switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); - tableLockZNode = ZKUtil.joinZNode(baseZNode, + switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); + tableLockZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock")); - snapshotCleanupZNode = ZKUtil.joinZNode(baseZNode, + snapshotCleanupZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); - recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode, + recoveringRegionsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.recovering.regions", "recovering-regions")); - namespaceZNode = ZKUtil.joinZNode(baseZNode, + namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); - masterMaintZNode = ZKUtil.joinZNode(baseZNode, + masterMaintZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); } @@ -508,7 +543,7 @@ public boolean isAnyMetaReplicaZnode(String node) { * @return true or false */ public boolean isDefaultMetaReplicaZnode(String node) { - if (getZNodeForReplica(HRegionInfo.DEFAULT_REPLICA_ID).equals(node)) { + if (getZNodeForReplica(DEFAULT_REPLICA_ID).equals(node)) { return true; } return false; @@ -542,7 +577,7 @@ public String getZNodeForReplica(int replicaId) { // This is mostly needed for tests that attempt to create meta replicas // from outside the master if (str == null) { - str = ZKUtil.joinZNode(baseZNode, + str = joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + replicaId); } return str; @@ -555,7 +590,9 @@ public String getZNodeForReplica(int replicaId) { */ public int getMetaReplicaIdFromZnode(String znode) { String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server"); - if (znode.equals(pattern)) return HRegionInfo.DEFAULT_REPLICA_ID; + if (znode.equals(pattern)) { + return DEFAULT_REPLICA_ID; + } // the non-default replicas are of the pattern meta-region-server- String nonDefaultPattern = pattern + "-"; return Integer.parseInt(znode.substring(nonDefaultPattern.length())); @@ -868,4 +905,45 @@ public String getRegionNormalizerZNode() { public String getSwitchZNode() { return switchZNode; } + + /** + * Parses the meta replicaId from the passed path. + * @param path the name of the full path which includes baseZNode. + * @return replicaId + */ + public int getMetaReplicaIdFromPath(String path) { + // Extract the znode from path. 
The prefix is of the following format. + // baseZNode + PATH_SEPARATOR. + int prefixLen = baseZNode.length() + 1; + return getMetaReplicaIdFromZnode(path.substring(prefixLen)); + } + + /** + * Same as {@link #getMetaReplicaNodes()} except that this also registers a watcher on base znode + * for subsequent CREATE/DELETE operations on child nodes. + */ + public List<String> getMetaReplicaNodesAndWatchChildren() throws KeeperException { + List<String> childrenOfBaseNode = + ZKUtil.listChildrenAndWatchForNewChildren(this, baseZNode); + return filterMetaReplicaNodes(childrenOfBaseNode); + } + + /** + * @param nodes Input list of znodes + * @return Filtered list of znodes from nodes that belong to meta replica(s). + */ + private List<String> filterMetaReplicaNodes(List<String> nodes) { + if (nodes == null || nodes.isEmpty()) { + return new ArrayList<>(); + } + List<String> metaReplicaNodes = new ArrayList<>(2); + String pattern = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX); + for (String child : nodes) { + if (child.startsWith(pattern)) { + metaReplicaNodes.add(child); + } + } + return metaReplicaNodes; + } + } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 5d37ad7be127..a73674a0ada9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -460,29 +460,33 @@ private static interface ResponseGenerator { * Returns our async process. */ static class MyConnectionImpl extends ConnectionManager.HConnectionImplementation { - public static class TestRegistry implements Registry { + public static class TestConnectionRegistry implements ConnectionRegistry { @Override public void init(Connection connection) {} @Override - public RegionLocations getMetaRegionLocation() throws IOException { + public ServerName getActiveMaster() { return null; } @Override - public String getClusterId() { - return "testClusterId"; + public RegionLocations getMetaRegionLocations() throws IOException { + return null; } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException { - return false; + public String getClusterId() { + return "testClusterId"; } @Override public int getCurrentNrHRS() throws IOException { return 1; } + + @Override + public void close() { + } } final AtomicInteger nbThreads = new AtomicInteger(0); @@ -492,7 +496,7 @@ protected MyConnectionImpl(Configuration conf) throws IOException { } private static Configuration setupConf(Configuration conf) { - conf.setClass(RegistryFactory.REGISTRY_IMPL_CONF_KEY, TestRegistry.class, Registry.class); + conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, TestConnectionRegistry.class, ConnectionRegistry.class); return conf; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index f6968bc42157..5c85980691dd 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -105,13 +105,13 @@ public void setUp() throws Exception { // Run my HConnection overrides. Use my little HConnectionImplementation below which // allows me insert mocks and also use my Registry below rather than the default zk based // one so tests run faster and don't have zk dependency.
- this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName()); + this.conf.set("hbase.client.registry.impl", SimpleConnectionRegistry.class.getName()); } /** * Simple cluster registry inserted in place of our usual zookeeper based one. */ - static class SimpleRegistry implements Registry { + static class SimpleConnectionRegistry implements ConnectionRegistry { final ServerName META_HOST = META_SERVERNAME; @Override @@ -119,7 +119,12 @@ public void init(Connection connection) { } @Override - public RegionLocations getMetaRegionLocation() throws IOException { + public ServerName getActiveMaster() { + return null; + } + + @Override + public RegionLocations getMetaRegionLocations() throws IOException { return new RegionLocations( new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST)); } @@ -130,14 +135,12 @@ public String getClusterId() { } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - return enabled; + public int getCurrentNrHRS() throws IOException { + return 1; } @Override - public int getCurrentNrHRS() throws IOException { - return 1; + public void close() { } } @@ -802,7 +805,7 @@ public int run(String[] arg0) throws Exception { getConf().set("hbase.client.connection.impl", ManyServersManyRegionsConnection.class.getName()); // Use simple kv registry rather than zk - getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName()); + getConf().set("hbase.client.registry.impl", SimpleConnectionRegistry.class.getName()); // When to report fails. Default is we report the 10th. This means we'll see log everytime // an exception is thrown -- usually RegionTooBusyException when we have more than // hbase.test.multi.too.many requests outstanding at any time. 
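An illustrative aside, not part of the patch: the plug point exercised by these tests accepts any registry implementation via hbase.client.registry.impl (now also exposed as HConstants.REGISTRY_IMPL_CONF_KEY). Below is a minimal sketch of a custom registry, assuming only the ConnectionRegistry shape visible in the tests above (init, getActiveMaster, getMetaRegionLocations, getClusterId, getCurrentNrHRS, close); the class name FixedConnectionRegistry and its endpoint values are hypothetical, and like the test registries it would need to live in the org.apache.hadoop.hbase.client package if the interface is package-private.

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;

// Hypothetical fixed-endpoint registry, mirroring SimpleConnectionRegistry above.
public class FixedConnectionRegistry implements ConnectionRegistry {
  // Statically configured master; a real implementation would read this from the Configuration.
  private final ServerName master = ServerName.valueOf("master.example.com", 16000, 1L);

  @Override
  public void init(Connection connection) {}

  @Override
  public ServerName getActiveMaster() {
    return master;
  }

  @Override
  public RegionLocations getMetaRegionLocations() throws IOException {
    // Assumes the default meta replica is co-located with the master host.
    return new RegionLocations(
        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, master));
  }

  @Override
  public String getClusterId() {
    return "fixed-cluster-id";
  }

  @Override
  public int getCurrentNrHRS() throws IOException {
    return 1;
  }

  @Override
  public void close() {}
}

// Wired up the same way as the test registries:
//   conf.set(HConstants.REGISTRY_IMPL_CONF_KEY, FixedConnectionRegistry.class.getName());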
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index f1be81bc31b8..9638c722b4d1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import com.google.protobuf.ByteString; import com.yammer.metrics.util.RatioGauge; @@ -129,6 +130,11 @@ public void testStaticMetrics() throws IOException { .build(), MetricsConnection.newCallStats()); } + for (String method: new String[]{"Get", "Scan", "Mutate"}) { + final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method; + final long metricVal = METRICS.rpcCounters.get(metricKey).count(); + assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); + } for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java deleted file mode 100644 index e82d3b0fc34c..000000000000 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.zookeeper; - -import static org.junit.Assert.fail; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.testclassification.SmallTests; - -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.data.Stat; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -@Category({SmallTests.class}) -public class TestZKTableStateClientSideReader { - - @Test - public void test() throws Exception { - ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class); - RecoverableZooKeeper rzk = Mockito.mock(RecoverableZooKeeper.class); - Mockito.doReturn(rzk).when(zkw).getRecoverableZooKeeper(); - Mockito.doReturn(null).when(rzk).getData(Mockito.anyString(), - Mockito.any(Watcher.class), Mockito.any(Stat.class)); - TableName table = TableName.valueOf("table-not-exists"); - try { - ZKTableStateClientSideReader.getTableState(zkw, table); - fail("Shouldn't reach here"); - } catch(TableNotFoundException e) { - // Expected Table not found exception - } - } -} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 28b2d1ccf664..41e75259f505 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -79,6 +79,7 @@ public final class HConstants { Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT; /** Just an array of bytes of the right size. */ public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE]; + public static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl"; //End HFileBlockConstants. @@ -176,7 +177,7 @@ public enum OperationStatusCode { public static final String MASTER_INFO_PORT = "hbase.master.info.port"; /** Configuration key for the list of master host:ports **/ - public static final String MASTER_ADDRS_KEY = "hbase.master.addrs"; + public static final String MASTER_ADDRS_KEY = "hbase.masters"; public static final String MASTER_ADDRS_DEFAULT = "localhost:" + DEFAULT_MASTER_PORT; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java new file mode 100644 index 000000000000..3a66f61c5fcb --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.exceptions; + +import java.util.Set; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.PrettyPrinter; + +/** + * Exception thrown when a master registry RPC fails in the client. The exception includes the list of + * masters to which the RPC was attempted and the last exception encountered. Prior exceptions are + * included in the logs. + */ +@InterfaceAudience.Private +public class MasterRegistryFetchException extends HBaseIOException { + + private static final long serialVersionUID = 6992134872168185171L; + + public MasterRegistryFetchException(Set<ServerName> masters, Throwable failure) { + super(String.format("Exception making rpc to masters %s", PrettyPrinter.toString(masters)), + failure); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java index 4b9e87f4ae3e..1241dc80495f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java @@ -19,6 +19,7 @@ import java.lang.reflect.Method; import java.net.UnknownHostException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** @@ -66,4 +67,14 @@ public static String getDefaultHost(String strInterface, String nameserver) return org.apache.hadoop.net.DNS.getDefaultHost(strInterface, nameserver); } } + + public static String getMasterHostname(Configuration conf) throws UnknownHostException { + String hostname = conf.get("hbase.master.hostname", ""); + if (hostname.isEmpty()) { + return Strings.domainNamePointerToHostName(getDefaultHost( + conf.get("hbase.master.dns.interface", "default"), + conf.get("hbase.master.dns.nameserver", "default"))); + } + return hostname; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index efdd144cd552..65304478e648 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -19,6 +19,11 @@ package org.apache.hadoop.hbase.util; +import com.google.common.base.Joiner; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -97,4 +102,18 @@ private static String humanReadableTTL(final long interval){ return sb.toString(); } + /** + * Pretty prints a collection of any type to a string. Relies on toString() implementation of the + * object type. + * @param collection collection to pretty print. + * @return Pretty printed string for the collection.
+ */ + public static String toString(Collection<?> collection) { + List<String> stringList = new ArrayList<>(); + for (Object o: collection) { + stringList.add(Objects.toString(o)); + } + return "[" + Joiner.on(',').join(stringList) + "]"; + } + } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index d5a7150efa55..f86370d48213 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -1975,94 +1975,74 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Bui // @@protoc_insertion_point(class_scope:hbase.pb.TableSchema) } - public interface ColumnFamilySchemaOrBuilder + public interface TableStateOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes name = 1; + // required .hbase.pb.TableState.State state = 1; /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + * <pre>
+     * This is the table's state.
+     * </pre>
*/ - boolean hasName(); + boolean hasState(); /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + * <pre>
+     * This is the table's state.
+     * </pre>
*/ - com.google.protobuf.ByteString getName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); - // repeated .hbase.pb.BytesBytesPair attributes = 2; - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - java.util.List - getAttributesList(); - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); + // required .hbase.pb.TableName table = 2; /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - int getAttributesCount(); + boolean hasTable(); /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - java.util.List - getAttributesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable(); /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index); + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder(); - // repeated .hbase.pb.NameStringPair configuration = 3; + // optional uint64 timestamp = 3; /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - java.util.List - getConfigurationList(); - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - int getConfigurationCount(); - /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - java.util.List - getConfigurationOrBuilderList(); + boolean hasTimestamp(); /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index); + long getTimestamp(); } /** - * Protobuf type {@code hbase.pb.ColumnFamilySchema} + * Protobuf type {@code hbase.pb.TableState} * *
-   **
-   * Column Family Schema
-   * Inspired by the rest ColumSchemaMessage
+   ** Denotes state of the table 
    * </pre>
*/ - public static final class ColumnFamilySchema extends + public static final class TableState extends com.google.protobuf.GeneratedMessage - implements ColumnFamilySchemaOrBuilder { - // Use ColumnFamilySchema.newBuilder() to construct. - private ColumnFamilySchema(com.google.protobuf.GeneratedMessage.Builder builder) { + implements TableStateOrBuilder { + // Use TableState.newBuilder() to construct. + private TableState(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ColumnFamilySchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ColumnFamilySchema defaultInstance; - public static ColumnFamilySchema getDefaultInstance() { + private static final TableState defaultInstance; + public static TableState getDefaultInstance() { return defaultInstance; } - public ColumnFamilySchema getDefaultInstanceForType() { + public TableState getDefaultInstanceForType() { return defaultInstance; } @@ -2072,7 +2052,7 @@ public ColumnFamilySchema getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ColumnFamilySchema( + private TableState( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2095,25 +2075,33 @@ private ColumnFamilySchema( } break; } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } break; } case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = table_.toBuilder(); } - attributes_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.PARSER, extensionRegistry)); + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; break; } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); + case 24: { + bitField0_ |= 0x00000004; + timestamp_ = input.readUInt64(); break; } } @@ -2124,157 +2112,225 @@ private ColumnFamilySchema( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = java.util.Collections.unmodifiableList(attributes_); - } - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = 
java.util.Collections.unmodifiableList(configuration_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ColumnFamilySchema parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableState parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ColumnFamilySchema(input, extensionRegistry); + return new TableState(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required bytes name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString name_; - /** - * required bytes name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } /** - * required bytes name = 1; + * Protobuf enum {@code hbase.pb.TableState.State} + * + *
+     * Table's current state
+     * </pre>
*/ - public com.google.protobuf.ByteString getName() { - return name_; - } + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ENABLED = 0; + */ + ENABLED(0, 0), + /** + * DISABLED = 1; + */ + DISABLED(1, 1), + /** + * DISABLING = 2; + */ + DISABLING(2, 2), + /** + * ENABLING = 3; + */ + ENABLING(3, 3), + ; - // repeated .hbase.pb.BytesBytesPair attributes = 2; - public static final int ATTRIBUTES_FIELD_NUMBER = 2; - private java.util.List attributes_; - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public java.util.List getAttributesList() { - return attributes_; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public java.util.List - getAttributesOrBuilderList() { - return attributes_; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public int getAttributesCount() { - return attributes_.size(); + /** + * ENABLED = 0; + */ + public static final int ENABLED_VALUE = 0; + /** + * DISABLED = 1; + */ + public static final int DISABLED_VALUE = 1; + /** + * DISABLING = 2; + */ + public static final int DISABLING_VALUE = 2; + /** + * ENABLING = 3; + */ + public static final int ENABLING_VALUE = 3; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + case 2: return DISABLING; + case 3: return ENABLING; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.TableState.State) } + + private int bitField0_; + // required .hbase.pb.TableState.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableState.State state = 1; + * + *
+     * This is the table's state.
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - return attributes_.get(index); + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableState.State state = 1; + * + *
+     * This is the table's state.
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - return attributes_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; } - // repeated .hbase.pb.NameStringPair configuration = 3; - public static final int CONFIGURATION_FIELD_NUMBER = 3; - private java.util.List configuration_; + // required .hbase.pb.TableName table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_; /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * required .hbase.pb.TableName table = 2; */ - public java.util.List getConfigurationList() { - return configuration_; + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * required .hbase.pb.TableName table = 2; */ - public java.util.List - getConfigurationOrBuilderList() { - return configuration_; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() { + return table_; } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * required .hbase.pb.TableName table = 2; */ - public int getConfigurationCount() { - return configuration_.size(); + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; } + + // optional uint64 timestamp = 3; + public static final int TIMESTAMP_FIELD_NUMBER = 3; + private long timestamp_; /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - return configuration_.get(index); + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - return configuration_.get(index); + public long getTimestamp() { + return timestamp_; } private void initFields() { - name_ = com.google.protobuf.ByteString.EMPTY; - attributes_ = java.util.Collections.emptyList(); - configuration_ = java.util.Collections.emptyList(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + timestamp_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasName()) { + if (!hasState()) { memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -2284,13 +2340,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 
0x00000001) == 0x00000001)) { - output.writeBytes(1, name_); + output.writeEnum(1, state_.getNumber()); } - for (int i = 0; i < attributes_.size(); i++) { - output.writeMessage(2, attributes_.get(i)); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, table_); } - for (int i = 0; i < configuration_.size(); i++) { - output.writeMessage(3, configuration_.get(i)); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, timestamp_); } getUnknownFields().writeTo(output); } @@ -2303,15 +2359,15 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, name_); + .computeEnumSize(1, state_.getNumber()); } - for (int i = 0; i < attributes_.size(); i++) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, attributes_.get(i)); + .computeMessageSize(2, table_); } - for (int i = 0; i < configuration_.size(); i++) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, configuration_.get(i)); + .computeUInt64Size(3, timestamp_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2330,21 +2386,27 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); } - result = result && getAttributesList() - .equals(other.getAttributesList()); - result = result && getConfigurationList() - .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -2358,70 +2420,70 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); } - if (getAttributesCount() > 0) { - hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; - hash = (53 * hash) + getAttributesList().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); } - if (getConfigurationCount() > 0) { - hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; - hash = (53 * hash) + getConfigurationList().hashCode(); + if (hasTimestamp()) { + hash = (37 * 
hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -2430,7 +2492,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamil public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -2442,30 +2504,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ColumnFamilySchema} + * Protobuf type {@code hbase.pb.TableState} * *
-     **
-     * Column Family Schema
-     * Inspired by the rest ColumSchemaMessage
+     ** Denotes the state of the table.
      * 
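
For orientation, here is a minimal sketch (not part of the patch) of building and round-tripping the new TableState message. Field usage follows the generated code in this hunk: state is a required enum (field 1), table a required TableName (field 2), and timestamp an optional uint64 (field 3). The setNamespace/setQualifier calls are assumed from the TableName proto, and all names and values are illustrative.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

    public class TableStateSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative table name "default:t1".
        TableName name = TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        TableState state = TableState.newBuilder()
            .setState(TableState.State.DISABLED)      // required field 1
            .setTable(name)                           // required field 2
            .setTimestamp(System.currentTimeMillis()) // optional field 3
            .build(); // build() throws if a required field is unset
        // Round-trip through the wire format written by writeTo() above.
        byte[] wire = state.toByteArray();
        TableState parsed = TableState.parseFrom(wire);
        assert parsed.getState() == TableState.State.DISABLED;
      }
    }
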
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -2477,8 +2537,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getAttributesFieldBuilder(); - getConfigurationFieldBuilder(); + getTableFieldBuilder(); } } private static Builder create() { @@ -2487,20 +2546,16 @@ private static Builder create() { public Builder clear() { super.clear(); - name_ = com.google.protobuf.ByteString.EMPTY; + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; bitField0_ = (bitField0_ & ~0x00000001); - if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - attributesBuilder_.clear(); - } - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); } else { - configurationBuilder_.clear(); + tableBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -2510,138 +2565,82 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); } - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; - if (attributesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = java.util.Collections.unmodifiableList(attributes_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.attributes_ = attributes_; - } else { - result.attributes_ = attributesBuilder_.build(); + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } - if (configurationBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = java.util.Collections.unmodifiableList(configuration_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.configuration_ = configuration_; + if (tableBuilder_ == null) { + result.table_ = table_; } else { - result.configuration_ = configurationBuilder_.build(); + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; } + result.timestamp_ = timestamp_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); } - if (attributesBuilder_ == null) { - if (!other.attributes_.isEmpty()) { - if (attributes_.isEmpty()) { - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureAttributesIsMutable(); - attributes_.addAll(other.attributes_); - } - onChanged(); - } - } else { - if 
(!other.attributes_.isEmpty()) { - if (attributesBuilder_.isEmpty()) { - attributesBuilder_.dispose(); - attributesBuilder_ = null; - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - attributesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getAttributesFieldBuilder() : null; - } else { - attributesBuilder_.addAllMessages(other.attributes_); - } - } + if (other.hasTable()) { + mergeTable(other.getTable()); } - if (configurationBuilder_ == null) { - if (!other.configuration_.isEmpty()) { - if (configuration_.isEmpty()) { - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureConfigurationIsMutable(); - configuration_.addAll(other.configuration_); - } - onChanged(); - } - } else { - if (!other.configuration_.isEmpty()) { - if (configurationBuilder_.isEmpty()) { - configurationBuilder_.dispose(); - configurationBuilder_ = null; - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000004); - configurationBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getConfigurationFieldBuilder() : null; - } else { - configurationBuilder_.addAllMessages(other.configuration_); - } - } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasName()) { + if (!hasState()) { return false; } - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - - return false; - } + if (!hasTable()) { + + return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - - return false; - } + if (!getTable().isInitialized()) { + + return false; } return true; } @@ -2650,11 +2649,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -2665,634 +2664,269 @@ public Builder mergeFrom( } private int bitField0_; - // required bytes name = 1; - private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; + // required .hbase.pb.TableState.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
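
The mergeFrom(TableState) generated just above follows standard protobuf 2 merge semantics: when set in the other message, the state enum and the timestamp scalar overwrite this builder's values, while the table submessage is merged field by field. A short sketch; values are illustrative, the DISABLING value is assumed from the State enum defined earlier in this file, and setNamespace/setQualifier are assumed from the TableName proto.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

    public class TableStateMergeSketch {
      public static void main(String[] args) {
        TableName name = TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        TableState base = TableState.newBuilder()
            .setState(TableState.State.ENABLED)
            .setTable(name)
            .build();
        TableState overlay = TableState.newBuilder()
            .setState(TableState.State.DISABLING)
            .setTable(name)
            .setTimestamp(12345L)
            .build();
        // newBuilder(prototype) is defined above as newBuilder().mergeFrom(prototype).
        TableState merged = TableState.newBuilder(base).mergeFrom(overlay).build();
        assert merged.getState() == TableState.State.DISABLING; // overlay's value wins
        assert merged.getTimestamp() == 12345L;                 // set only in the overlay
      }
    }
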
*/ - public boolean hasName() { + public boolean hasState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
*/ - public com.google.protobuf.ByteString getName() { - return name_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; } /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
*/ - public Builder setName(com.google.protobuf.ByteString value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; onChanged(); return this; } /** - * required bytes name = 1; + * required .hbase.pb.TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
*/ - public Builder clearName() { + public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; onChanged(); return this; } - // repeated .hbase.pb.BytesBytesPair attributes = 2; - private java.util.List attributes_ = - java.util.Collections.emptyList(); - private void ensureAttributesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = new java.util.ArrayList(attributes_); - bitField0_ |= 0x00000002; - } + // required .hbase.pb.TableName table = 2; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 2; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> attributesBuilder_; - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public java.util.List getAttributesList() { - if (attributesBuilder_ == null) { - return java.util.Collections.unmodifiableList(attributes_); + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; } else { - return attributesBuilder_.getMessageList(); + return tableBuilder_.getMessage(); } } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public int getAttributesCount() { - if (attributesBuilder_ == null) { - return attributes_.size(); + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); } else { - return attributesBuilder_.getCount(); + tableBuilder_.setMessage(value); } + bitField0_ |= 0x00000002; + return this; } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); - } else { - return attributesBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.set(index, value); - onChanged(); - } else { - attributesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder 
builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.set(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.add(value); + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); onChanged(); } else { - attributesBuilder_.addMessage(value); + tableBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000002; return this; } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + table_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; } - ensureAttributesIsMutable(); - attributes_.add(index, value); - onChanged(); - } else { - attributesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder addAttributes( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder addAllAttributes( - java.lang.Iterable values) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - super.addAll(values, attributes_); - onChanged(); - } else { - attributesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public Builder clearAttributes() { - if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { - attributesBuilder_.clear(); + tableBuilder_.mergeFrom(value); } + bitField0_ |= 0x00000002; return this; } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public Builder removeAttributes(int index) 
{ - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.remove(index); + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); onChanged(); } else { - attributesBuilder_.remove(index); + tableBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( - int index) { - return getAttributesFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); } else { - return attributesBuilder_.getMessageOrBuilder(index); - } + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableFieldBuilder().getBuilder(); } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public java.util.List - getAttributesOrBuilderList() { - if (attributesBuilder_ != null) { - return attributesBuilder_.getMessageOrBuilderList(); + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(attributes_); + return table_; } } /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { - return getAttributesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( - int index) { - return getAttributesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); - } - /** - * repeated .hbase.pb.BytesBytesPair attributes = 2; + * required .hbase.pb.TableName table = 2; */ - public java.util.List - getAttributesBuilderList() { - return getAttributesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> - getAttributesFieldBuilder() { - if (attributesBuilder_ == null) { - attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( - attributes_, - ((bitField0_ & 0x00000002) == 0x00000002), + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + table_, getParentForChildren(), isClean()); - attributes_ = null; + table_ = null; } - return attributesBuilder_; - } - - // repeated .hbase.pb.NameStringPair configuration = 3; - private java.util.List configuration_ = - java.util.Collections.emptyList(); - private void ensureConfigurationIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = new java.util.ArrayList(configuration_); - bitField0_ |= 0x00000004; - } + return tableBuilder_; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; - - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public java.util.List getConfigurationList() { - if (configurationBuilder_ == null) { - return java.util.Collections.unmodifiableList(configuration_); - } else { - return configurationBuilder_.getMessageList(); - } - } + // optional uint64 timestamp = 3; + private long timestamp_ ; /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public int getConfigurationCount() { - if (configurationBuilder_ == null) { - return configuration_.size(); - } else { - return configurationBuilder_.getCount(); - } + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - if (configurationBuilder_ == null) { - return configuration_.get(index); - } else { - return configurationBuilder_.getMessage(index); - } + public long getTimestamp() { + return timestamp_; } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public Builder setConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureConfigurationIsMutable(); - configuration_.set(index, value); - onChanged(); - } else { - configurationBuilder_.setMessage(index, value); - } + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000004; + timestamp_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.NameStringPair configuration = 3; + * optional uint64 timestamp = 3; */ - public Builder setConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.set(index, builderForValue.build()); - onChanged(); - } else { - configurationBuilder_.setMessage(index, builderForValue.build()); - } + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + onChanged(); return this; } - /** - * repeated .hbase.pb.NameStringPair 
configuration = 3; - */ - public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureConfigurationIsMutable(); - configuration_.add(value); - onChanged(); - } else { - configurationBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder addConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureConfigurationIsMutable(); - configuration_.add(index, value); - onChanged(); - } else { - configurationBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder addConfiguration( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.add(builderForValue.build()); - onChanged(); - } else { - configurationBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder addConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.add(index, builderForValue.build()); - onChanged(); - } else { - configurationBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder addAllConfiguration( - java.lang.Iterable values) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - super.addAll(values, configuration_); - onChanged(); - } else { - configurationBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder clearConfiguration() { - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - configurationBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public Builder removeConfiguration(int index) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.remove(index); - onChanged(); - } else { - configurationBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder( - int index) { - return getConfigurationFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - if (configurationBuilder_ == null) { - return configuration_.get(index); } else { - return configurationBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public java.util.List - getConfigurationOrBuilderList() { - if (configurationBuilder_ != null) { - return 
configurationBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(configuration_); - } - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() { - return getConfigurationFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder( - int index) { - return getConfigurationFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); - } - /** - * repeated .hbase.pb.NameStringPair configuration = 3; - */ - public java.util.List - getConfigurationBuilderList() { - return getConfigurationFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> - getConfigurationFieldBuilder() { - if (configurationBuilder_ == null) { - configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( - configuration_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - configuration_ = null; - } - return configurationBuilder_; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.ColumnFamilySchema) + // @@protoc_insertion_point(builder_scope:hbase.pb.TableState) } static { - defaultInstance = new ColumnFamilySchema(true); + defaultInstance = new TableState(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ColumnFamilySchema) + // @@protoc_insertion_point(class_scope:hbase.pb.TableState) } - public interface RegionInfoOrBuilder + public interface TableDescriptorOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required uint64 region_id = 1; - /** - * required uint64 region_id = 1; - */ - boolean hasRegionId(); - /** - * required uint64 region_id = 1; - */ - long getRegionId(); - - // required .hbase.pb.TableName table_name = 2; - /** - * required .hbase.pb.TableName table_name = 2; - */ - boolean hasTableName(); - /** - * required .hbase.pb.TableName table_name = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); - /** - * required .hbase.pb.TableName table_name = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); - - // optional bytes start_key = 3; - /** - * optional bytes start_key = 3; - */ - boolean hasStartKey(); - /** - * optional bytes start_key = 3; - */ - com.google.protobuf.ByteString getStartKey(); - - // optional bytes end_key = 4; - /** - * optional bytes end_key = 4; - */ - boolean hasEndKey(); - /** - * optional bytes end_key = 4; - */ - com.google.protobuf.ByteString getEndKey(); - - // optional bool offline = 5; - /** - * optional bool offline = 5; - */ - boolean hasOffline(); + // required .hbase.pb.TableSchema schema = 1; /** - * optional bool 
offline = 5; + * required .hbase.pb.TableSchema schema = 1; */ - boolean getOffline(); - - // optional bool split = 6; + boolean hasSchema(); /** - * optional bool split = 6; + * required .hbase.pb.TableSchema schema = 1; */ - boolean hasSplit(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema(); /** - * optional bool split = 6; + * required .hbase.pb.TableSchema schema = 1; */ - boolean getSplit(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); - // optional int32 replica_id = 7 [default = 0]; + // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; /** - * optional int32 replica_id = 7 [default = 0]; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - boolean hasReplicaId(); + boolean hasState(); /** - * optional int32 replica_id = 7 [default = 0]; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - int getReplicaId(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); } /** - * Protobuf type {@code hbase.pb.RegionInfo} + * Protobuf type {@code hbase.pb.TableDescriptor} * *
-   **
-   * Protocol buffer version of HRegionInfo.
+   ** On-HDFS representation of table state.
    * 
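
To make the new pairing concrete, a sketch (not part of the patch) of assembling a TableDescriptor: the existing TableSchema still carries the table definition, and the new state field records the table's life-cycle state next to it. The TableSchema and ColumnFamilySchema builder calls are assumed from their definitions elsewhere in this file; all names and values are illustrative.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

    public class TableDescriptorSketch {
      public static void main(String[] args) {
        // Illustrative schema: table "default:t1" with one column family "f1".
        TableProtos.TableName name = TableProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        HBaseProtos.TableSchema schema = HBaseProtos.TableSchema.newBuilder()
            .setTableName(name)
            .addColumnFamilies(HBaseProtos.ColumnFamilySchema.newBuilder()
                .setName(ByteString.copyFromUtf8("f1")))
            .build();
        // The descriptor persisted on HDFS now carries schema and state together.
        HBaseProtos.TableDescriptor desc = HBaseProtos.TableDescriptor.newBuilder()
            .setSchema(schema)                              // required field 1
            .setState(HBaseProtos.TableState.State.ENABLED) // optional field 2
            .build();
        assert desc.hasState();
      }
    }
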
*/ - public static final class RegionInfo extends + public static final class TableDescriptor extends com.google.protobuf.GeneratedMessage - implements RegionInfoOrBuilder { - // Use RegionInfo.newBuilder() to construct. - private RegionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + implements TableDescriptorOrBuilder { + // Use TableDescriptor.newBuilder() to construct. + private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private RegionInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final RegionInfo defaultInstance; - public static RegionInfo getDefaultInstance() { + private static final TableDescriptor defaultInstance; + public static TableDescriptor getDefaultInstance() { return defaultInstance; } - public RegionInfo getDefaultInstanceForType() { + public TableDescriptor getDefaultInstanceForType() { return defaultInstance; } @@ -3302,7 +2936,7 @@ public RegionInfo getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private RegionInfo( + private TableDescriptor( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3325,47 +2959,28 @@ private RegionInfo( } break; } - case 8: { + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = schema_.toBuilder(); + } + schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(schema_); + schema_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000001; - regionId_ = input.readUInt64(); break; } - case 18: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + state_ = value; } - bitField0_ |= 0x00000002; - break; - } - case 26: { - bitField0_ |= 0x00000004; - startKey_ = input.readBytes(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - endKey_ = input.readBytes(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - offline_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - split_ = input.readBool(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - replicaId_ = input.readInt32(); break; } } @@ -3382,173 +2997,84 @@ private RegionInfo( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RegionInfo parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableDescriptor parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new RegionInfo(input, extensionRegistry); + return new TableDescriptor(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required uint64 region_id = 1; - public static final int REGION_ID_FIELD_NUMBER = 1; - private long regionId_; + // required .hbase.pb.TableSchema schema = 1; + public static final int SCHEMA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_; /** - * required uint64 region_id = 1; + * required .hbase.pb.TableSchema schema = 1; */ - public boolean hasRegionId() { + public boolean hasSchema() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required uint64 region_id = 1; - */ - public long getRegionId() { - return regionId_; - } - - // required .hbase.pb.TableName table_name = 2; - public static final int TABLE_NAME_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; - /** - * required .hbase.pb.TableName table_name = 2; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.TableName table_name = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } - /** - * required .hbase.pb.TableName table_name = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; - } - - // optional bytes start_key = 3; - public static final int START_KEY_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString startKey_; - /** - * optional bytes start_key = 3; - */ - public boolean hasStartKey() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bytes start_key = 3; - */ - public com.google.protobuf.ByteString getStartKey() { - return startKey_; - } - - // optional bytes end_key = 4; - public static final int END_KEY_FIELD_NUMBER = 4; - private com.google.protobuf.ByteString endKey_; - /** - * optional bytes end_key = 4; - */ - public boolean hasEndKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes 
end_key = 4; - */ - public com.google.protobuf.ByteString getEndKey() { - return endKey_; - } - - // optional bool offline = 5; - public static final int OFFLINE_FIELD_NUMBER = 5; - private boolean offline_; - /** - * optional bool offline = 5; - */ - public boolean hasOffline() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool offline = 5; - */ - public boolean getOffline() { - return offline_; - } - - // optional bool split = 6; - public static final int SPLIT_FIELD_NUMBER = 6; - private boolean split_; - /** - * optional bool split = 6; + * required .hbase.pb.TableSchema schema = 1; */ - public boolean hasSplit() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + return schema_; } /** - * optional bool split = 6; + * required .hbase.pb.TableSchema schema = 1; */ - public boolean getSplit() { - return split_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + return schema_; } - // optional int32 replica_id = 7 [default = 0]; - public static final int REPLICA_ID_FIELD_NUMBER = 7; - private int replicaId_; + // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; + public static final int STATE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; /** - * optional int32 replica_id = 7 [default = 0]; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public boolean hasReplicaId() { - return ((bitField0_ & 0x00000040) == 0x00000040); + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional int32 replica_id = 7 [default = 0]; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public int getReplicaId() { - return replicaId_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; } private void initFields() { - regionId_ = 0L; - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - startKey_ = com.google.protobuf.ByteString.EMPTY; - endKey_ = com.google.protobuf.ByteString.EMPTY; - offline_ = false; - split_ = false; - replicaId_ = 0; + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasRegionId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTableName()) { + if (!hasSchema()) { memoizedIsInitialized = 0; return false; } - if (!getTableName().isInitialized()) { + if (!getSchema().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -3560,25 +3086,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, regionId_); + output.writeMessage(1, schema_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, startKey_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, endKey_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - 
output.writeBool(5, offline_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBool(6, split_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeInt32(7, replicaId_); + output.writeEnum(2, state_.getNumber()); } getUnknownFields().writeTo(output); } @@ -3591,31 +3102,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, regionId_); + .computeMessageSize(1, schema_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, startKey_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, endKey_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, offline_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(6, split_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(7, replicaId_); + .computeEnumSize(2, state_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3634,46 +3125,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj; boolean result = true; - result = result && (hasRegionId() == other.hasRegionId()); - if (hasRegionId()) { - result = result && (getRegionId() - == other.getRegionId()); - } - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } - result = result && (hasStartKey() == other.hasStartKey()); - if (hasStartKey()) { - result = result && getStartKey() - .equals(other.getStartKey()); - } - result = result && (hasEndKey() == other.hasEndKey()); - if (hasEndKey()) { - result = result && getEndKey() - .equals(other.getEndKey()); - } - result = result && (hasOffline() == other.hasOffline()); - if (hasOffline()) { - result = result && (getOffline() - == other.getOffline()); - } - result = result && (hasSplit() == other.hasSplit()); - if (hasSplit()) { - result = result && (getSplit() - == other.getSplit()); + result = result && (hasSchema() == other.hasSchema()); + if (hasSchema()) { + result = result && getSchema() + .equals(other.getSchema()); } - result = result && (hasReplicaId() == other.hasReplicaId()); - if (hasReplicaId()) { - result = result && (getReplicaId() - == other.getReplicaId()); + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -3688,86 +3154,66 @@ public int 
hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegionId()) { - hash = (37 * hash) + REGION_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRegionId()); - } - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } - if (hasStartKey()) { - hash = (37 * hash) + START_KEY_FIELD_NUMBER; - hash = (53 * hash) + getStartKey().hashCode(); - } - if (hasEndKey()) { - hash = (37 * hash) + END_KEY_FIELD_NUMBER; - hash = (53 * hash) + getEndKey().hashCode(); - } - if (hasOffline()) { - hash = (37 * hash) + OFFLINE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getOffline()); - } - if (hasSplit()) { - hash = (37 * hash) + SPLIT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getSplit()); + if (hasSchema()) { + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); } - if (hasReplicaId()) { - hash = (37 * hash) + REPLICA_ID_FIELD_NUMBER; - hash = (53 * hash) + getReplicaId(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3776,7 +3222,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -3788,29 +3234,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.RegionInfo} + * Protobuf type {@code hbase.pb.TableDescriptor} * *
-     **
-     * Protocol buffer version of HRegionInfo.
+     ** On-HDFS representation of table state.
      * 
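
One behavioral detail that follows from the field definition above: state is optional with [default = ENABLED], so a descriptor built without calling setState() reports hasState() == false, yet getState() still returns ENABLED (set up by initFields() earlier in this hunk). A minimal fragment, reusing the schema from the previous sketch:

    // Inside a method body; `schema` as built in the TableDescriptor sketch above.
    HBaseProtos.TableDescriptor d = HBaseProtos.TableDescriptor.newBuilder()
        .setSchema(schema) // only the required field is set
        .build();
    assert !d.hasState();                                        // state never set
    assert d.getState() == HBaseProtos.TableState.State.ENABLED; // declared default
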
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -3822,7 +3267,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); + getSchemaFieldBuilder(); } } private static Builder create() { @@ -3831,24 +3276,14 @@ private static Builder create() { public Builder clear() { super.clear(); - regionId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); } else { - tableNameBuilder_.clear(); + schemaBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; bitField0_ = (bitField0_ & ~0x00000002); - startKey_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - endKey_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000008); - offline_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - split_ = false; - bitField0_ = (bitField0_ & ~0x00000020); - replicaId_ = 0; - bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -3858,108 +3293,69 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.regionId_ = regionId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; + if (schemaBuilder_ == null) { + result.schema_ = schema_; } else { - result.tableName_ = tableNameBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.startKey_ = startKey_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.endKey_ = endKey_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.offline_ = offline_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; + result.schema_ = schemaBuilder_.build(); } - result.split_ = split_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } - result.replicaId_ = replicaId_; + result.state_ = state_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) return this; - if (other.hasRegionId()) { - setRegionId(other.getRegionId()); - } - if (other.hasTableName()) { - mergeTableName(other.getTableName()); - } - if (other.hasStartKey()) { - setStartKey(other.getStartKey()); - } - if (other.hasEndKey()) { - setEndKey(other.getEndKey()); - } - if (other.hasOffline()) { - setOffline(other.getOffline()); - } - if (other.hasSplit()) { - setSplit(other.getSplit()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor 
other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this; + if (other.hasSchema()) { + mergeSchema(other.getSchema()); } - if (other.hasReplicaId()) { - setReplicaId(other.getReplicaId()); + if (other.hasState()) { + setState(other.getState()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasRegionId()) { - - return false; - } - if (!hasTableName()) { + if (!hasSchema()) { return false; } - if (!getTableName().isInitialized()) { + if (!getSchema().isInitialized()) { return false; } @@ -3970,11 +3366,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -3985,390 +3381,258 @@ public Builder mergeFrom( } private int bitField0_; - // required uint64 region_id = 1; - private long regionId_ ; - /** - * required uint64 region_id = 1; - */ - public boolean hasRegionId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 region_id = 1; - */ - public long getRegionId() { - return regionId_; - } - /** - * required uint64 region_id = 1; - */ - public Builder setRegionId(long value) { - bitField0_ |= 0x00000001; - regionId_ = value; - onChanged(); - return this; - } - /** - * required uint64 region_id = 1; - */ - public Builder clearRegionId() { - bitField0_ = (bitField0_ & ~0x00000001); - regionId_ = 0L; - onChanged(); - return this; - } - - // required .hbase.pb.TableName table_name = 2; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + // required .hbase.pb.TableSchema schema = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_; /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public boolean hasSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - if (tableNameBuilder_ == null) { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + if (schemaBuilder_ == null) { + return schema_; } else { - return tableNameBuilder_.getMessage(); + return schemaBuilder_.getMessage(); } } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { + public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - tableName_ = value; + schema_ = value; onChanged(); } else { - tableNameBuilder_.setMessage(value); + schemaBuilder_.setMessage(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public Builder setTableName( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableNameBuilder_ == null) { - tableName_ = builderForValue.build(); + public Builder setSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (schemaBuilder_ == null) { + schema_ = builderForValue.build(); onChanged(); } else { - tableNameBuilder_.setMessage(builderForValue.build()); + schemaBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - tableName_ = - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + schema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial(); } else { - tableName_ = value; + schema_ = value; } onChanged(); } else { - tableNameBuilder_.mergeFrom(value); + schemaBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + public Builder clearSchema() { + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); onChanged(); } else { - tableNameBuilder_.clear(); + schemaBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required 
.hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000002; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() { + bitField0_ |= 0x00000001; onChanged(); - return getTableNameFieldBuilder().getBuilder(); + return getSchemaFieldBuilder().getBuilder(); } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + if (schemaBuilder_ != null) { + return schemaBuilder_.getMessageOrBuilder(); } else { - return tableName_; + return schema_; } } /** - * required .hbase.pb.TableName table_name = 2; + * required .hbase.pb.TableSchema schema = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getSchemaFieldBuilder() { + if (schemaBuilder_ == null) { + schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + schema_, getParentForChildren(), isClean()); - tableName_ = null; + schema_ = null; } - return tableNameBuilder_; + return schemaBuilder_; } - // optional bytes start_key = 3; - private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; + // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; /** - * optional bytes start_key = 3; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public boolean hasStartKey() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bytes start_key = 3; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public com.google.protobuf.ByteString getStartKey() { - return startKey_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; } /** - * optional bytes start_key = 3; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public Builder 
setStartKey(com.google.protobuf.ByteString value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - startKey_ = value; + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + state_ = value; onChanged(); return this; } /** - * optional bytes start_key = 3; + * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; */ - public Builder clearStartKey() { - bitField0_ = (bitField0_ & ~0x00000004); - startKey_ = getDefaultInstance().getStartKey(); + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000002); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; onChanged(); return this; } - // optional bytes end_key = 4; - private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes end_key = 4; - */ - public boolean hasEndKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes end_key = 4; - */ - public com.google.protobuf.ByteString getEndKey() { - return endKey_; - } - /** - * optional bytes end_key = 4; - */ - public Builder setEndKey(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); + // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor) + } + + static { + defaultInstance = new TableDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor) } - bitField0_ |= 0x00000008; - endKey_ = value; - onChanged(); - return this; - } - /** - * optional bytes end_key = 4; - */ - public Builder clearEndKey() { - bitField0_ = (bitField0_ & ~0x00000008); - endKey_ = getDefaultInstance().getEndKey(); - onChanged(); - return this; - } - // optional bool offline = 5; - private boolean offline_ ; - /** - * optional bool offline = 5; - */ - public boolean hasOffline() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool offline = 5; - */ - public boolean getOffline() { - return offline_; - } - /** - * optional bool offline = 5; - */ - public Builder setOffline(boolean value) { - bitField0_ |= 0x00000010; - offline_ = value; - onChanged(); - return this; - } - /** - * optional bool offline = 5; - */ - public Builder clearOffline() { - bitField0_ = (bitField0_ & ~0x00000010); - offline_ = false; - onChanged(); - return this; - } - - // optional bool split = 6; - private boolean split_ ; - /** - * optional bool split = 6; - */ - public boolean hasSplit() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional bool split = 6; - */ - public boolean getSplit() { - return split_; - } - /** - * optional bool split = 6; - */ - public Builder setSplit(boolean value) { - bitField0_ |= 0x00000020; - split_ = value; - onChanged(); - return this; - } - /** - * optional bool split = 6; - */ - public Builder clearSplit() { - bitField0_ = (bitField0_ & ~0x00000020); - split_ = false; - onChanged(); - return this; - } - - // optional int32 replica_id = 7 [default = 0]; - private int replicaId_ ; - /** - * optional int32 replica_id = 7 [default = 0]; - */ - public boolean hasReplicaId() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional int32 replica_id = 7 [default = 0]; - */ - public int getReplicaId() { - return replicaId_; - } - /** - * optional int32 replica_id = 7 [default = 0]; - */ - public Builder setReplicaId(int 
value) { - bitField0_ |= 0x00000040; - replicaId_ = value; - onChanged(); - return this; - } - /** - * optional int32 replica_id = 7 [default = 0]; - */ - public Builder clearReplicaId() { - bitField0_ = (bitField0_ & ~0x00000040); - replicaId_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.RegionInfo) - } - - static { - defaultInstance = new RegionInfo(true); - defaultInstance.initFields(); - } + public interface ColumnFamilySchemaOrBuilder + extends com.google.protobuf.MessageOrBuilder { - // @@protoc_insertion_point(class_scope:hbase.pb.RegionInfo) - } + // required bytes name = 1; + /** + * required bytes name = 1; + */ + boolean hasName(); + /** + * required bytes name = 1; + */ + com.google.protobuf.ByteString getName(); - public interface FavoredNodesOrBuilder - extends com.google.protobuf.MessageOrBuilder { + // repeated .hbase.pb.BytesBytesPair attributes = 2; + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + java.util.List + getAttributesList(); + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + int getAttributesCount(); + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + java.util.List + getAttributesOrBuilderList(); + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index); - // repeated .hbase.pb.ServerName favored_node = 1; + // repeated .hbase.pb.NameStringPair configuration = 3; /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.NameStringPair configuration = 3; */ - java.util.List - getFavoredNodeList(); + java.util.List + getConfigurationList(); /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.NameStringPair configuration = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.NameStringPair configuration = 3; */ - int getFavoredNodeCount(); + int getConfigurationCount(); /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.NameStringPair configuration = 3; */ - java.util.List - getFavoredNodeOrBuilderList(); + java.util.List + getConfigurationOrBuilderList(); /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.NameStringPair configuration = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( int index); } /** - * Protobuf type {@code hbase.pb.FavoredNodes} + * Protobuf type {@code hbase.pb.ColumnFamilySchema} * *
    **
-   * Protocol buffer for favored nodes
+   * Column Family Schema
+   * Inspired by the REST ColumnSchemaMessage
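+   *
+   * A minimal, illustrative sketch of populating this message through its
+   * builder (the family name, attribute, and configuration values are
+   * invented):
+   *
+   *   ColumnFamilySchema cf = ColumnFamilySchema.newBuilder()
+   *       .setName(ByteString.copyFromUtf8("info"))
+   *       .addAttributes(BytesBytesPair.newBuilder()
+   *           .setFirst(ByteString.copyFromUtf8("VERSIONS"))
+   *           .setSecond(ByteString.copyFromUtf8("3")))
+   *       .addConfiguration(NameStringPair.newBuilder()
+   *           .setName("hbase.hstore.blockingStoreFiles")
+   *           .setValue("10"))
+   *       .build();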
    * 
*/ - public static final class FavoredNodes extends + public static final class ColumnFamilySchema extends com.google.protobuf.GeneratedMessage - implements FavoredNodesOrBuilder { - // Use FavoredNodes.newBuilder() to construct. - private FavoredNodes(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ColumnFamilySchemaOrBuilder { + // Use ColumnFamilySchema.newBuilder() to construct. + private ColumnFamilySchema(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private FavoredNodes(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ColumnFamilySchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final FavoredNodes defaultInstance; - public static FavoredNodes getDefaultInstance() { + private static final ColumnFamilySchema defaultInstance; + public static ColumnFamilySchema getDefaultInstance() { return defaultInstance; } - public FavoredNodes getDefaultInstanceForType() { + public ColumnFamilySchema getDefaultInstanceForType() { return defaultInstance; } @@ -4378,7 +3642,7 @@ public FavoredNodes getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private FavoredNodes( + private ColumnFamilySchema( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4402,11 +3666,24 @@ private FavoredNodes( break; } case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; } - favoredNode_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + attributes_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + configuration_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); break; } } @@ -4417,8 +3694,11 @@ private FavoredNodes( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = java.util.Collections.unmodifiableList(attributes_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -4426,77 +3706,142 @@ private FavoredNodes( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; } 
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public FavoredNodes parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ColumnFamilySchema parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new FavoredNodes(input, extensionRegistry); + return new ColumnFamilySchema(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.ServerName favored_node = 1; - public static final int FAVORED_NODE_FIELD_NUMBER = 1; - private java.util.List favoredNode_; + private int bitField0_; + // required bytes name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString name_; /** - * repeated .hbase.pb.ServerName favored_node = 1; + * required bytes name = 1; */ - public java.util.List getFavoredNodeList() { - return favoredNode_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * required bytes name = 1; */ - public java.util.List - getFavoredNodeOrBuilderList() { - return favoredNode_; + public com.google.protobuf.ByteString getName() { + return name_; } - /** - * repeated .hbase.pb.ServerName favored_node = 1; + + // repeated .hbase.pb.BytesBytesPair attributes = 2; + public static final int ATTRIBUTES_FIELD_NUMBER = 2; + private java.util.List attributes_; + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public int getFavoredNodeCount() { - return favoredNode_.size(); + public java.util.List getAttributesList() { + return attributes_; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { - return favoredNode_.get(index); + public java.util.List + getAttributesOrBuilderList() { + return attributes_; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + public int getAttributesCount() { + return attributes_.size(); + } + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + return attributes_.get(index); + } + /** + * repeated .hbase.pb.BytesBytesPair attributes = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( int index) { - return favoredNode_.get(index); + return attributes_.get(index); + } + + // repeated .hbase.pb.NameStringPair configuration = 3; + public static final int CONFIGURATION_FIELD_NUMBER = 3; + private java.util.List configuration_; + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public java.util.List getConfigurationList() { + return configuration_; + } + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public java.util.List + getConfigurationOrBuilderList() { + return configuration_; + } + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public int getConfigurationCount() { + return configuration_.size(); + } + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + return configuration_.get(index); + } + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + return configuration_.get(index); } private void initFields() { - favoredNode_ = java.util.Collections.emptyList(); + name_ = com.google.protobuf.ByteString.EMPTY; + attributes_ = java.util.Collections.emptyList(); + configuration_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getFavoredNodeCount(); i++) { - if (!getFavoredNode(i).isInitialized()) { + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getAttributesCount(); i++) { + if (!getAttributes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -4508,8 +3853,14 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < favoredNode_.size(); i++) { - output.writeMessage(1, favoredNode_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, name_); + } + for (int i = 0; i < attributes_.size(); i++) { + output.writeMessage(2, attributes_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + output.writeMessage(3, configuration_.get(i)); } getUnknownFields().writeTo(output); } @@ -4520,9 +3871,17 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < favoredNode_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, favoredNode_.get(i)); + .computeBytesSize(1, name_); + } + for (int i = 0; i < attributes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, attributes_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, configuration_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -4541,14 +3900,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) obj; boolean result = true; - result = result && getFavoredNodeList() - .equals(other.getFavoredNodeList()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getAttributesList() + .equals(other.getAttributesList()); + result = result && getConfigurationList() + .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -4562,62 +3928,70 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getFavoredNodeCount() > 0) { - hash = (37 * hash) + FAVORED_NODE_FIELD_NUMBER; - hash = (53 * hash) + getFavoredNodeList().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getAttributesCount() > 0) { + hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; + hash = (53 * hash) + getAttributesList().hashCode(); + } + if (getConfigurationCount() > 0) { + hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; + hash = (53 * hash) + getConfigurationList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4626,7 +4000,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNode public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -4638,29 +4012,30 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.FavoredNodes} + * Protobuf type {@code hbase.pb.ColumnFamilySchema} * *
      **
-     * Protocol buffer for favored nodes
+     * Column Family Schema
+     * Inspired by the REST ColumnSchemaMessage
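+     *
+     * An illustrative round trip through the generated serializer and
+     * parser, assuming a ColumnFamilySchema cf built as sketched in the
+     * class javadoc above:
+     *
+     *   byte[] bytes = cf.toByteArray();
+     *   ColumnFamilySchema copy = ColumnFamilySchema.parseFrom(bytes);
+     *   assert copy.getName().equals(cf.getName());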
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -4672,7 +4047,8 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getFavoredNodeFieldBuilder(); + getAttributesFieldBuilder(); + getConfigurationFieldBuilder(); } } private static Builder create() { @@ -4681,11 +4057,19 @@ private static Builder create() { public Builder clear() { super.clear(); - if (favoredNodeBuilder_ == null) { - favoredNode_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + name_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); } else { - favoredNodeBuilder_.clear(); + attributesBuilder_.clear(); + } + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + configurationBuilder_.clear(); } return this; } @@ -4696,71 +4080,115 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilySchema_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = 
buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema(this); int from_bitField0_ = bitField0_; - if (favoredNodeBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); - bitField0_ = (bitField0_ & ~0x00000001); + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (attributesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = java.util.Collections.unmodifiableList(attributes_); + bitField0_ = (bitField0_ & ~0x00000002); } - result.favoredNode_ = favoredNode_; + result.attributes_ = attributes_; } else { - result.favoredNode_ = favoredNodeBuilder_.build(); + result.attributes_ = attributesBuilder_.build(); + } + if (configurationBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.configuration_ = configuration_; + } else { + result.configuration_ = configurationBuilder_.build(); } + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance()) return this; - if (favoredNodeBuilder_ == null) { - if (!other.favoredNode_.isEmpty()) { - if (favoredNode_.isEmpty()) { - favoredNode_ = other.favoredNode_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureFavoredNodeIsMutable(); - favoredNode_.addAll(other.favoredNode_); - } - onChanged(); - } - } else { - if (!other.favoredNode_.isEmpty()) { - if (favoredNodeBuilder_.isEmpty()) { - favoredNodeBuilder_.dispose(); - favoredNodeBuilder_ = null; - favoredNode_ = other.favoredNode_; - bitField0_ = (bitField0_ & ~0x00000001); - favoredNodeBuilder_ = + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + 
} + if (attributesBuilder_ == null) { + if (!other.attributes_.isEmpty()) { + if (attributes_.isEmpty()) { + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureAttributesIsMutable(); + attributes_.addAll(other.attributes_); + } + onChanged(); + } + } else { + if (!other.attributes_.isEmpty()) { + if (attributesBuilder_.isEmpty()) { + attributesBuilder_.dispose(); + attributesBuilder_ = null; + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + attributesBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getFavoredNodeFieldBuilder() : null; + getAttributesFieldBuilder() : null; } else { - favoredNodeBuilder_.addAllMessages(other.favoredNode_); + attributesBuilder_.addAllMessages(other.attributes_); + } + } + } + if (configurationBuilder_ == null) { + if (!other.configuration_.isEmpty()) { + if (configuration_.isEmpty()) { + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureConfigurationIsMutable(); + configuration_.addAll(other.configuration_); + } + onChanged(); + } + } else { + if (!other.configuration_.isEmpty()) { + if (configurationBuilder_.isEmpty()) { + configurationBuilder_.dispose(); + configurationBuilder_ = null; + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000004); + configurationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getConfigurationFieldBuilder() : null; + } else { + configurationBuilder_.addAllMessages(other.configuration_); } } } @@ -4769,8 +4197,18 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos. } public final boolean isInitialized() { - for (int i = 0; i < getFavoredNodeCount(); i++) { - if (!getFavoredNode(i).isInitialized()) { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getAttributesCount(); i++) { + if (!getAttributes(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { return false; } @@ -4782,11 +4220,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -4797,973 +4235,634 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.ServerName favored_node = 1; - private java.util.List favoredNode_ = + // required bytes name = 1; + private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes name = 1; + */ + public com.google.protobuf.ByteString getName() { + return name_; + } + /** + * required bytes name = 1; + */ + public Builder setName(com.google.protobuf.ByteString value) { + if (value == null) { + 
throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required bytes name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + + // repeated .hbase.pb.BytesBytesPair attributes = 2; + private java.util.List attributes_ = java.util.Collections.emptyList(); - private void ensureFavoredNodeIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = new java.util.ArrayList(favoredNode_); - bitField0_ |= 0x00000001; + private void ensureAttributesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = new java.util.ArrayList(attributes_); + bitField0_ |= 0x00000002; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodeBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> attributesBuilder_; /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public java.util.List getFavoredNodeList() { - if (favoredNodeBuilder_ == null) { - return java.util.Collections.unmodifiableList(favoredNode_); + public java.util.List getAttributesList() { + if (attributesBuilder_ == null) { + return java.util.Collections.unmodifiableList(attributes_); } else { - return favoredNodeBuilder_.getMessageList(); + return attributesBuilder_.getMessageList(); } } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public int getFavoredNodeCount() { - if (favoredNodeBuilder_ == null) { - return favoredNode_.size(); + public int getAttributesCount() { + if (attributesBuilder_ == null) { + return attributes_.size(); } else { - return favoredNodeBuilder_.getCount(); + return attributesBuilder_.getCount(); } } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { - if (favoredNodeBuilder_ == null) { - return favoredNode_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + if (attributesBuilder_ == null) { + return attributes_.get(index); } else { - return favoredNodeBuilder_.getMessage(index); + return attributesBuilder_.getMessage(index); } } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder setFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureFavoredNodeIsMutable(); - favoredNode_.set(index, value); + ensureAttributesIsMutable(); + attributes_.set(index, value); onChanged(); } else { - favoredNodeBuilder_.setMessage(index, value); + 
attributesBuilder_.setMessage(index, value); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder setFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.set(index, builderForValue.build()); + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.set(index, builderForValue.build()); onChanged(); } else { - favoredNodeBuilder_.setMessage(index, builderForValue.build()); + attributesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder addFavoredNode(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { + public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureFavoredNodeIsMutable(); - favoredNode_.add(value); + ensureAttributesIsMutable(); + attributes_.add(value); onChanged(); } else { - favoredNodeBuilder_.addMessage(value); + attributesBuilder_.addMessage(value); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder addFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { + public Builder addAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureFavoredNodeIsMutable(); - favoredNode_.add(index, value); + ensureAttributesIsMutable(); + attributes_.add(index, value); onChanged(); } else { - favoredNodeBuilder_.addMessage(index, value); + attributesBuilder_.addMessage(index, value); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder addFavoredNode( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.add(builderForValue.build()); + public Builder addAttributes( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(builderForValue.build()); onChanged(); } else { - favoredNodeBuilder_.addMessage(builderForValue.build()); + attributesBuilder_.addMessage(builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder addFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.add(index, builderForValue.build()); + public Builder addAttributes( + int index, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(index, builderForValue.build()); onChanged(); } else { - favoredNodeBuilder_.addMessage(index, builderForValue.build()); + attributesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder addAllFavoredNode( - java.lang.Iterable values) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - super.addAll(values, favoredNode_); + public Builder addAllAttributes( + java.lang.Iterable values) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + super.addAll(values, attributes_); onChanged(); } else { - favoredNodeBuilder_.addAllMessages(values); + attributesBuilder_.addAllMessages(values); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder clearFavoredNode() { - if (favoredNodeBuilder_ == null) { - favoredNode_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + public Builder clearAttributes() { + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { - favoredNodeBuilder_.clear(); + attributesBuilder_.clear(); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public Builder removeFavoredNode(int index) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.remove(index); + public Builder removeAttributes(int index) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.remove(index); onChanged(); } else { - favoredNodeBuilder_.remove(index); + attributesBuilder_.remove(index); } return this; } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodeBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( int index) { - return getFavoredNodeFieldBuilder().getBuilder(index); + return getAttributesFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( int index) { - if (favoredNodeBuilder_ == null) { - return favoredNode_.get(index); } else { - return favoredNodeBuilder_.getMessageOrBuilder(index); + if (attributesBuilder_ == null) { + return attributes_.get(index); } else { + return attributesBuilder_.getMessageOrBuilder(index); } } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public java.util.List - getFavoredNodeOrBuilderList() { - if (favoredNodeBuilder_ != null) { - return favoredNodeBuilder_.getMessageOrBuilderList(); + public java.util.List + getAttributesOrBuilderList() { + if (attributesBuilder_ != null) { + return attributesBuilder_.getMessageOrBuilderList(); } else { - return 
java.util.Collections.unmodifiableList(favoredNode_); + return java.util.Collections.unmodifiableList(attributes_); } } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder() { - return getFavoredNodeFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { + return getAttributesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( int index) { - return getFavoredNodeFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + return getAttributesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); } /** - * repeated .hbase.pb.ServerName favored_node = 1; + * repeated .hbase.pb.BytesBytesPair attributes = 2; */ - public java.util.List - getFavoredNodeBuilderList() { - return getFavoredNodeFieldBuilder().getBuilderList(); + public java.util.List + getAttributesBuilderList() { + return getAttributesFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getFavoredNodeFieldBuilder() { - if (favoredNodeBuilder_ == null) { - favoredNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - favoredNode_, - ((bitField0_ & 0x00000001) == 0x00000001), + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> + getAttributesFieldBuilder() { + if (attributesBuilder_ == null) { + attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( + attributes_, + ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); - favoredNode_ = null; - } - return favoredNodeBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.FavoredNodes) - } - - static { - defaultInstance = new FavoredNodes(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.FavoredNodes) - } - - public interface RegionSpecifierOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required 
.hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - boolean hasType(); - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType(); - - // required bytes value = 2; - /** - * required bytes value = 2; - */ - boolean hasValue(); - /** - * required bytes value = 2; - */ - com.google.protobuf.ByteString getValue(); - } - /** - * Protobuf type {@code hbase.pb.RegionSpecifier} - * - *
<pre>
-   **
-   * Container protocol buffer to specify a region.
-   * You can specify region by region name, or the hash
-   * of the region name, which is known as encoded
-   * region name.
-   * </pre>
- */ - public static final class RegionSpecifier extends - com.google.protobuf.GeneratedMessage - implements RegionSpecifierOrBuilder { - // Use RegionSpecifier.newBuilder() to construct. - private RegionSpecifier(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RegionSpecifier(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RegionSpecifier defaultInstance; - public static RegionSpecifier getDefaultInstance() { - return defaultInstance; - } - - public RegionSpecifier getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RegionSpecifier( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - case 18: { - bitField0_ |= 0x00000002; - value_ = input.readBytes(); - break; - } - } + attributes_ = null; } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + return attributesBuilder_; } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class); - } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RegionSpecifier parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RegionSpecifier(input, extensionRegistry); + // repeated .hbase.pb.NameStringPair configuration = 3; + private java.util.List configuration_ = + java.util.Collections.emptyList(); + private void 
ensureConfigurationIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + configuration_ = new java.util.ArrayList(configuration_); + bitField0_ |= 0x00000004; + } } - }; - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; - /** - * Protobuf enum {@code hbase.pb.RegionSpecifier.RegionSpecifierType} - */ - public enum RegionSpecifierType - implements com.google.protobuf.ProtocolMessageEnum { /** - * REGION_NAME = 1; - * - *
<pre>
-       * &lt;tablename&gt;,&lt;startkey&gt;,&lt;regionId&gt;.&lt;encodedName&gt;
-       * </pre>
+ * repeated .hbase.pb.NameStringPair configuration = 3; */ - REGION_NAME(0, 1), + public java.util.List getConfigurationList() { + if (configurationBuilder_ == null) { + return java.util.Collections.unmodifiableList(configuration_); + } else { + return configurationBuilder_.getMessageList(); + } + } /** - * ENCODED_REGION_NAME = 2; - * - *
<pre>
-       * hash of &lt;tablename&gt;,&lt;startkey&gt;,&lt;regionId&gt;
-       * </pre>
+ * repeated .hbase.pb.NameStringPair configuration = 3; */ - ENCODED_REGION_NAME(1, 2), - ; - + public int getConfigurationCount() { + if (configurationBuilder_ == null) { + return configuration_.size(); + } else { + return configurationBuilder_.getCount(); + } + } /** - * REGION_NAME = 1; - * - *
<pre>
-       * &lt;tablename&gt;,&lt;startkey&gt;,&lt;regionId&gt;.&lt;encodedName&gt;
-       * </pre>
+ * repeated .hbase.pb.NameStringPair configuration = 3; */ - public static final int REGION_NAME_VALUE = 1; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); + } else { + return configurationBuilder_.getMessage(index); + } + } /** - * ENCODED_REGION_NAME = 2; - * - *
<pre>
-       * hash of &lt;tablename&gt;,&lt;startkey&gt;,&lt;regionId&gt;
-       * </pre>
+ * repeated .hbase.pb.NameStringPair configuration = 3; */ - public static final int ENCODED_REGION_NAME_VALUE = 2; - - - public final int getNumber() { return value; } - - public static RegionSpecifierType valueOf(int value) { - switch (value) { - case 1: return REGION_NAME; - case 2: return ENCODED_REGION_NAME; - default: return null; + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.set(index, value); + onChanged(); + } else { + configurationBuilder_.setMessage(index, value); } + return this; } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public RegionSpecifierType findValueByNumber(int number) { - return RegionSpecifierType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.set(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(value); + onChanged(); + } else { + configurationBuilder_.addMessage(value); + } + return this; } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor().getEnumTypes().get(0); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(index, value); + onChanged(); + } else { + configurationBuilder_.addMessage(index, value); + } + return this; } - - private static final RegionSpecifierType[] VALUES = values(); - - public static RegionSpecifierType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder addConfiguration( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(builderForValue.build()); + 
onChanged(); + } else { + configurationBuilder_.addMessage(builderForValue.build()); } - return VALUES[desc.getIndex()]; + return this; } - - private final int index; - private final int value; - - private RegionSpecifierType(int index, int value) { - this.index = index; - this.value = value; + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.addMessage(index, builderForValue.build()); + } + return this; } - - // @@protoc_insertion_point(enum_scope:hbase.pb.RegionSpecifier.RegionSpecifierType) - } - - private int bitField0_; - // required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_; - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { - return type_; - } - - // required bytes value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString value_; - /** - * required bytes value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes value = 2; - */ - public com.google.protobuf.ByteString getValue() { - return value_; - } - - private void initFields() { - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - value_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder addAllConfiguration( + java.lang.Iterable values) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + super.addAll(values, configuration_); + onChanged(); + } else { + configurationBuilder_.addAllMessages(values); + } + return this; } - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder clearConfiguration() { + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + configurationBuilder_.clear(); + } + return this; } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public Builder removeConfiguration(int index) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.remove(index); + onChanged(); + } else { + 
configurationBuilder_.remove(index); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, value_); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().getBuilder(index); } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); } else { + return configurationBuilder_.getMessageOrBuilder(index); + } } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, value_); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public java.util.List + getConfigurationOrBuilderList() { + if (configurationBuilder_ != null) { + return configurationBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(configuration_); + } } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() { + return getConfigurationFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)) { - return super.equals(obj); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) obj; - - boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); + /** + * repeated .hbase.pb.NameStringPair configuration = 3; + */ + public java.util.List + getConfigurationBuilderList() { + return getConfigurationFieldBuilder().getBuilderList(); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && getValue() - .equals(other.getValue()); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> + getConfigurationFieldBuilder() { + if (configurationBuilder_ == null) { + configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( + configuration_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + configuration_ = null; + } + return configurationBuilder_; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; + // @@protoc_insertion_point(builder_scope:hbase.pb.ColumnFamilySchema) } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - 
com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + static { + defaultInstance = new ColumnFamilySchema(true); + defaultInstance.initFields(); } - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } + // @@protoc_insertion_point(class_scope:hbase.pb.ColumnFamilySchema) + } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } + public interface RegionInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 region_id = 1; /** - * Protobuf type {@code hbase.pb.RegionSpecifier} - * - *
<pre>
-     **
-     * Container protocol buffer to specify a region.
-     * You can specify region by region name, or the hash
-     * of the region name, which is known as encoded
-     * region name.
-     * </pre>
+ * required uint64 region_id = 1; */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } + boolean hasRegionId(); + /** + * required uint64 region_id = 1; + */ + long getRegionId(); - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasValue()) { - setValue(other.getValue()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasValue()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { - return type_; - } - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - /** - * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - onChanged(); - return this; - } - - // required bytes value = 2; - private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes value = 2; - */ - public com.google.protobuf.ByteString getValue() { - return value_; - } - /** - * required bytes value = 2; - */ - public Builder setValue(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - * required bytes value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpecifier) - } + // required .hbase.pb.TableName table_name = 2; + /** + * required .hbase.pb.TableName table_name = 2; + */ + boolean hasTableName(); + /** + * 
required .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); - static { - defaultInstance = new RegionSpecifier(true); - defaultInstance.initFields(); - } + // optional bytes start_key = 3; + /** + * optional bytes start_key = 3; + */ + boolean hasStartKey(); + /** + * optional bytes start_key = 3; + */ + com.google.protobuf.ByteString getStartKey(); - // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpecifier) - } + // optional bytes end_key = 4; + /** + * optional bytes end_key = 4; + */ + boolean hasEndKey(); + /** + * optional bytes end_key = 4; + */ + com.google.protobuf.ByteString getEndKey(); - public interface TimeRangeOrBuilder - extends com.google.protobuf.MessageOrBuilder { + // optional bool offline = 5; + /** + * optional bool offline = 5; + */ + boolean hasOffline(); + /** + * optional bool offline = 5; + */ + boolean getOffline(); - // optional uint64 from = 1; + // optional bool split = 6; /** - * optional uint64 from = 1; + * optional bool split = 6; */ - boolean hasFrom(); + boolean hasSplit(); /** - * optional uint64 from = 1; + * optional bool split = 6; */ - long getFrom(); + boolean getSplit(); - // optional uint64 to = 2; + // optional int32 replica_id = 7 [default = 0]; /** - * optional uint64 to = 2; + * optional int32 replica_id = 7 [default = 0]; */ - boolean hasTo(); + boolean hasReplicaId(); /** - * optional uint64 to = 2; + * optional int32 replica_id = 7 [default = 0]; */ - long getTo(); + int getReplicaId(); } /** - * Protobuf type {@code hbase.pb.TimeRange} + * Protobuf type {@code hbase.pb.RegionInfo} * *
<pre>
    **
-   * A range of time. Both from and to are Java time
-   * stamp in milliseconds. If you don't specify a time
-   * range, it means all time.  By default, if not
-   * specified, from = 0, and to = Long.MAX_VALUE
+   * Protocol buffer version of HRegionInfo.
   * </pre>
*/ - public static final class TimeRange extends + public static final class RegionInfo extends com.google.protobuf.GeneratedMessage - implements TimeRangeOrBuilder { - // Use TimeRange.newBuilder() to construct. - private TimeRange(com.google.protobuf.GeneratedMessage.Builder builder) { + implements RegionInfoOrBuilder { + // Use RegionInfo.newBuilder() to construct. + private RegionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private TimeRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private RegionInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final TimeRange defaultInstance; - public static TimeRange getDefaultInstance() { + private static final RegionInfo defaultInstance; + public static RegionInfo getDefaultInstance() { return defaultInstance; } - public TimeRange getDefaultInstanceForType() { + public RegionInfo getDefaultInstanceForType() { return defaultInstance; } @@ -5773,7 +4872,7 @@ public TimeRange getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private TimeRange( + private RegionInfo( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -5798,12 +4897,45 @@ private TimeRange( } case 8: { bitField0_ |= 0x00000001; - from_ = input.readUInt64(); + regionId_ = input.readUInt64(); break; } - case 16: { + case 18: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000002; - to_ = input.readUInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + startKey_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + endKey_ = input.readBytes(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + offline_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + split_ = input.readBool(); + break; + } + case 56: { + bitField0_ |= 0x00000040; + replicaId_ = input.readInt32(); break; } } @@ -5820,73 +4952,176 @@ private TimeRange( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public TimeRange parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionInfo parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new TimeRange(input, extensionRegistry); + return new RegionInfo(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional uint64 from = 1; - public static final int FROM_FIELD_NUMBER = 1; - private long from_; + // required uint64 region_id = 1; + public static final int REGION_ID_FIELD_NUMBER = 1; + private long regionId_; /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public boolean hasFrom() { + public boolean hasRegionId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public long getFrom() { - return from_; + public long getRegionId() { + return regionId_; } - // optional uint64 to = 2; - public static final int TO_FIELD_NUMBER = 2; - private long to_; + // required .hbase.pb.TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public boolean hasTo() { + public boolean hasTableName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public long getTo() { - return to_; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // optional bytes start_key = 3; + public static final int START_KEY_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString startKey_; + /** + * optional bytes start_key = 3; + */ + public boolean hasStartKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes start_key = 3; + */ + public com.google.protobuf.ByteString getStartKey() { + return startKey_; + } + + // optional bytes end_key = 4; + public static final int END_KEY_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString endKey_; + /** + * optional bytes end_key = 4; + */ + public boolean hasEndKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes end_key = 4; + */ + public com.google.protobuf.ByteString getEndKey() { + return endKey_; + } + + // optional bool offline = 5; + public static final int OFFLINE_FIELD_NUMBER = 5; + private boolean offline_; + /** + * optional bool offline = 5; + */ + public boolean hasOffline() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool offline = 5; + */ + public boolean getOffline() { + return offline_; + } + + // optional bool split = 6; + public static final int SPLIT_FIELD_NUMBER = 6; + private boolean split_; + /** + * optional bool split = 
6; + */ + public boolean hasSplit() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool split = 6; + */ + public boolean getSplit() { + return split_; + } + + // optional int32 replica_id = 7 [default = 0]; + public static final int REPLICA_ID_FIELD_NUMBER = 7; + private int replicaId_; + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public boolean hasReplicaId() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public int getReplicaId() { + return replicaId_; } private void initFields() { - from_ = 0L; - to_ = 0L; + regionId_ = 0L; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + startKey_ = com.google.protobuf.ByteString.EMPTY; + endKey_ = com.google.protobuf.ByteString.EMPTY; + offline_ = false; + split_ = false; + replicaId_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasRegionId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -5895,10 +5130,25 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, from_); + output.writeUInt64(1, regionId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, to_); + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, startKey_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, endKey_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, offline_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, split_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeInt32(7, replicaId_); } getUnknownFields().writeTo(output); } @@ -5911,11 +5161,31 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, from_); + .computeUInt64Size(1, regionId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, to_); + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, startKey_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, endKey_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, offline_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, split_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(7, replicaId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5934,21 +5204,46 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) obj; boolean result = true; - result = result && (hasFrom() == other.hasFrom()); - if (hasFrom()) { - result = result && (getFrom() - == other.getFrom()); + result = result && (hasRegionId() == other.hasRegionId()); + if (hasRegionId()) { + result = result && (getRegionId() + == other.getRegionId()); } - result = result && (hasTo() == other.hasTo()); - if (hasTo()) { - result = result && (getTo() - == other.getTo()); + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasStartKey() == other.hasStartKey()); + if (hasStartKey()) { + result = result && getStartKey() + .equals(other.getStartKey()); + } + result = result && (hasEndKey() == other.hasEndKey()); + if (hasEndKey()) { + result = result && getEndKey() + .equals(other.getEndKey()); + } + result = result && (hasOffline() == other.hasOffline()); + if (hasOffline()) { + result = result && (getOffline() + == other.getOffline()); + } + result = result && (hasSplit() == other.hasSplit()); + if (hasSplit()) { + result = result && (getSplit() + == other.getSplit()); + } + result = result && (hasReplicaId() == other.hasReplicaId()); + if (hasReplicaId()) { + result = result && (getReplicaId() + == other.getReplicaId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -5963,66 +5258,86 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFrom()) { - hash = (37 * hash) + FROM_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getFrom()); + if (hasRegionId()) { + hash = (37 * hash) + REGION_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getRegionId()); } - if (hasTo()) { - hash = (37 * hash) + TO_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTo()); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasStartKey()) { + hash = (37 * hash) + START_KEY_FIELD_NUMBER; + hash = (53 * hash) + getStartKey().hashCode(); + } + if (hasEndKey()) { + hash = (37 * hash) + END_KEY_FIELD_NUMBER; + hash = (53 * hash) + getEndKey().hashCode(); + } + if (hasOffline()) { + hash = (37 * hash) + OFFLINE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOffline()); + } + if (hasSplit()) { + hash = (37 * hash) + SPLIT_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSplit()); + } + if (hasReplicaId()) { + hash = (37 * hash) + REPLICA_ID_FIELD_NUMBER; + hash = (53 * hash) + getReplicaId(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6031,7 +5346,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange p public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -6043,32 +5358,29 @@ protected Builder newBuilderForType( 
return builder; } /** - * Protobuf type {@code hbase.pb.TimeRange} + * Protobuf type {@code hbase.pb.RegionInfo} * *
<pre>
      **
-     * A range of time. Both from and to are Java time
-     * stamp in milliseconds. If you don't specify a time
-     * range, it means all time.  By default, if not
-     * specified, from = 0, and to = Long.MAX_VALUE
+     * Protocol buffer version of HRegionInfo.
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -6080,6 +5392,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -6088,10 +5401,24 @@ private static Builder create() { public Builder clear() { super.clear(); - from_ = 0L; + regionId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - to_ = 0L; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); + startKey_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + endKey_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + offline_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + split_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + replicaId_ = 0; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -6101,60 +5428,111 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionInfo_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo build() { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.from_ = from_; + result.regionId_ = regionId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.to_ = to_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.startKey_ = startKey_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.endKey_ = endKey_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.offline_ = offline_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.split_ = split_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.replicaId_ = replicaId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) return this; - if (other.hasFrom()) { - setFrom(other.getFrom()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) return this; + if (other.hasRegionId()) { + setRegionId(other.getRegionId()); } - if (other.hasTo()) { - setTo(other.getTo()); + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasStartKey()) { + setStartKey(other.getStartKey()); + } + if (other.hasEndKey()) { + setEndKey(other.getEndKey()); + } + if (other.hasOffline()) { + setOffline(other.getOffline()); + } + if (other.hasSplit()) { + setSplit(other.getSplit()); + } + if (other.hasReplicaId()) { + setReplicaId(other.getReplicaId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasRegionId()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -6162,11 
+5540,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -6177,157 +5555,414 @@ public Builder mergeFrom( } private int bitField0_; - // optional uint64 from = 1; - private long from_ ; + // required uint64 region_id = 1; + private long regionId_ ; /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public boolean hasFrom() { + public boolean hasRegionId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public long getFrom() { - return from_; + public long getRegionId() { + return regionId_; } /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public Builder setFrom(long value) { + public Builder setRegionId(long value) { bitField0_ |= 0x00000001; - from_ = value; + regionId_ = value; onChanged(); return this; } /** - * optional uint64 from = 1; + * required uint64 region_id = 1; */ - public Builder clearFrom() { + public Builder clearRegionId() { bitField0_ = (bitField0_ & ~0x00000001); - from_ = 0L; + regionId_ = 0L; onChanged(); return this; } - // optional uint64 to = 2; - private long to_ ; + // required .hbase.pb.TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public boolean hasTo() { + public boolean hasTableName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public long getTo() { - return to_; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public Builder setTo(long value) { + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } bitField0_ |= 0x00000002; - to_ = value; - onChanged(); return this; } /** - * optional uint64 to = 2; + * required .hbase.pb.TableName table_name = 2; */ - public Builder clearTo() { + public Builder setTableName( + 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - to_ = 0L; - onChanged(); return this; } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } - // @@protoc_insertion_point(builder_scope:hbase.pb.TimeRange) - } - - static { - defaultInstance = new TimeRange(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.TimeRange) + // optional bytes start_key = 3; + private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes start_key = 3; + */ + public boolean hasStartKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes start_key = 3; + */ + public com.google.protobuf.ByteString getStartKey() { + return startKey_; + } + /** + * optional bytes start_key = 3; + */ + public Builder setStartKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000004; + startKey_ = value; + onChanged(); + return this; + } + /** + * optional 
bytes start_key = 3; + */ + public Builder clearStartKey() { + bitField0_ = (bitField0_ & ~0x00000004); + startKey_ = getDefaultInstance().getStartKey(); + onChanged(); + return this; + } - public interface ColumnFamilyTimeRangeOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes column_family = 1; - /** - * required bytes column_family = 1; - */ - boolean hasColumnFamily(); - /** - * required bytes column_family = 1; - */ - com.google.protobuf.ByteString getColumnFamily(); - - // required .hbase.pb.TimeRange time_range = 2; - /** - * required .hbase.pb.TimeRange time_range = 2; - */ - boolean hasTimeRange(); - /** - * required .hbase.pb.TimeRange time_range = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange(); - /** - * required .hbase.pb.TimeRange time_range = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder(); + // optional bytes end_key = 4; + private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes end_key = 4; + */ + public boolean hasEndKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes end_key = 4; + */ + public com.google.protobuf.ByteString getEndKey() { + return endKey_; + } + /** + * optional bytes end_key = 4; + */ + public Builder setEndKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); } - /** - * Protobuf type {@code hbase.pb.ColumnFamilyTimeRange} - * - *
-   * ColumnFamily Specific TimeRange 
-   * 
- */ - public static final class ColumnFamilyTimeRange extends - com.google.protobuf.GeneratedMessage - implements ColumnFamilyTimeRangeOrBuilder { - // Use ColumnFamilyTimeRange.newBuilder() to construct. - private ColumnFamilyTimeRange(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ColumnFamilyTimeRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ColumnFamilyTimeRange defaultInstance; - public static ColumnFamilyTimeRange getDefaultInstance() { - return defaultInstance; - } - - public ColumnFamilyTimeRange getDefaultInstanceForType() { - return defaultInstance; - } + bitField0_ |= 0x00000008; + endKey_ = value; + onChanged(); + return this; + } + /** + * optional bytes end_key = 4; + */ + public Builder clearEndKey() { + bitField0_ = (bitField0_ & ~0x00000008); + endKey_ = getDefaultInstance().getEndKey(); + onChanged(); + return this; + } - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ColumnFamilyTimeRange( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; + // optional bool offline = 5; + private boolean offline_ ; + /** + * optional bool offline = 5; + */ + public boolean hasOffline() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool offline = 5; + */ + public boolean getOffline() { + return offline_; + } + /** + * optional bool offline = 5; + */ + public Builder setOffline(boolean value) { + bitField0_ |= 0x00000010; + offline_ = value; + onChanged(); + return this; + } + /** + * optional bool offline = 5; + */ + public Builder clearOffline() { + bitField0_ = (bitField0_ & ~0x00000010); + offline_ = false; + onChanged(); + return this; + } + + // optional bool split = 6; + private boolean split_ ; + /** + * optional bool split = 6; + */ + public boolean hasSplit() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool split = 6; + */ + public boolean getSplit() { + return split_; + } + /** + * optional bool split = 6; + */ + public Builder setSplit(boolean value) { + bitField0_ |= 0x00000020; + split_ = value; + onChanged(); + return this; + } + /** + * optional bool split = 6; + */ + public Builder clearSplit() { + bitField0_ = (bitField0_ & ~0x00000020); + split_ = false; + onChanged(); + return this; + } + + // optional int32 replica_id = 7 [default = 0]; + private int replicaId_ ; + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public boolean hasReplicaId() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public int getReplicaId() { + return replicaId_; + } + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public Builder setReplicaId(int value) { + bitField0_ |= 0x00000040; + replicaId_ = value; + onChanged(); + return this; + } + /** + * optional int32 replica_id = 7 [default = 0]; + */ + public Builder 
clearReplicaId() { + bitField0_ = (bitField0_ & ~0x00000040); + replicaId_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionInfo) + } + + static { + defaultInstance = new RegionInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionInfo) + } + + public interface FavoredNodesOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.ServerName favored_node = 1; + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + java.util.List + getFavoredNodeList(); + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index); + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + int getFavoredNodeCount(); + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + java.util.List + getFavoredNodeOrBuilderList(); + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.FavoredNodes} + * + *
+   **
+   * Protocol buffer for favored nodes
+   * 
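The hunks above regenerate RegionInfo's builder: buildPartial() now copies region_id, table_name, start_key, end_key, offline, split and replica_id, and isInitialized() fails unless the two required fields (region_id and the nested table_name) are present, so build() throws UninitializedMessageException for a partial message. A minimal usage sketch of the new builder; it assumes TableProtos.TableName exposes the usual generated setters for its namespace/qualifier bytes fields (that class is outside this excerpt), and all names and values are illustrative:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

    public class RegionInfoSketch {
      public static void main(String[] args) {
        // region_id and table_name are required: leaving either unset makes
        // isInitialized() return false and build() throw.
        RegionInfo info = RegionInfo.newBuilder()
            .setRegionId(1L)
            .setTableName(TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))   // assumed TableName field
                .setQualifier(ByteString.copyFromUtf8("testtable")) // assumed TableName field
                .build())
            .setStartKey(ByteString.copyFromUtf8("aaa"))            // optional
            .setEndKey(ByteString.copyFromUtf8("zzz"))              // optional
            .setSplit(false)
            .setReplicaId(0)                                        // default = 0
            .build();
        System.out.println(info.getRegionId() + " " + info.hasTableName());
      }
    }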
+ */ + public static final class FavoredNodes extends + com.google.protobuf.GeneratedMessage + implements FavoredNodesOrBuilder { + // Use FavoredNodes.newBuilder() to construct. + private FavoredNodes(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FavoredNodes(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FavoredNodes defaultInstance; + public static FavoredNodes getDefaultInstance() { + return defaultInstance; + } + + public FavoredNodes getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FavoredNodes( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; break; default: { if (!parseUnknownField(input, unknownFields, @@ -6337,21 +5972,11 @@ private ColumnFamilyTimeRange( break; } case 10: { - bitField0_ |= 0x00000001; - columnFamily_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = timeRange_.toBuilder(); - } - timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(timeRange_); - timeRange_ = subBuilder.buildPartial(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - bitField0_ |= 0x00000002; + favoredNode_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); break; } } @@ -6362,92 +5987,89 @@ private ColumnFamilyTimeRange( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ColumnFamilyTimeRange parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FavoredNodes parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ColumnFamilyTimeRange(input, extensionRegistry); + return new FavoredNodes(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required bytes column_family = 1; - public static final int COLUMN_FAMILY_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString columnFamily_; + // repeated .hbase.pb.ServerName favored_node = 1; + public static final int FAVORED_NODE_FIELD_NUMBER = 1; + private java.util.List favoredNode_; /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public boolean hasColumnFamily() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getFavoredNodeList() { + return favoredNode_; } /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public com.google.protobuf.ByteString getColumnFamily() { - return columnFamily_; + public java.util.List + getFavoredNodeOrBuilderList() { + return favoredNode_; } - - // required .hbase.pb.TimeRange time_range = 2; - public static final int TIME_RANGE_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_; /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public boolean hasTimeRange() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public int getFavoredNodeCount() { + return favoredNode_.size(); } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() { - return timeRange_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { + return favoredNode_.get(index); } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() { - return timeRange_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index) { + return favoredNode_.get(index); } private void initFields() { - columnFamily_ = com.google.protobuf.ByteString.EMPTY; - timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + favoredNode_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasColumnFamily()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTimeRange()) 
{ - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getFavoredNodeCount(); i++) { + if (!getFavoredNode(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -6456,11 +6078,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, columnFamily_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, timeRange_); + for (int i = 0; i < favoredNode_.size(); i++) { + output.writeMessage(1, favoredNode_.get(i)); } getUnknownFields().writeTo(output); } @@ -6471,13 +6090,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, columnFamily_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + for (int i = 0; i < favoredNode_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, timeRange_); + .computeMessageSize(1, favoredNode_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -6496,22 +6111,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) obj; boolean result = true; - result = result && (hasColumnFamily() == other.hasColumnFamily()); - if (hasColumnFamily()) { - result = result && getColumnFamily() - .equals(other.getColumnFamily()); - } - result = result && (hasTimeRange() == other.hasTimeRange()); - if (hasTimeRange()) { - result = result && getTimeRange() - .equals(other.getTimeRange()); - } + result = result && getFavoredNodeList() + .equals(other.getFavoredNodeList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -6525,66 +6132,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasColumnFamily()) { - hash = (37 * hash) + COLUMN_FAMILY_FIELD_NUMBER; - hash = (53 * hash) + getColumnFamily().hashCode(); - } - if (hasTimeRange()) { - hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER; - hash = (53 * hash) + getTimeRange().hashCode(); + if (getFavoredNodeCount() > 0) { + hash = (37 * hash) + FAVORED_NODE_FIELD_NUMBER; + hash = (53 * hash) + getFavoredNodeList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6593,7 +6196,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamil public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes prototype) { return 
newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -6605,28 +6208,29 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ColumnFamilyTimeRange} + * Protobuf type {@code hbase.pb.FavoredNodes} * *
-     * ColumnFamily Specific TimeRange 
+     **
+     * Protocol buffer for favored nodes
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -6638,7 +6242,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTimeRangeFieldBuilder(); + getFavoredNodeFieldBuilder(); } } private static Builder create() { @@ -6647,14 +6251,12 @@ private static Builder create() { public Builder clear() { super.clear(); - columnFamily_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - if (timeRangeBuilder_ == null) { - timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + if (favoredNodeBuilder_ == null) { + favoredNode_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); } else { - timeRangeBuilder_.clear(); + favoredNodeBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -6664,71 +6266,84 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_FavoredNodes_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes build() { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnFamily_ = columnFamily_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (timeRangeBuilder_ == null) { - result.timeRange_ = timeRange_; + if (favoredNodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.favoredNode_ = favoredNode_; } else { - result.timeRange_ = timeRangeBuilder_.build(); + result.favoredNode_ = favoredNodeBuilder_.build(); } - result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance()) return this; - if (other.hasColumnFamily()) { - setColumnFamily(other.getColumnFamily()); - } - if (other.hasTimeRange()) { - mergeTimeRange(other.getTimeRange()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance()) return this; + if (favoredNodeBuilder_ == null) { + if (!other.favoredNode_.isEmpty()) { + if (favoredNode_.isEmpty()) { + favoredNode_ = other.favoredNode_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFavoredNodeIsMutable(); + favoredNode_.addAll(other.favoredNode_); + } + onChanged(); + } + } else { + if (!other.favoredNode_.isEmpty()) { + if (favoredNodeBuilder_.isEmpty()) { + favoredNodeBuilder_.dispose(); + favoredNodeBuilder_ = null; + favoredNode_ = other.favoredNode_; + bitField0_ = (bitField0_ & ~0x00000001); + favoredNodeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getFavoredNodeFieldBuilder() : null; + } else { + favoredNodeBuilder_.addAllMessages(other.favoredNode_); + } + } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasColumnFamily()) { - - return false; - } - if (!hasTimeRange()) { - - return false; + for (int i = 0; i < getFavoredNodeCount(); i++) { + if (!getFavoredNode(i).isInitialized()) { + + return false; + } } return true; } @@ -6737,11 +6352,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -6752,232 +6367,307 @@ public Builder mergeFrom( } private int bitField0_; - // required bytes column_family = 1; - private com.google.protobuf.ByteString columnFamily_ = com.google.protobuf.ByteString.EMPTY; + // repeated .hbase.pb.ServerName favored_node = 1; + private java.util.List favoredNode_ = + java.util.Collections.emptyList(); + private void ensureFavoredNodeIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = new java.util.ArrayList(favoredNode_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodeBuilder_; + /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public boolean hasColumnFamily() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getFavoredNodeList() { + if (favoredNodeBuilder_ == null) { + return java.util.Collections.unmodifiableList(favoredNode_); + } else { + return favoredNodeBuilder_.getMessageList(); + } } /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public com.google.protobuf.ByteString getColumnFamily() { - return columnFamily_; + public int getFavoredNodeCount() { + if (favoredNodeBuilder_ == null) { + return favoredNode_.size(); + } else { + return favoredNodeBuilder_.getCount(); + } } /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder setColumnFamily(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnFamily_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { + if (favoredNodeBuilder_ == null) { + return favoredNode_.get(index); + } else { + return favoredNodeBuilder_.getMessage(index); + } } /** - * required bytes column_family = 1; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder clearColumnFamily() { - bitField0_ = (bitField0_ & 
~0x00000001); - columnFamily_ = getDefaultInstance().getColumnFamily(); - onChanged(); + public Builder setFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodeIsMutable(); + favoredNode_.set(index, value); + onChanged(); + } else { + favoredNodeBuilder_.setMessage(index, value); + } return this; } - - // required .hbase.pb.TimeRange time_range = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder> timeRangeBuilder_; /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public boolean hasTimeRange() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public Builder setFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.set(index, builderForValue.build()); + onChanged(); + } else { + favoredNodeBuilder_.setMessage(index, builderForValue.build()); + } + return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() { - if (timeRangeBuilder_ == null) { - return timeRange_; + public Builder addFavoredNode(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodeIsMutable(); + favoredNode_.add(value); + onChanged(); } else { - return timeRangeBuilder_.getMessage(); + favoredNodeBuilder_.addMessage(value); } + return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder setTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) { - if (timeRangeBuilder_ == null) { + public Builder addFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - timeRange_ = value; + ensureFavoredNodeIsMutable(); + favoredNode_.add(index, value); onChanged(); } else { - timeRangeBuilder_.setMessage(value); + favoredNodeBuilder_.addMessage(index, value); } - bitField0_ |= 0x00000002; return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder setTimeRange( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder builderForValue) { - if (timeRangeBuilder_ == null) { - timeRange_ = builderForValue.build(); + public Builder addFavoredNode( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.add(builderForValue.build()); onChanged(); } else { - timeRangeBuilder_.setMessage(builderForValue.build()); + 
favoredNodeBuilder_.addMessage(builderForValue.build()); } - bitField0_ |= 0x00000002; return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder mergeTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) { - if (timeRangeBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - timeRange_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) { - timeRange_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder(timeRange_).mergeFrom(value).buildPartial(); - } else { - timeRange_ = value; - } + public Builder addFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.add(index, builderForValue.build()); onChanged(); } else { - timeRangeBuilder_.mergeFrom(value); + favoredNodeBuilder_.addMessage(index, builderForValue.build()); } - bitField0_ |= 0x00000002; return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public Builder clearTimeRange() { - if (timeRangeBuilder_ == null) { - timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + public Builder addAllFavoredNode( + java.lang.Iterable values) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + super.addAll(values, favoredNode_); onChanged(); } else { - timeRangeBuilder_.clear(); + favoredNodeBuilder_.addAllMessages(values); } - bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder getTimeRangeBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getTimeRangeFieldBuilder().getBuilder(); + public Builder clearFavoredNode() { + if (favoredNodeBuilder_ == null) { + favoredNode_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + favoredNodeBuilder_.clear(); + } + return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() { - if (timeRangeBuilder_ != null) { - return timeRangeBuilder_.getMessageOrBuilder(); + public Builder removeFavoredNode(int index) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.remove(index); + onChanged(); } else { - return timeRange_; + favoredNodeBuilder_.remove(index); } + return this; } /** - * required .hbase.pb.TimeRange time_range = 2; + * repeated .hbase.pb.ServerName favored_node = 1; */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder> - getTimeRangeFieldBuilder() { - if (timeRangeBuilder_ == null) { - timeRangeBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder>( 
- timeRange_, + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodeBuilder( + int index) { + return getFavoredNodeFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index) { + if (favoredNodeBuilder_ == null) { + return favoredNode_.get(index); } else { + return favoredNodeBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + public java.util.List + getFavoredNodeOrBuilderList() { + if (favoredNodeBuilder_ != null) { + return favoredNodeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(favoredNode_); + } + } + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder() { + return getFavoredNodeFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder( + int index) { + return getFavoredNodeFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName favored_node = 1; + */ + public java.util.List + getFavoredNodeBuilderList() { + return getFavoredNodeFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getFavoredNodeFieldBuilder() { + if (favoredNodeBuilder_ == null) { + favoredNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + favoredNode_, + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - timeRange_ = null; + favoredNode_ = null; } - return timeRangeBuilder_; + return favoredNodeBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ColumnFamilyTimeRange) + // @@protoc_insertion_point(builder_scope:hbase.pb.FavoredNodes) } static { - defaultInstance = new ColumnFamilyTimeRange(true); + defaultInstance = new FavoredNodes(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ColumnFamilyTimeRange) + // @@protoc_insertion_point(class_scope:hbase.pb.FavoredNodes) } - public interface ServerNameOrBuilder + public interface RegionSpecifierOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string host_name = 1; - /** - * required string host_name = 1; - */ - boolean hasHostName(); - /** - * required string host_name = 1; - */ - java.lang.String getHostName(); - /** - * required string host_name = 1; - */ - com.google.protobuf.ByteString - getHostNameBytes(); - - // optional uint32 port = 2; + // required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; /** - * optional uint32 port = 2; + * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; */ - boolean 
hasPort(); + boolean hasType(); /** - * optional uint32 port = 2; + * required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1; */ - int getPort(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType(); - // optional uint64 start_code = 3; + // required bytes value = 2; /** - * optional uint64 start_code = 3; + * required bytes value = 2; */ - boolean hasStartCode(); + boolean hasValue(); /** - * optional uint64 start_code = 3; + * required bytes value = 2; */ - long getStartCode(); + com.google.protobuf.ByteString getValue(); } /** - * Protobuf type {@code hbase.pb.ServerName} + * Protobuf type {@code hbase.pb.RegionSpecifier} * *
    **
-   * Protocol buffer version of ServerName
+   * Container protocol buffer to specify a region.
+   * You can specify a region by its full region name,
+   * or by the hash of the region name, which is known
+   * as the encoded region name.
    * 
*/ - public static final class ServerName extends + public static final class RegionSpecifier extends com.google.protobuf.GeneratedMessage - implements ServerNameOrBuilder { - // Use ServerName.newBuilder() to construct. - private ServerName(com.google.protobuf.GeneratedMessage.Builder builder) { + implements RegionSpecifierOrBuilder { + // Use RegionSpecifier.newBuilder() to construct. + private RegionSpecifier(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ServerName(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private RegionSpecifier(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ServerName defaultInstance; - public static ServerName getDefaultInstance() { + private static final RegionSpecifier defaultInstance; + public static RegionSpecifier getDefaultInstance() { return defaultInstance; } - public ServerName getDefaultInstanceForType() { + public RegionSpecifier getDefaultInstanceForType() { return defaultInstance; } @@ -6987,7 +6677,7 @@ public ServerName getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ServerName( + private RegionSpecifier( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -7010,19 +6700,20 @@ private ServerName( } break; } - case 10: { - bitField0_ |= 0x00000001; - hostName_ = input.readBytes(); + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } break; } - case 16: { + case 18: { bitField0_ |= 0x00000002; - port_ = input.readUInt32(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - startCode_ = input.readUInt64(); + value_ = input.readBytes(); break; } } @@ -7039,118 +6730,176 @@ private ServerName( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ServerName parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionSpecifier 
parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ServerName(input, extensionRegistry); + return new RegionSpecifier(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required string host_name = 1; - public static final int HOST_NAME_FIELD_NUMBER = 1; - private java.lang.Object hostName_; - /** - * required string host_name = 1; - */ - public boolean hasHostName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } /** - * required string host_name = 1; + * Protobuf enum {@code hbase.pb.RegionSpecifier.RegionSpecifierType} */ - public java.lang.String getHostName() { - java.lang.Object ref = hostName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - hostName_ = s; + public enum RegionSpecifierType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * REGION_NAME = 1; + * + *
+       * <tablename>,<startkey>,<regionId>.<encodedName>
+       * 
+ */ + REGION_NAME(0, 1), + /** + * ENCODED_REGION_NAME = 2; + * + *
+       * hash of <tablename>,<startkey>,<regionId>
+       * 
+ */ + ENCODED_REGION_NAME(1, 2), + ; + + /** + * REGION_NAME = 1; + * + *
+       * <tablename>,<startkey>,<regionId>.<encodedName>
+       * 
+ */ + public static final int REGION_NAME_VALUE = 1; + /** + * ENCODED_REGION_NAME = 2; + * + *
+       * hash of <tablename>,<startkey>,<regionId>
+       * 
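The two constants carry proto wire numbers 1 and 2 (REGION_NAME_VALUE and ENCODED_REGION_NAME_VALUE), and valueOf(int), defined just below, maps a wire number back to its constant, returning null for anything unrecognized so the parser can keep the raw varint in unknownFields. A tiny sketch of that mapping:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

    public class RegionSpecifierTypeSketch {
      public static void main(String[] args) {
        System.out.println(RegionSpecifierType.valueOf(1));  // REGION_NAME
        System.out.println(RegionSpecifierType.valueOf(2));  // ENCODED_REGION_NAME
        System.out.println(RegionSpecifierType.valueOf(9));  // null: unknown wire number
      }
    }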
+       */
+      public static final int ENCODED_REGION_NAME_VALUE = 2;
+
+
+      public final int getNumber() { return value; }
+
+      public static RegionSpecifierType valueOf(int value) {
+        switch (value) {
+          case 1: return REGION_NAME;
+          case 2: return ENCODED_REGION_NAME;
+          default: return null;
         }
-        return s;
       }
-    }
-    /**
-     * <code>required string host_name = 1;</code>
-     */
-    public com.google.protobuf.ByteString
-        getHostNameBytes() {
-      java.lang.Object ref = hostName_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        hostName_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
+
+      public static com.google.protobuf.Internal.EnumLiteMap<RegionSpecifierType> internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<RegionSpecifierType>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<RegionSpecifierType>() {
+              public RegionSpecifierType findValueByNumber(int number) {
+                return RegionSpecifierType.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final RegionSpecifierType[] VALUES = values();
+
+      public static RegionSpecifierType valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private RegionSpecifierType(int index, int value) {
+        this.index = index;
+        this.value = value;
       }
+
+      // @@protoc_insertion_point(enum_scope:hbase.pb.RegionSpecifier.RegionSpecifierType)
     }
 
-    // optional uint32 port = 2;
-    public static final int PORT_FIELD_NUMBER = 2;
-    private int port_;
+    private int bitField0_;
+    // required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;
+    public static final int TYPE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_;
     /**
-     * <code>optional uint32 port = 2;</code>
+     * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
      */
-    public boolean hasPort() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
+    public boolean hasType() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional uint32 port = 2;</code>
+     * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
      */
-    public int getPort() {
-      return port_;
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() {
+      return type_;
     }
 
-    // optional uint64 start_code = 3;
-    public static final int START_CODE_FIELD_NUMBER = 3;
-    private long startCode_;
+    // required bytes value = 2;
+    public static final int VALUE_FIELD_NUMBER = 2;
+    private com.google.protobuf.ByteString value_;
     /**
-     * <code>optional uint64 start_code = 3;</code>
+     * <code>required bytes value = 2;</code>
      */
-    public boolean hasStartCode() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
+    public boolean hasValue() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
     /**
-     * <code>optional uint64 start_code = 3;</code>
+     * <code>required bytes value = 2;</code>
      */
-    public long getStartCode() {
-      return startCode_;
+    public com.google.protobuf.ByteString getValue() {
+      return value_;
     }
 
     private void initFields() {
-      hostName_ = "";
-      port_ = 0;
-      startCode_ = 0L;
+      type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+      value_ = com.google.protobuf.ByteString.EMPTY;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
 
-      if (!hasHostName()) {
+      if (!hasType()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasValue()) {
         memoizedIsInitialized = 0;
         return false;
       }
@@ -7162,13 +6911,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, getHostNameBytes());
+        output.writeEnum(1, type_.getNumber());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt32(2, port_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeUInt64(3, startCode_);
+        output.writeBytes(2, value_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -7181,15 +6927,11 @@ public int getSerializedSize() {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, getHostNameBytes());
+          .computeEnumSize(1, type_.getNumber());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(2, port_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(3, startCode_);
+          .computeBytesSize(2, value_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -7208,26 +6950,21 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) obj;
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) obj;
 
       boolean result = true;
-      result = result && (hasHostName() == other.hasHostName());
-      if (hasHostName()) {
-        result = result && getHostName()
-            .equals(other.getHostName());
-      }
-      result = result && (hasPort() == other.hasPort());
-      if (hasPort()) {
-        result = result && (getPort()
-            == other.getPort());
+      result = result && (hasType() == other.hasType());
+      if (hasType()) {
+        result = result &&
+            (getType() == other.getType());
       }
-      result = result && (hasStartCode() == other.hasStartCode());
-      if (hasStartCode()) {
-        result = result && (getStartCode()
-            == other.getStartCode());
+      result = result && (hasValue() == other.hasValue());
+      if (hasValue()) {
+        result = result && getValue()
+            .equals(other.getValue());
       }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
@@ -7242,70 +6979,66 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasHostName()) {
-        hash = (37 * hash) + HOST_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getHostName().hashCode();
-      }
-      if (hasPort()) {
-        hash = (37 * hash) + PORT_FIELD_NUMBER;
-        hash = (53 * hash) + getPort();
+      if (hasType()) {
+        hash = (37 * hash) + TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getType());
       }
-      if (hasStartCode()) {
-        hash = (37 * hash) + START_CODE_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getStartCode());
+      if (hasValue()) {
+        hash = (37 * hash) + VALUE_FIELD_NUMBER;
+        hash = (53 * hash) + getValue().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -7314,7 +7047,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName
 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
@@ -7326,29 +7059,32 @@ protected Builder newBuilderForType(
      return builder;
    }
    /**
-     * Protobuf type {@code hbase.pb.ServerName}
+     * Protobuf type {@code hbase.pb.RegionSpecifier}
      *
      * <pre>
      **
-     * Protocol buffer version of ServerName
+     * Container protocol buffer to specify a region.
+     * You can specify region by region name, or the hash
+     * of the region name, which is known as encoded
+     * region name.
      * 
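      *
      * A minimal usage sketch, assuming only the generated RegionSpecifier API added
      * in this patch; the encoded region name "1588230740" is a hypothetical example
      * value:
      *
      *   RegionSpecifier spec = RegionSpecifier.newBuilder()
      *       .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      *       .setValue(com.google.protobuf.ByteString.copyFromUtf8("1588230740"))
      *       .build();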
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -7368,12 +7104,10 @@ private static Builder create() {
 
       public Builder clear() {
         super.clear();
-        hostName_ = "";
+        type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
         bitField0_ = (bitField0_ & ~0x00000001);
-        port_ = 0;
+        value_ = com.google.protobuf.ByteString.EMPTY;
         bitField0_ = (bitField0_ & ~0x00000002);
-        startCode_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -7383,70 +7117,65 @@ public Builder clone() {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionSpecifier_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName build() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName(this);
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.hostName_ = hostName_;
+        result.type_ = type_;
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.port_ = port_;
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.startCode_ = startCode_;
+        result.value_ = value_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) return this;
-        if (other.hasHostName()) {
-          bitField0_ |= 0x00000001;
-          hostName_ = other.hostName_;
-          onChanged();
-        }
-        if (other.hasPort()) {
-          setPort(other.getPort());
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) return this;
+        if (other.hasType()) {
+          setType(other.getType());
         }
-        if (other.hasStartCode()) {
-          setStartCode(other.getStartCode());
+        if (other.hasValue()) {
+          setValue(other.getValue());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
 
       public final boolean isInitialized() {
-        if (!hasHostName()) {
+        if (!hasType()) {
+
+          return false;
+        }
+        if (!hasValue()) {
 
           return false;
         }
@@ -7457,11 +7186,11 @@ public Builder mergeFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -7472,194 +7201,139 @@ public Builder mergeFrom(
       }
       private int bitField0_;
 
-      // required string host_name = 1;
-      private java.lang.Object hostName_ = "";
+      // required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
       /**
-       * <code>required string host_name = 1;</code>
+       * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
        */
-      public boolean hasHostName() {
+      public boolean hasType() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>required string host_name = 1;</code>
-       */
-      public java.lang.String getHostName() {
-        java.lang.Object ref = hostName_;
-        if (!(ref instanceof java.lang.String)) {
-          java.lang.String s = ((com.google.protobuf.ByteString) ref)
-              .toStringUtf8();
-          hostName_ = s;
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <code>required string host_name = 1;</code>
+       * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
        */
-      public com.google.protobuf.ByteString
-          getHostNameBytes() {
-        java.lang.Object ref = hostName_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b =
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          hostName_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() {
+        return type_;
       }
       /**
-       * <code>required string host_name = 1;</code>
+       * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
        */
-      public Builder setHostName(
-          java.lang.String value) {
+      public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value) {
         if (value == null) {
-          throw new NullPointerException();
-        }
-        bitField0_ |= 0x00000001;
-        hostName_ = value;
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        type_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>required string host_name = 1;</code>
+       * <code>required .hbase.pb.RegionSpecifier.RegionSpecifierType type = 1;</code>
        */
-      public Builder clearHostName() {
+      public Builder clearType() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        hostName_ = getDefaultInstance().getHostName();
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required string host_name = 1;</code>
-       */
-      public Builder setHostNameBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        bitField0_ |= 0x00000001;
-        hostName_ = value;
+        type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
         onChanged();
         return this;
       }
 
-      // optional uint32 port = 2;
-      private int port_ ;
+      // required bytes value = 2;
+      private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
       /**
-       * <code>optional uint32 port = 2;</code>
+       * <code>required bytes value = 2;</code>
        */
-      public boolean hasPort() {
+      public boolean hasValue() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional uint32 port = 2;</code>
+       * <code>required bytes value = 2;</code>
        */
-      public int getPort() {
-        return port_;
+      public com.google.protobuf.ByteString getValue() {
+        return value_;
       }
       /**
-       * <code>optional uint32 port = 2;</code>
+       * <code>required bytes value = 2;</code>
        */
-      public Builder setPort(int value) {
-        bitField0_ |= 0x00000002;
-        port_ = value;
+      public Builder setValue(com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        value_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional uint32 port = 2;</code>
+       * <code>required bytes value = 2;</code>
        */
-      public Builder clearPort() {
+      public Builder clearValue() {
         bitField0_ = (bitField0_ & ~0x00000002);
-        port_ = 0;
+        value_ = getDefaultInstance().getValue();
         onChanged();
         return this;
       }
 
-      // optional uint64 start_code = 3;
-      private long startCode_ ;
-      /**
-       * <code>optional uint64 start_code = 3;</code>
-       */
-      public boolean hasStartCode() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional uint64 start_code = 3;</code>
-       */
-      public long getStartCode() {
-        return startCode_;
-      }
-      /**
-       * <code>optional uint64 start_code = 3;</code>
-       */
-      public Builder setStartCode(long value) {
-        bitField0_ |= 0x00000004;
-        startCode_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional uint64 start_code = 3;</code>
-       */
-      public Builder clearStartCode() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        startCode_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ServerName)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpecifier)
     }
 
     static {
-      defaultInstance = new ServerName(true);
+      defaultInstance = new RegionSpecifier(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.ServerName)
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpecifier)
   }
 
-  public interface CoprocessorOrBuilder
+  public interface TimeRangeOrBuilder
     extends com.google.protobuf.MessageOrBuilder {
 
-    // required string name = 1;
     /**
-     * <code>required string name = 1;</code>
+     * <code>optional uint64 from = 1;</code>
      */
-    boolean hasName();
+    boolean hasFrom();
     /**
-     * <code>required string name = 1;</code>
      */
-    java.lang.String getName();
+    long getFrom();
+
+    // optional uint64 to = 2;
     /**
-     * <code>required string name = 1;</code>
+     * <code>optional uint64 to = 2;</code>
      */
-    com.google.protobuf.ByteString
-        getNameBytes();
+    boolean hasTo();
+    /**
+     * <code>optional uint64 to = 2;</code>
+     */
+    long getTo();
   }
   /**
-   * Protobuf type {@code hbase.pb.Coprocessor}
+   * Protobuf type {@code hbase.pb.TimeRange}
+   *
+   * <pre>
+   **
+   * A range of time. Both from and to are Java time
+   * stamp in milliseconds. If you don't specify a time
+   * range, it means all time.  By default, if not
+   * specified, from = 0, and to = Long.MAX_VALUE
+   * 
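   *
   * A minimal usage sketch, assuming only the generated TimeRange API added in this
   * patch; the millisecond timestamps are hypothetical example values:
   *
   *   TimeRange tr = TimeRange.newBuilder()
   *       .setFrom(1400000000000L)
   *       .setTo(1400003600000L)
   *       .build();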
    */
-  public static final class Coprocessor extends
+  public static final class TimeRange extends
     com.google.protobuf.GeneratedMessage
-    implements CoprocessorOrBuilder {
-    // Use Coprocessor.newBuilder() to construct.
-    private Coprocessor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements TimeRangeOrBuilder {
+    // Use TimeRange.newBuilder() to construct.
+    private TimeRange(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private Coprocessor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private TimeRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final Coprocessor defaultInstance;
-    public static Coprocessor getDefaultInstance() {
+    private static final TimeRange defaultInstance;
+    public static TimeRange getDefaultInstance() {
       return defaultInstance;
     }
 
-    public Coprocessor getDefaultInstanceForType() {
+    public TimeRange getDefaultInstanceForType() {
       return defaultInstance;
     }
@@ -7669,7 +7343,7 @@ public Coprocessor getDefaultInstanceForType() {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private Coprocessor(
+    private TimeRange(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -7692,9 +7366,14 @@ private Coprocessor(
             }
             break;
           }
-          case 10: {
+          case 8: {
             bitField0_ |= 0x00000001;
-            name_ = input.readBytes();
+            from_ = input.readUInt64();
+            break;
+          }
+          case 16: {
+            bitField0_ |= 0x00000002;
+            to_ = input.readUInt64();
             break;
           }
         }
@@ -7711,87 +7390,73 @@ private Coprocessor(
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<Coprocessor> PARSER =
-        new com.google.protobuf.AbstractParser<Coprocessor>() {
-      public Coprocessor parsePartialFrom(
+    public static com.google.protobuf.Parser<TimeRange> PARSER =
+        new com.google.protobuf.AbstractParser<TimeRange>() {
+      public TimeRange parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Coprocessor(input, extensionRegistry);
+        return new TimeRange(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<Coprocessor> getParserForType() {
+    public com.google.protobuf.Parser<TimeRange> getParserForType() {
       return PARSER;
     }
 
     private int bitField0_;
-    // required string name = 1;
-    public static final int NAME_FIELD_NUMBER = 1;
-    private java.lang.Object name_;
+    // optional uint64 from = 1;
+    public static final int FROM_FIELD_NUMBER = 1;
+    private long from_;
     /**
-     * <code>required string name = 1;</code>
+     * <code>optional uint64 from = 1;</code>
      */
-    public boolean hasName() {
+    public boolean hasFrom() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required string name = 1;</code>
+     * <code>optional uint64 from = 1;</code>
      */
-    public java.lang.String getName() {
-      java.lang.Object ref = name_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs =
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          name_ = s;
-        }
-        return s;
-      }
+    public long getFrom() {
+      return from_;
     }
+
+    // optional uint64 to = 2;
+    public static final int TO_FIELD_NUMBER = 2;
+    private long to_;
     /**
-     * <code>required string name = 1;</code>
+     * <code>optional uint64 to = 2;</code>
      */
-    public com.google.protobuf.ByteString
-        getNameBytes() {
-      java.lang.Object ref = name_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        name_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
+    public boolean hasTo() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional uint64 to = 2;</code>
+     */
+    public long getTo() {
+      return to_;
     }
 
     private void initFields() {
-      name_ = "";
+      from_ = 0L;
+      to_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
 
-      if (!hasName()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -7800,7 +7465,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, getNameBytes());
+        output.writeUInt64(1, from_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, to_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -7813,7 +7481,11 @@ public int getSerializedSize() {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, getNameBytes());
+          .computeUInt64Size(1, from_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, to_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -7832,16 +7504,21 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) obj;
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) obj;
 
       boolean result = true;
-      result = result && (hasName() == other.hasName());
-      if (hasName()) {
-        result = result && getName()
-            .equals(other.getName());
+      result = result && (hasFrom() == other.hasFrom());
+      if (hasFrom()) {
+        result = result && (getFrom()
+            == other.getFrom());
+      }
+      result = result && (hasTo() == other.hasTo());
+      if (hasTo()) {
+        result = result && (getTo()
+            == other.getTo());
       }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
@@ -7856,62 +7533,66 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasName()) {
-        hash = (37 * hash) + NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getName().hashCode();
+      if (hasFrom()) {
+        hash = (37 * hash) + FROM_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getFrom());
+      }
+      if (hasTo()) {
+        hash = (37 * hash) + TO_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTo());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -7920,7 +7601,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor
 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
@@ -7932,24 +7613,32 @@ protected Builder newBuilderForType(
      return builder;
    }
    /**
-     * Protobuf type {@code hbase.pb.Coprocessor}
+     * Protobuf type {@code hbase.pb.TimeRange}
+     *
+     * <pre>
+     **
+     * A range of time. Both from and to are Java time
+     * stamp in milliseconds. If you don't specify a time
+     * range, it means all time.  By default, if not
+     * specified, from = 0, and to = Long.MAX_VALUE
+     * 
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -7969,8 +7658,10 @@ private static Builder create() {
 
       public Builder clear() {
         super.clear();
-        name_ = "";
+        from_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000001);
+        to_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
 
@@ -7980,59 +7671,60 @@ public Builder clone() {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TimeRange_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor build() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor(this);
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.name_ = name_;
+        result.from_ = from_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.to_ = to_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()) return this;
-        if (other.hasName()) {
-          bitField0_ |= 0x00000001;
-          name_ = other.name_;
-          onChanged();
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) return this;
+        if (other.hasFrom()) {
+          setFrom(other.getFrom());
+        }
+        if (other.hasTo()) {
+          setTo(other.getTo());
        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
 
       public final boolean isInitialized() {
-        if (!hasName()) {
-
-          return false;
-        }
         return true;
       }
 
@@ -8040,11 +7732,11 @@ public Builder mergeFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -8055,143 +7747,133 @@ public Builder mergeFrom(
       }
       private int bitField0_;
 
-      // required string name = 1;
-      private java.lang.Object name_ = "";
+      // optional uint64 from = 1;
+      private long from_ ;
       /**
-       * <code>required string name = 1;</code>
+       * <code>optional uint64 from = 1;</code>
        */
-      public boolean hasName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
+   * ColumnFamily Specific TimeRange 
+   * 
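   *
   * A minimal usage sketch, assuming only the generated ColumnFamilyTimeRange and
   * TimeRange APIs added in this patch; the family name "cf" is a hypothetical
   * example, and the nested TimeRange covers all time:
   *
   *   ColumnFamilyTimeRange cfRange = ColumnFamilyTimeRange.newBuilder()
   *       .setColumnFamily(com.google.protobuf.ByteString.copyFromUtf8("cf"))
   *       .setTimeRange(TimeRange.newBuilder().setFrom(0L).setTo(Long.MAX_VALUE).build())
   *       .build();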
    */
-  public static final class NameStringPair extends
+  public static final class ColumnFamilyTimeRange extends
     com.google.protobuf.GeneratedMessage
-    implements NameStringPairOrBuilder {
-    // Use NameStringPair.newBuilder() to construct.
-    private NameStringPair(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements ColumnFamilyTimeRangeOrBuilder {
+    // Use ColumnFamilyTimeRange.newBuilder() to construct.
+    private ColumnFamilyTimeRange(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private NameStringPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private ColumnFamilyTimeRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final NameStringPair defaultInstance;
-    public static NameStringPair getDefaultInstance() {
+    private static final ColumnFamilyTimeRange defaultInstance;
+    public static ColumnFamilyTimeRange getDefaultInstance() {
       return defaultInstance;
     }
 
-    public NameStringPair getDefaultInstanceForType() {
+    public ColumnFamilyTimeRange getDefaultInstanceForType() {
       return defaultInstance;
     }
@@ -8201,7 +7883,7 @@ public NameStringPair getDefaultInstanceForType() {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private NameStringPair(
+    private ColumnFamilyTimeRange(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -8226,12 +7908,20 @@ private NameStringPair(
           }
           case 10: {
             bitField0_ |= 0x00000001;
-            name_ = input.readBytes();
+            columnFamily_ = input.readBytes();
             break;
           }
           case 18: {
+            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null;
+            if (((bitField0_ & 0x00000002) == 0x00000002)) {
+              subBuilder = timeRange_.toBuilder();
+            }
+            timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry);
+            if (subBuilder != null) {
+              subBuilder.mergeFrom(timeRange_);
+              timeRange_ = subBuilder.buildPartial();
+            }
             bitField0_ |= 0x00000002;
-            value_ = input.readBytes();
             break;
           }
         }
@@ -8248,132 +7938,84 @@ private NameStringPair(
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<NameStringPair> PARSER =
-        new com.google.protobuf.AbstractParser<NameStringPair>() {
-      public NameStringPair parsePartialFrom(
+    public static com.google.protobuf.Parser<ColumnFamilyTimeRange> PARSER =
+        new com.google.protobuf.AbstractParser<ColumnFamilyTimeRange>() {
+      public ColumnFamilyTimeRange parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new NameStringPair(input, extensionRegistry);
+        return new ColumnFamilyTimeRange(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<NameStringPair> getParserForType() {
+    public com.google.protobuf.Parser<ColumnFamilyTimeRange> getParserForType() {
       return PARSER;
     }
 
     private int bitField0_;
-    // required string name = 1;
-    public static final int NAME_FIELD_NUMBER = 1;
-    private java.lang.Object name_;
+    // required bytes column_family = 1;
+    public static final int COLUMN_FAMILY_FIELD_NUMBER = 1;
+    private com.google.protobuf.ByteString columnFamily_;
     /**
-     * <code>required string name = 1;</code>
+     * <code>required bytes column_family = 1;</code>
      */
-    public boolean hasName() {
+    public boolean hasColumnFamily() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required string name = 1;</code>
-     */
-    public java.lang.String getName() {
-      java.lang.Object ref = name_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs =
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          name_ = s;
-        }
-        return s;
-      }
-    }
-    /**
-     * <code>required string name = 1;</code>
+     * <code>required bytes column_family = 1;</code>
      */
-    public com.google.protobuf.ByteString
-        getNameBytes() {
-      java.lang.Object ref = name_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        name_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
+    public com.google.protobuf.ByteString getColumnFamily() {
+      return columnFamily_;
     }
 
-    // required string value = 2;
-    public static final int VALUE_FIELD_NUMBER = 2;
-    private java.lang.Object value_;
+    // required .hbase.pb.TimeRange time_range = 2;
+    public static final int TIME_RANGE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_;
     /**
-     * <code>required string value = 2;</code>
+     * <code>required .hbase.pb.TimeRange time_range = 2;</code>
     */
-    public boolean hasValue() {
+    public boolean hasTimeRange() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>required string value = 2;</code>
+     * <code>required .hbase.pb.TimeRange time_range = 2;</code>
     */
-    public java.lang.String getValue() {
-      java.lang.Object ref = value_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs =
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          value_ = s;
-        }
-        return s;
-      }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
+      return timeRange_;
     }
     /**
-     * <code>required string value = 2;</code>
     */
-    public com.google.protobuf.ByteString
-        getValueBytes() {
-      java.lang.Object ref = value_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        value_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
+      return timeRange_;
     }
 
     private void initFields() {
-      name_ = "";
-      value_ = "";
+      columnFamily_ = com.google.protobuf.ByteString.EMPTY;
+      timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
 
-      if (!hasName()) {
+      if (!hasColumnFamily()) {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!hasValue()) {
+      if (!hasTimeRange()) {
         memoizedIsInitialized = 0;
         return false;
      }
@@ -8385,10 +8027,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, getNameBytes());
+        output.writeBytes(1, columnFamily_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeBytes(2, getValueBytes());
+        output.writeMessage(2, timeRange_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -8401,11 +8043,11 @@ public int getSerializedSize() {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, getNameBytes());
+          .computeBytesSize(1, columnFamily_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(2, getValueBytes());
+          .computeMessageSize(2, timeRange_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -8424,21 +8066,21 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) obj;
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) obj;
 
       boolean result = true;
-      result = result && (hasName() == other.hasName());
-      if (hasName()) {
-        result = result && getName()
-            .equals(other.getName());
+      result = result && (hasColumnFamily() == other.hasColumnFamily());
+      if (hasColumnFamily()) {
+        result = result && getColumnFamily()
+            .equals(other.getColumnFamily());
       }
-      result = result && (hasValue() == other.hasValue());
-      if (hasValue()) {
-        result = result && getValue()
-            .equals(other.getValue());
+      result = result && (hasTimeRange() == other.hasTimeRange());
+      if (hasTimeRange()) {
+        result = result && getTimeRange()
+            .equals(other.getTimeRange());
       }
       result = result &&
          getUnknownFields().equals(other.getUnknownFields());
@@ -8453,66 +8095,66 @@ public int hashCode() {
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasName()) {
-        hash = (37 * hash) + NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getName().hashCode();
+      if (hasColumnFamily()) {
+        hash = (37 * hash) + COLUMN_FAMILY_FIELD_NUMBER;
+        hash = (53 * hash) + getColumnFamily().hashCode();
      }
-      if (hasValue()) {
-        hash = (37 * hash) + VALUE_FIELD_NUMBER;
-        hash = (53 * hash) + getValue().hashCode();
+      if (hasTimeRange()) {
+        hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER;
+        hash = (53 * hash) + getTimeRange().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -8521,7 +8163,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringP
 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
@@ -8533,24 +8175,28 @@ protected Builder newBuilderForType(
      return builder;
    }
    /**
-     * Protobuf type {@code hbase.pb.NameStringPair}
+     * Protobuf type {@code hbase.pb.ColumnFamilyTimeRange}
+     *
+     * <pre>
+     * <pre>
+     * ColumnFamily Specific TimeRange
+     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -8562,6 +8208,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTimeRangeFieldBuilder(); } } private static Builder create() { @@ -8570,9 +8217,13 @@ private static Builder create() { public Builder clear() { super.clear(); - name_ = ""; + columnFamily_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; + if (timeRangeBuilder_ == null) { + timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + } else { + timeRangeBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -8583,69 +8234,69 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = buildPartial(); if (!result.isInitialized()) { throw 
newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; + result.columnFamily_ = columnFamily_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + if (timeRangeBuilder_ == null) { + result.timeRange_ = timeRange_; + } else { + result.timeRange_ = timeRangeBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()) return this; - if (other.hasName()) { - bitField0_ |= 0x00000001; - name_ = other.name_; - onChanged(); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance()) return this; + if (other.hasColumnFamily()) { + setColumnFamily(other.getColumnFamily()); } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); + if (other.hasTimeRange()) { + mergeTimeRange(other.getTimeRange()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasName()) { + if (!hasColumnFamily()) { return false; } - if (!hasValue()) { + if (!hasTimeRange()) { return false; } @@ -8656,11 +8307,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -8671,212 +8322,232 @@ public Builder mergeFrom( } private int bitField0_; - // 
required string name = 1; - private java.lang.Object name_ = ""; + // required bytes column_family = 1; + private com.google.protobuf.ByteString columnFamily_ = com.google.protobuf.ByteString.EMPTY; /** - * required string name = 1; + * required bytes column_family = 1; */ - public boolean hasName() { + public boolean hasColumnFamily() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string name = 1; + * required bytes column_family = 1; */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public com.google.protobuf.ByteString getColumnFamily() { + return columnFamily_; } /** - * required string name = 1; + * required bytes column_family = 1; */ - public Builder setName( - java.lang.String value) { + public Builder setColumnFamily(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - name_ = value; + columnFamily_ = value; onChanged(); return this; } /** - * required string name = 1; + * required bytes column_family = 1; */ - public Builder clearName() { + public Builder clearColumnFamily() { bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * required string name = 1; - */ - public Builder setNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; + columnFamily_ = getDefaultInstance().getColumnFamily(); onChanged(); return this; } - // required string value = 2; - private java.lang.Object value_ = ""; + // required .hbase.pb.TimeRange time_range = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder> timeRangeBuilder_; /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public boolean hasValue() { + public boolean hasTimeRange() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - value_ = s; - return s; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() { + if (timeRangeBuilder_ == null) { + return timeRange_; } else { - return (java.lang.String) ref; + return timeRangeBuilder_.getMessage(); } } /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public 
com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; + public Builder setTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) { + if (timeRangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timeRange_ = value; + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + timeRangeBuilder_.setMessage(value); } + bitField0_ |= 0x00000002; + return this; } /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); + public Builder setTimeRange( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder builderForValue) { + if (timeRangeBuilder_ == null) { + timeRange_ = builderForValue.build(); + onChanged(); + } else { + timeRangeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; return this; } /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public Builder clearValue() { + public Builder mergeTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) { + if (timeRangeBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + timeRange_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) { + timeRange_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder(timeRange_).mergeFrom(value).buildPartial(); + } else { + timeRange_ = value; + } + onChanged(); + } else { + timeRangeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TimeRange time_range = 2; + */ + public Builder clearTimeRange() { + if (timeRangeBuilder_ == null) { + timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + onChanged(); + } else { + timeRangeBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); return this; } /** - * required string value = 2; + * required .hbase.pb.TimeRange time_range = 2; */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder getTimeRangeBuilder() { + bitField0_ |= 0x00000002; onChanged(); - return this; + return getTimeRangeFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TimeRange time_range = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() { + if (timeRangeBuilder_ != null) { + return timeRangeBuilder_.getMessageOrBuilder(); + } else { + return timeRange_; + } + } + /** + * required .hbase.pb.TimeRange time_range = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder> + getTimeRangeFieldBuilder() { + if (timeRangeBuilder_ == null) { + timeRangeBuilder_ = new 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder>( + timeRange_, + getParentForChildren(), + isClean()); + timeRange_ = null; + } + return timeRangeBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.NameStringPair) + // @@protoc_insertion_point(builder_scope:hbase.pb.ColumnFamilyTimeRange) } static { - defaultInstance = new NameStringPair(true); + defaultInstance = new ColumnFamilyTimeRange(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.NameStringPair) + // @@protoc_insertion_point(class_scope:hbase.pb.ColumnFamilyTimeRange) } - public interface NameBytesPairOrBuilder + public interface ServerNameOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string name = 1; + // required string host_name = 1; /** - * required string name = 1; + * required string host_name = 1; */ - boolean hasName(); + boolean hasHostName(); /** - * required string name = 1; + * required string host_name = 1; */ - java.lang.String getName(); + java.lang.String getHostName(); /** - * required string name = 1; + * required string host_name = 1; */ com.google.protobuf.ByteString - getNameBytes(); + getHostNameBytes(); - // optional bytes value = 2; + // optional uint32 port = 2; /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - boolean hasValue(); + boolean hasPort(); /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - com.google.protobuf.ByteString getValue(); + int getPort(); + + // optional uint64 start_code = 3; + /** + * optional uint64 start_code = 3; + */ + boolean hasStartCode(); + /** + * optional uint64 start_code = 3; + */ + long getStartCode(); } /** - * Protobuf type {@code hbase.pb.NameBytesPair} + * Protobuf type {@code hbase.pb.ServerName} + * + *
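// --- Example (not part of the patch): the regenerated ColumnFamilyTimeRange pairs a required
// column_family bytes field with a required nested TimeRange message, which is why the Builder
// above wires up a SingleFieldBuilder and why mergeTimeRange() merges into an existing
// sub-message instead of overwriting it. A minimal sketch of populating and round-tripping the
// new message; the family name and timestamp bounds below are hypothetical.
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ColumnFamilyTimeRangeExample {
  public static void main(String[] args) throws Exception {
    // Build the nested TimeRange first, then attach it to the wrapper message.
    HBaseProtos.TimeRange tr = HBaseProtos.TimeRange.newBuilder()
        .setFrom(0L)          // lower bound (hypothetical value)
        .setTo(1000000L)      // upper bound (hypothetical value)
        .build();
    HBaseProtos.ColumnFamilyTimeRange cftr = HBaseProtos.ColumnFamilyTimeRange.newBuilder()
        .setColumnFamily(ByteString.copyFromUtf8("cf"))  // hypothetical family name
        .setTimeRange(tr)
        .build();
    // Both fields are required, so build() would throw if either were unset.
    HBaseProtos.ColumnFamilyTimeRange parsed =
        HBaseProtos.ColumnFamilyTimeRange.parseFrom(cftr.toByteString());
    System.out.println(parsed.equals(cftr));  // true: generated equals() compares both fields
  }
}
// --- End example.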
+   * <pre>
+   **
+   * Protocol buffer version of ServerName
+   * </pre>
*/ - public static final class NameBytesPair extends + public static final class ServerName extends com.google.protobuf.GeneratedMessage - implements NameBytesPairOrBuilder { - // Use NameBytesPair.newBuilder() to construct. - private NameBytesPair(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ServerNameOrBuilder { + // Use ServerName.newBuilder() to construct. + private ServerName(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private NameBytesPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ServerName(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final NameBytesPair defaultInstance; - public static NameBytesPair getDefaultInstance() { + private static final ServerName defaultInstance; + public static ServerName getDefaultInstance() { return defaultInstance; } - public NameBytesPair getDefaultInstanceForType() { + public ServerName getDefaultInstanceForType() { return defaultInstance; } @@ -8886,7 +8557,7 @@ public NameBytesPair getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private NameBytesPair( + private ServerName( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -8911,12 +8582,17 @@ private NameBytesPair( } case 10: { bitField0_ |= 0x00000001; - name_ = input.readBytes(); + hostName_ = input.readBytes(); break; } - case 18: { + case 16: { bitField0_ |= 0x00000002; - value_ = input.readBytes(); + port_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + startCode_ = input.readUInt64(); break; } } @@ -8933,46 +8609,46 @@ private NameBytesPair( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public NameBytesPair parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ServerName parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new NameBytesPair(input, extensionRegistry); + return new ServerName(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public 
com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; + // required string host_name = 1; + public static final int HOST_NAME_FIELD_NUMBER = 1; + private java.lang.Object hostName_; /** - * required string name = 1; + * required string host_name = 1; */ - public boolean hasName() { + public boolean hasHostName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string name = 1; + * required string host_name = 1; */ - public java.lang.String getName() { - java.lang.Object ref = name_; + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -8980,54 +8656,71 @@ public java.lang.String getName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - name_ = s; + hostName_ = s; } return s; } } /** - * required string name = 1; + * required string host_name = 1; */ public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; + getHostNameBytes() { + java.lang.Object ref = hostName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - name_ = b; + hostName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional bytes value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString value_; + // optional uint32 port = 2; + public static final int PORT_FIELD_NUMBER = 2; + private int port_; /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public boolean hasValue() { + public boolean hasPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public com.google.protobuf.ByteString getValue() { - return value_; + public int getPort() { + return port_; } - private void initFields() { - name_ = ""; - value_ = com.google.protobuf.ByteString.EMPTY; + // optional uint64 start_code = 3; + public static final int START_CODE_FIELD_NUMBER = 3; + private long startCode_; + /** + * optional uint64 start_code = 3; + */ + public boolean hasStartCode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 start_code = 3; + */ + public long getStartCode() { + return startCode_; + } + + private void initFields() { + hostName_ = ""; + port_ = 0; + startCode_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasName()) { + if (!hasHostName()) { memoizedIsInitialized = 0; return false; } @@ -9039,10 +8732,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); + output.writeBytes(1, getHostNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, value_); + output.writeUInt32(2, port_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, startCode_); } getUnknownFields().writeTo(output); } @@ -9055,11 +8751,15 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); + .computeBytesSize(1, getHostNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, value_); + .computeUInt32Size(2, port_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, startCode_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -9078,21 +8778,26 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + result = result && (hasHostName() == other.hasHostName()); + if (hasHostName()) { + result = result && getHostName() + .equals(other.getHostName()); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && getValue() - .equals(other.getValue()); + result = result && (hasPort() == other.hasPort()); + if (hasPort()) { + result = result && (getPort() + == other.getPort()); + } + result = result && (hasStartCode() == other.hasStartCode()); + if (hasStartCode()) { + result = result && (getStartCode() + == other.getStartCode()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -9107,66 +8812,70 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); + if (hasHostName()) { + hash = (37 * hash) + HOST_NAME_FIELD_NUMBER; + hash = (53 * hash) + getHostName().hashCode(); } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); + if (hasPort()) { + hash = (37 * hash) + PORT_FIELD_NUMBER; + hash = (53 * hash) + getPort(); + } + if (hasStartCode()) { + hash = (37 * hash) + START_CODE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartCode()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(byte[] data) + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9175,7 +8884,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -9187,24 +8896,29 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.NameBytesPair} + * Protobuf type {@code hbase.pb.ServerName} + * + *
+     * <pre>
+     **
+     * Protocol buffer version of ServerName
+     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -9224,10 +8938,12 @@ private static Builder create() { public Builder clear() { super.clear(); - name_ = ""; + hostName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - value_ = com.google.protobuf.ByteString.EMPTY; + port_ = 0; bitField0_ = (bitField0_ & ~0x00000002); + startCode_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -9237,63 +8953,70 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ServerName_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = new 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; + result.hostName_ = hostName_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + result.port_ = port_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.startCode_ = startCode_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance()) return this; - if (other.hasName()) { + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) return this; + if (other.hasHostName()) { bitField0_ |= 0x00000001; - name_ = other.name_; + hostName_ = other.hostName_; onChanged(); } - if (other.hasValue()) { - setValue(other.getValue()); + if (other.hasPort()) { + setPort(other.getPort()); + } + if (other.hasStartCode()) { + setStartCode(other.getStartCode()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasName()) { + if (!hasHostName()) { return false; } @@ -9304,11 +9027,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -9319,169 +9042,194 @@ public Builder mergeFrom( } private int bitField0_; - // required string name = 1; - private java.lang.Object name_ = ""; + // required string host_name = 1; + private java.lang.Object hostName_ = ""; /** - * required string name = 1; + * required string host_name = 1; */ - public boolean hasName() { + public boolean hasHostName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string name = 1; + * required string host_name = 1; */ - public java.lang.String getName() { - java.lang.Object ref = name_; + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) 
.toStringUtf8(); - name_ = s; + hostName_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string name = 1; + * required string host_name = 1; */ public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; + getHostNameBytes() { + java.lang.Object ref = hostName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - name_ = b; + hostName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string name = 1; + * required string host_name = 1; */ - public Builder setName( + public Builder setHostName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - name_ = value; + hostName_ = value; onChanged(); return this; } /** - * required string name = 1; + * required string host_name = 1; */ - public Builder clearName() { + public Builder clearHostName() { bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); + hostName_ = getDefaultInstance().getHostName(); onChanged(); return this; } /** - * required string name = 1; + * required string host_name = 1; */ - public Builder setNameBytes( + public Builder setHostNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - name_ = value; + hostName_ = value; onChanged(); return this; } - // optional bytes value = 2; - private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; + // optional uint32 port = 2; + private int port_ ; /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public boolean hasValue() { + public boolean hasPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public com.google.protobuf.ByteString getValue() { - return value_; + public int getPort() { + return port_; } /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public Builder setValue(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; + public Builder setPort(int value) { + bitField0_ |= 0x00000002; + port_ = value; onChanged(); return this; } /** - * optional bytes value = 2; + * optional uint32 port = 2; */ - public Builder clearValue() { + public Builder clearPort() { bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); + port_ = 0; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.NameBytesPair) - } - - static { - defaultInstance = new NameBytesPair(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.NameBytesPair) - } - - public interface BytesBytesPairOrBuilder - extends com.google.protobuf.MessageOrBuilder { + // optional uint64 start_code = 3; + private long startCode_ ; + /** + * optional uint64 start_code = 3; + */ + public boolean hasStartCode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 start_code = 3; + */ + public long getStartCode() { + return startCode_; + } + /** + * optional uint64 start_code = 3; + */ + public Builder setStartCode(long value) { + bitField0_ |= 0x00000004; + startCode_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_code = 3; + */ + public Builder clearStartCode() { + bitField0_ = (bitField0_ & 
~0x00000004); + startCode_ = 0L; + onChanged(); + return this; + } - // required bytes first = 1; - /** - * required bytes first = 1; - */ - boolean hasFirst(); + // @@protoc_insertion_point(builder_scope:hbase.pb.ServerName) + } + + static { + defaultInstance = new ServerName(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ServerName) + } + + public interface CoprocessorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; /** - * required bytes first = 1; + * required string name = 1; */ - com.google.protobuf.ByteString getFirst(); - - // required bytes second = 2; + boolean hasName(); /** - * required bytes second = 2; + * required string name = 1; */ - boolean hasSecond(); + java.lang.String getName(); /** - * required bytes second = 2; + * required string name = 1; */ - com.google.protobuf.ByteString getSecond(); + com.google.protobuf.ByteString + getNameBytes(); } /** - * Protobuf type {@code hbase.pb.BytesBytesPair} + * Protobuf type {@code hbase.pb.Coprocessor} */ - public static final class BytesBytesPair extends + public static final class Coprocessor extends com.google.protobuf.GeneratedMessage - implements BytesBytesPairOrBuilder { - // Use BytesBytesPair.newBuilder() to construct. - private BytesBytesPair(com.google.protobuf.GeneratedMessage.Builder builder) { + implements CoprocessorOrBuilder { + // Use Coprocessor.newBuilder() to construct. + private Coprocessor(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BytesBytesPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private Coprocessor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BytesBytesPair defaultInstance; - public static BytesBytesPair getDefaultInstance() { + private static final Coprocessor defaultInstance; + public static Coprocessor getDefaultInstance() { return defaultInstance; } - public BytesBytesPair getDefaultInstanceForType() { + public Coprocessor getDefaultInstanceForType() { return defaultInstance; } @@ -9491,7 +9239,7 @@ public BytesBytesPair getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private BytesBytesPair( + private Coprocessor( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -9516,12 +9264,7 @@ private BytesBytesPair( } case 10: { bitField0_ |= 0x00000001; - first_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - second_ = input.readBytes(); + name_ = input.readBytes(); break; } } @@ -9538,78 +9281,84 @@ private BytesBytesPair( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_fieldAccessorTable .ensureFieldAccessorsInitialized( - 
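// --- Example (not part of the patch): ServerName, now generated in this slot, carries a
// required host_name plus optional port and start_code, encoded with writeUInt32/writeUInt64 as
// shown above. A small sketch of the resulting API; the host and port values are made up.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ServerNameExample {
  public static void main(String[] args) {
    HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
        .setHostName("rs1.example.com")  // required field
        .setPort(16020)                  // optional uint32
        .setStartCode(1234567890L)       // optional uint64 (in HBase, usually the server start timestamp)
        .build();
    // The has*() accessors reflect the bitField0_ presence bits maintained above.
    System.out.println(sn.hasPort() && sn.hasStartCode());  // true
    // getSerializedSize() sums computeBytesSize/computeUInt32Size/computeUInt64Size.
    System.out.println(sn.getSerializedSize());
  }
}
// --- End example.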
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BytesBytesPair parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Coprocessor parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BytesBytesPair(input, extensionRegistry); + return new Coprocessor(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bytes first = 1; - public static final int FIRST_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString first_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; /** - * required bytes first = 1; + * required string name = 1; */ - public boolean hasFirst() { + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bytes first = 1; - */ - public com.google.protobuf.ByteString getFirst() { - return first_; - } - - // required bytes second = 2; - public static final int SECOND_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString second_; - /** - * required bytes second = 2; + * required string name = 1; */ - public boolean hasSecond() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } } /** - * required bytes second = 2; + * required string name = 1; */ - public com.google.protobuf.ByteString getSecond() { - return second_; + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private void initFields() { - first_ = com.google.protobuf.ByteString.EMPTY; - second_ = com.google.protobuf.ByteString.EMPTY; + name_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasFirst()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSecond()) { + if (!hasName()) { memoizedIsInitialized = 0; return false; } @@ -9621,10 +9370,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, first_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, second_); + output.writeBytes(1, getNameBytes()); } getUnknownFields().writeTo(output); } @@ 
-9637,11 +9383,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, first_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, second_); + .computeBytesSize(1, getNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -9660,21 +9402,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) obj; boolean result = true; - result = result && (hasFirst() == other.hasFirst()); - if (hasFirst()) { - result = result && getFirst() - .equals(other.getFirst()); - } - result = result && (hasSecond() == other.hasSecond()); - if (hasSecond()) { - result = result && getSecond() - .equals(other.getSecond()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -9689,66 +9426,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFirst()) { - hash = (37 * hash) + FIRST_FIELD_NUMBER; - hash = (53 * hash) + getFirst().hashCode(); - } - if (hasSecond()) { - hash = (37 * hash) + SECOND_FIELD_NUMBER; - hash = (53 * hash) + getSecond().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public 
static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9757,7 +9490,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesP public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -9769,24 +9502,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.BytesBytesPair} + * Protobuf type {@code hbase.pb.Coprocessor} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable + 
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -9806,10 +9539,8 @@ private static Builder create() { public Builder clear() { super.clear(); - first_ = com.google.protobuf.ByteString.EMPTY; + name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - second_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -9819,65 +9550,56 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_Coprocessor_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.first_ = first_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.second_ = second_; + result.name_ = name_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()) return this; - if (other.hasFirst()) { - setFirst(other.getFirst()); - } - if (other.hasSecond()) { - setSecond(other.getSecond()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasFirst()) { - - return false; - } - if (!hasSecond()) { + if (!hasName()) { return false; } @@ -9888,11 +9610,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -9903,136 +9625,143 @@ public Builder mergeFrom( } private int bitField0_; - // required bytes first = 1; - private com.google.protobuf.ByteString first_ = com.google.protobuf.ByteString.EMPTY; + // required string name = 1; + private java.lang.Object name_ = ""; /** - * required bytes first = 1; + * required string name = 1; */ - public boolean hasFirst() { + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bytes first = 1; + * required string name = 1; */ - public com.google.protobuf.ByteString getFirst() { - return first_; + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } } /** - * required bytes first = 1; + * required string name = 1; */ - public Builder setFirst(com.google.protobuf.ByteString value) { + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - first_ = value; + name_ = value; onChanged(); return this; } /** - * required bytes first = 1; + * required string name = 1; */ - public Builder clearFirst() { + public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); - first_ = getDefaultInstance().getFirst(); + 
name_ = getDefaultInstance().getName(); onChanged(); return this; } - - // required bytes second = 2; - private com.google.protobuf.ByteString second_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes second = 2; - */ - public boolean hasSecond() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes second = 2; - */ - public com.google.protobuf.ByteString getSecond() { - return second_; - } /** - * required bytes second = 2; + * required string name = 1; */ - public Builder setSecond(com.google.protobuf.ByteString value) { + public Builder setNameBytes( + com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; - second_ = value; - onChanged(); - return this; - } - /** - * required bytes second = 2; - */ - public Builder clearSecond() { - bitField0_ = (bitField0_ & ~0x00000002); - second_ = getDefaultInstance().getSecond(); + bitField0_ |= 0x00000001; + name_ = value; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BytesBytesPair) + // @@protoc_insertion_point(builder_scope:hbase.pb.Coprocessor) } static { - defaultInstance = new BytesBytesPair(true); + defaultInstance = new Coprocessor(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BytesBytesPair) + // @@protoc_insertion_point(class_scope:hbase.pb.Coprocessor) } - public interface NameInt64PairOrBuilder + public interface NameStringPairOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional string name = 1; + // required string name = 1; /** - * optional string name = 1; + * required string name = 1; */ boolean hasName(); /** - * optional string name = 1; + * required string name = 1; */ java.lang.String getName(); /** - * optional string name = 1; + * required string name = 1; */ com.google.protobuf.ByteString getNameBytes(); - // optional int64 value = 2; + // required string value = 2; /** - * optional int64 value = 2; + * required string value = 2; */ boolean hasValue(); /** - * optional int64 value = 2; + * required string value = 2; */ - long getValue(); + java.lang.String getValue(); + /** + * required string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); } /** - * Protobuf type {@code hbase.pb.NameInt64Pair} + * Protobuf type {@code hbase.pb.NameStringPair} */ - public static final class NameInt64Pair extends + public static final class NameStringPair extends com.google.protobuf.GeneratedMessage - implements NameInt64PairOrBuilder { - // Use NameInt64Pair.newBuilder() to construct. - private NameInt64Pair(com.google.protobuf.GeneratedMessage.Builder builder) { + implements NameStringPairOrBuilder { + // Use NameStringPair.newBuilder() to construct. 
+ private NameStringPair(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private NameInt64Pair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private NameStringPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final NameInt64Pair defaultInstance; - public static NameInt64Pair getDefaultInstance() { + private static final NameStringPair defaultInstance; + public static NameStringPair getDefaultInstance() { return defaultInstance; } - public NameInt64Pair getDefaultInstanceForType() { + public NameStringPair getDefaultInstanceForType() { return defaultInstance; } @@ -10042,7 +9771,7 @@ public NameInt64Pair getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private NameInt64Pair( + private NameStringPair( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -10070,9 +9799,9 @@ private NameInt64Pair( name_ = input.readBytes(); break; } - case 16: { + case 18: { bitField0_ |= 0x00000002; - value_ = input.readInt64(); + value_ = input.readBytes(); break; } } @@ -10089,43 +9818,43 @@ private NameInt64Pair( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public NameInt64Pair parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NameStringPair parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new NameInt64Pair(input, extensionRegistry); + return new NameStringPair(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional string name = 1; + // required string name = 1; public static final int NAME_FIELD_NUMBER = 1; private java.lang.Object name_; /** - * optional string name = 1; + * required string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string name = 1; + * required string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -10142,7 
+9871,7 @@ public java.lang.String getName() { } } /** - * optional string name = 1; + * required string name = 1; */ public com.google.protobuf.ByteString getNameBytes() { @@ -10158,31 +9887,66 @@ public java.lang.String getName() { } } - // optional int64 value = 2; + // required string value = 2; public static final int VALUE_FIELD_NUMBER = 2; - private long value_; + private java.lang.Object value_; /** - * optional int64 value = 2; + * required string value = 2; */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional int64 value = 2; + * required string value = 2; */ - public long getValue() { - return value_; + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private void initFields() { name_ = ""; - value_ = 0L; + value_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -10194,7 +9958,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, value_); + output.writeBytes(2, getValueBytes()); } getUnknownFields().writeTo(output); } @@ -10211,7 +9975,7 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, value_); + .computeBytesSize(2, getValueBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -10230,10 +9994,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) obj; boolean result = true; result = result && (hasName() == other.hasName()); @@ -10243,8 +10007,8 @@ public boolean equals(final java.lang.Object obj) { } result = result && (hasValue() == other.hasValue()); if (hasValue()) { - result = result && (getValue() - == other.getValue()); + result = result && getValue() + .equals(other.getValue()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -10265,60 +10029,60 @@ public int hashCode() { } if (hasValue()) { hash = (37 * 
hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getValue()); + hash = (53 * hash) + getValue().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.CodedInputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -10327,7 +10091,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -10339,24 +10103,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.NameInt64Pair} + * Protobuf type {@code hbase.pb.NameStringPair} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64PairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -10378,7 +10142,7 @@ public Builder clear() { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - value_ = 0L; + value_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -10389,23 +10153,23 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameStringPair_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair 
result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -10422,29 +10186,39 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buil } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()) return this; if (other.hasName()) { bitField0_ |= 0x00000001; name_ = other.name_; onChanged(); } if (other.hasValue()) { - setValue(other.getValue()); + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasValue()) { + + return false; + } return true; } @@ -10452,11 +10226,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -10467,16 +10241,16 @@ public Builder mergeFrom( } private int bitField0_; - // optional string name = 1; + // required string name = 1; private java.lang.Object name_ = ""; /** - * optional string name = 1; + * required string name = 1; */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string name = 1; + 
* required string name = 1; */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -10490,7 +10264,7 @@ public java.lang.String getName() { } } /** - * optional string name = 1; + * required string name = 1; */ public com.google.protobuf.ByteString getNameBytes() { @@ -10506,7 +10280,7 @@ public java.lang.String getName() { } } /** - * optional string name = 1; + * required string name = 1; */ public Builder setName( java.lang.String value) { @@ -10519,7 +10293,7 @@ public Builder setName( return this; } /** - * optional string name = 1; + * required string name = 1; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -10528,7 +10302,7 @@ public Builder clearName() { return this; } /** - * optional string name = 1; + * required string name = 1; */ public Builder setNameBytes( com.google.protobuf.ByteString value) { @@ -10541,166 +10315,138 @@ public Builder setNameBytes( return this; } - // optional int64 value = 2; - private long value_ ; + // required string value = 2; + private java.lang.Object value_ = ""; /** - * optional int64 value = 2; + * required string value = 2; */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional int64 value = 2; + * required string value = 2; */ - public long getValue() { - return value_; + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } } /** - * optional int64 value = 2; + * required string value = 2; */ - public Builder setValue(long value) { - bitField0_ |= 0x00000002; + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; value_ = value; onChanged(); return this; } /** - * optional int64 value = 2; + * required string value = 2; */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); - value_ = 0L; + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.NameInt64Pair) + // @@protoc_insertion_point(builder_scope:hbase.pb.NameStringPair) } static { - defaultInstance = new NameInt64Pair(true); + defaultInstance = new NameStringPair(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.NameInt64Pair) + // @@protoc_insertion_point(class_scope:hbase.pb.NameStringPair) } - public interface ProcedureDescriptionOrBuilder + public interface NameBytesPairOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string signature = 1; - /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
- */ - boolean hasSignature(); - /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
- */ - java.lang.String getSignature(); - /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
- */ - com.google.protobuf.ByteString - getSignatureBytes(); - - // optional string instance = 2; + // required string name = 1; /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
+ * required string name = 1; */ - boolean hasInstance(); + boolean hasName(); /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
+ * required string name = 1; */ - java.lang.String getInstance(); + java.lang.String getName(); /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
+ * required string name = 1; */ com.google.protobuf.ByteString - getInstanceBytes(); - - // optional int64 creation_time = 3 [default = 0]; - /** - * optional int64 creation_time = 3 [default = 0]; - */ - boolean hasCreationTime(); - /** - * optional int64 creation_time = 3 [default = 0]; - */ - long getCreationTime(); + getNameBytes(); - // repeated .hbase.pb.NameStringPair configuration = 4; - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - java.util.List - getConfigurationList(); - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - int getConfigurationCount(); + // optional bytes value = 2; /** - * repeated .hbase.pb.NameStringPair configuration = 4; + * optional bytes value = 2; */ - java.util.List - getConfigurationOrBuilderList(); + boolean hasValue(); /** - * repeated .hbase.pb.NameStringPair configuration = 4; + * optional bytes value = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index); + com.google.protobuf.ByteString getValue(); } /** - * Protobuf type {@code hbase.pb.ProcedureDescription} - * - *
-   * <pre>
-   **
-   * Description of the distributed procedure to take
-   * </pre>
+ * Protobuf type {@code hbase.pb.NameBytesPair} */ - public static final class ProcedureDescription extends + public static final class NameBytesPair extends com.google.protobuf.GeneratedMessage - implements ProcedureDescriptionOrBuilder { - // Use ProcedureDescription.newBuilder() to construct. - private ProcedureDescription(com.google.protobuf.GeneratedMessage.Builder builder) { + implements NameBytesPairOrBuilder { + // Use NameBytesPair.newBuilder() to construct. + private NameBytesPair(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ProcedureDescription(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private NameBytesPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ProcedureDescription defaultInstance; - public static ProcedureDescription getDefaultInstance() { + private static final NameBytesPair defaultInstance; + public static NameBytesPair getDefaultInstance() { return defaultInstance; } - public ProcedureDescription getDefaultInstanceForType() { + public NameBytesPair getDefaultInstanceForType() { return defaultInstance; } @@ -10710,7 +10456,7 @@ public ProcedureDescription getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ProcedureDescription( + private NameBytesPair( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -10735,25 +10481,12 @@ private ProcedureDescription( } case 10: { bitField0_ |= 0x00000001; - signature_ = input.readBytes(); + name_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; - instance_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - creationTime_ = input.readInt64(); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - configuration_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); + value_ = input.readBytes(); break; } } @@ -10764,63 +10497,52 @@ private ProcedureDescription( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - configuration_ = java.util.Collections.unmodifiableList(configuration_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.class, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ProcedureDescription parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NameBytesPair parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ProcedureDescription(input, extensionRegistry); + return new NameBytesPair(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required string signature = 1; - public static final int SIGNATURE_FIELD_NUMBER = 1; - private java.lang.Object signature_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
+ * required string name = 1; */ - public boolean hasSignature() { + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
+ * required string name = 1; */ - public java.lang.String getSignature() { - java.lang.Object ref = signature_; + public java.lang.String getName() { + java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -10828,160 +10550,57 @@ public java.lang.String getSignature() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - signature_ = s; + name_ = s; } return s; } } /** - * required string signature = 1; - * - *
-     * <pre>
-     * the unique signature of the procedure
-     * </pre>
+ * required string name = 1; */ public com.google.protobuf.ByteString - getSignatureBytes() { - java.lang.Object ref = signature_; + getNameBytes() { + java.lang.Object ref = name_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - signature_ = b; + name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional string instance = 2; - public static final int INSTANCE_FIELD_NUMBER = 2; - private java.lang.Object instance_; + // optional bytes value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString value_; /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
+ * optional bytes value = 2; */ - public boolean hasInstance() { + public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
- */ - public java.lang.String getInstance() { - java.lang.Object ref = instance_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - instance_ = s; - } - return s; - } - } - /** - * optional string instance = 2; - * - *
-     * <pre>
-     * the procedure instance name
-     * </pre>
- */ - public com.google.protobuf.ByteString - getInstanceBytes() { - java.lang.Object ref = instance_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - instance_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int64 creation_time = 3 [default = 0]; - public static final int CREATION_TIME_FIELD_NUMBER = 3; - private long creationTime_; - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public long getCreationTime() { - return creationTime_; - } - - // repeated .hbase.pb.NameStringPair configuration = 4; - public static final int CONFIGURATION_FIELD_NUMBER = 4; - private java.util.List configuration_; - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public java.util.List getConfigurationList() { - return configuration_; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public java.util.List - getConfigurationOrBuilderList() { - return configuration_; - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public int getConfigurationCount() { - return configuration_.size(); - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - return configuration_.get(index); - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; + * optional bytes value = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - return configuration_.get(index); + public com.google.protobuf.ByteString getValue() { + return value_; } private void initFields() { - signature_ = ""; - instance_ = ""; - creationTime_ = 0L; - configuration_ = java.util.Collections.emptyList(); + name_ = ""; + value_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasSignature()) { + if (!hasName()) { memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -10990,16 +10609,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSignatureBytes()); + output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getInstanceBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, creationTime_); - } - for (int i = 0; i < configuration_.size(); i++) { - output.writeMessage(4, configuration_.get(i)); + output.writeBytes(2, value_); } getUnknownFields().writeTo(output); } @@ -11012,19 +10625,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSignatureBytes()); + .computeBytesSize(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getInstanceBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, creationTime_); - } - for (int i = 0; i < configuration_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, configuration_.get(i)); + .computeBytesSize(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -11043,29 +10648,22 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) obj; boolean result = true; - result = result && (hasSignature() == other.hasSignature()); - if (hasSignature()) { - result = result && getSignature() - .equals(other.getSignature()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); } - result = result && (hasInstance() == other.hasInstance()); - if (hasInstance()) { - result = result && getInstance() - .equals(other.getInstance()); - } - result = result && (hasCreationTime() == other.hasCreationTime()); - if (hasCreationTime()) { - result = result && (getCreationTime() - == other.getCreationTime()); + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && getValue() + .equals(other.getValue()); } - result = result && getConfigurationList() - .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -11079,74 +10677,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSignature()) { - hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; - hash = (53 * hash) + getSignature().hashCode(); - } - if (hasInstance()) { - hash = (37 * hash) + INSTANCE_FIELD_NUMBER; - hash = (53 * hash) + getInstance().hashCode(); - } - if (hasCreationTime()) { - hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCreationTime()); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } - if (getConfigurationCount() > 0) { - hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; - hash = (53 * hash) + getConfigurationList().hashCode(); + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -11155,7 +10745,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair prototype) { return 
newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -11167,29 +10757,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ProcedureDescription} - * - *
-     * <pre>
-     **
-     * Description of the distributed procedure to take
-     * </pre>
+ * Protobuf type {@code hbase.pb.NameBytesPair} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -11201,7 +10786,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getConfigurationFieldBuilder(); } } private static Builder create() { @@ -11210,18 +10794,10 @@ private static Builder create() { public Builder clear() { super.clear(); - signature_ = ""; + name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - instance_ = ""; + value_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); - creationTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - configurationBuilder_.clear(); - } return this; } @@ -11231,116 +10807,66 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameBytesPair_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair build() { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.signature_ = signature_; + result.name_ = name_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.instance_ = instance_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.creationTime_ = creationTime_; - if (configurationBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - configuration_ = java.util.Collections.unmodifiableList(configuration_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.configuration_ = configuration_; - } else { - result.configuration_ = configurationBuilder_.build(); - } + result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) return this; - if (other.hasSignature()) { + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance()) return this; + if (other.hasName()) { bitField0_ |= 0x00000001; - signature_ = other.signature_; - onChanged(); - } - if (other.hasInstance()) { - bitField0_ |= 0x00000002; - instance_ = other.instance_; + name_ = other.name_; onChanged(); } - if (other.hasCreationTime()) { - setCreationTime(other.getCreationTime()); - } - if (configurationBuilder_ == null) { - if (!other.configuration_.isEmpty()) { - if (configuration_.isEmpty()) { - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureConfigurationIsMutable(); - configuration_.addAll(other.configuration_); - } - onChanged(); - } - } else { - if (!other.configuration_.isEmpty()) { - if (configurationBuilder_.isEmpty()) { - configurationBuilder_.dispose(); - configurationBuilder_ = null; - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000008); - configurationBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getConfigurationFieldBuilder() : null; - } else { - configurationBuilder_.addAllMessages(other.configuration_); - } - } + if (other.hasValue()) { + setValue(other.getValue()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasSignature()) { + if (!hasName()) { return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - - return false; - } - } return true; } @@ -11348,11 +10874,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -11363,294 +10889,2338 @@ public Builder mergeFrom( } private int bitField0_; - // required string signature = 1; - private java.lang.Object signature_ = ""; + // required string name = 1; + private java.lang.Object name_ = ""; /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ - public boolean hasSignature() { + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ - public java.lang.String getSignature() { - java.lang.Object ref = signature_; + public java.lang.String getName() { + java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - signature_ = s; + name_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ public com.google.protobuf.ByteString - getSignatureBytes() { - java.lang.Object ref = signature_; + getNameBytes() { + java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - signature_ = b; + name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ - public Builder setSignature( + public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - signature_ = value; + name_ = value; onChanged(); return this; } /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ - public Builder clearSignature() { + public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); - signature_ = getDefaultInstance().getSignature(); + name_ = getDefaultInstance().getName(); onChanged(); return this; } /** - * required string signature = 1; - * - *
-       * <pre>
-       * the unique signature of the procedure
-       * </pre>
+ * required string name = 1; */ - public Builder setSignatureBytes( + public Builder setNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - signature_ = value; + name_ = value; onChanged(); return this; } - // optional string instance = 2; - private java.lang.Object instance_ = ""; + // optional bytes value = 2; + private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
+ * optional bytes value = 2; */ - public boolean hasInstance() { + public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
- */ - public java.lang.String getInstance() { - java.lang.Object ref = instance_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - instance_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
+ * optional bytes value = 2; */ - public com.google.protobuf.ByteString - getInstanceBytes() { - java.lang.Object ref = instance_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - instance_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public com.google.protobuf.ByteString getValue() { + return value_; } /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
+ * optional bytes value = 2; */ - public Builder setInstance( - java.lang.String value) { + public Builder setValue(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - instance_ = value; + value_ = value; onChanged(); return this; } /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
+ * optional bytes value = 2; */ - public Builder clearInstance() { + public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); - instance_ = getDefaultInstance().getInstance(); + value_ = getDefaultInstance().getValue(); onChanged(); return this; } - /** - * optional string instance = 2; - * - *
-       * <pre>
-       * the procedure instance name
-       * </pre>
- */ - public Builder setInstanceBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); + + // @@protoc_insertion_point(builder_scope:hbase.pb.NameBytesPair) + } + + static { + defaultInstance = new NameBytesPair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.NameBytesPair) } - bitField0_ |= 0x00000002; - instance_ = value; - onChanged(); - return this; - } - // optional int64 creation_time = 3 [default = 0]; - private long creationTime_ ; - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public long getCreationTime() { - return creationTime_; - } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public Builder setCreationTime(long value) { - bitField0_ |= 0x00000004; - creationTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public Builder clearCreationTime() { - bitField0_ = (bitField0_ & ~0x00000004); - creationTime_ = 0L; - onChanged(); - return this; - } + public interface BytesBytesPairOrBuilder + extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.NameStringPair configuration = 4; - private java.util.List configuration_ = - java.util.Collections.emptyList(); - private void ensureConfigurationIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - configuration_ = new java.util.ArrayList(configuration_); - bitField0_ |= 0x00000008; - } - } + // required bytes first = 1; + /** + * required bytes first = 1; + */ + boolean hasFirst(); + /** + * required bytes first = 1; + */ + com.google.protobuf.ByteString getFirst(); - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; + // required bytes second = 2; + /** + * required bytes second = 2; + */ + boolean hasSecond(); + /** + * required bytes second = 2; + */ + com.google.protobuf.ByteString getSecond(); + } + /** + * Protobuf type {@code hbase.pb.BytesBytesPair} + */ + public static final class BytesBytesPair extends + com.google.protobuf.GeneratedMessage + implements BytesBytesPairOrBuilder { + // Use BytesBytesPair.newBuilder() to construct. 
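
For orientation, here is a minimal sketch of how client code would exercise the generated NameBytesPair message whose builder is rewritten above. The demo class and the literal strings are hypothetical; the accessors (setName, setValue, build) match the generated surface visible in this hunk, and toByteArray/parseFrom are the standard protobuf 2.x serialization helpers.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;

    public class NameBytesPairDemo {  // hypothetical demo class
      public static void main(String[] args) throws Exception {
        // name is a required string, value an optional bytes field;
        // build() throws if name has not been set.
        NameBytesPair pair = NameBytesPair.newBuilder()
            .setName("attribute")                          // hypothetical name
            .setValue(ByteString.copyFromUtf8("payload"))  // hypothetical value
            .build();
        byte[] wire = pair.toByteArray();                  // serialize for the RPC
        NameBytesPair copy = NameBytesPair.parseFrom(wire);
        System.out.println(copy.getName() + " = " + copy.getValue().toStringUtf8());
      }
    }
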
+ private BytesBytesPair(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BytesBytesPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public java.util.List getConfigurationList() { - if (configurationBuilder_ == null) { - return java.util.Collections.unmodifiableList(configuration_); - } else { - return configurationBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public int getConfigurationCount() { - if (configurationBuilder_ == null) { - return configuration_.size(); - } else { - return configurationBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - if (configurationBuilder_ == null) { - return configuration_.get(index); - } else { - return configurationBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.NameStringPair configuration = 4; - */ - public Builder setConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + private static final BytesBytesPair defaultInstance; + public static BytesBytesPair getDefaultInstance() { + return defaultInstance; + } + + public BytesBytesPair getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BytesBytesPair( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + first_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + second_ = input.readBytes(); + break; + } } - ensureConfigurationIsMutable(); - configuration_.set(index, value); - onChanged(); - } else { - configurationBuilder_.setMessage(index, value); } - return this; + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BytesBytesPair parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BytesBytesPair(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes first = 1; + public static final int FIRST_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString first_; + /** + * required bytes first = 1; + */ + public boolean hasFirst() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes first = 1; + */ + public com.google.protobuf.ByteString getFirst() { + return first_; + } + + // required bytes second = 2; + public static final int SECOND_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString second_; + /** + * required bytes second = 2; + */ + public boolean hasSecond() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes second = 2; + */ + public com.google.protobuf.ByteString getSecond() { + return second_; + } + + private void initFields() { + first_ = com.google.protobuf.ByteString.EMPTY; + second_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFirst()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSecond()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, first_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, second_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, first_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, second_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other = 
(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) obj; + + boolean result = true; + result = result && (hasFirst() == other.hasFirst()); + if (hasFirst()) { + result = result && getFirst() + .equals(other.getFirst()); + } + result = result && (hasSecond() == other.hasSecond()); + if (hasSecond()) { + result = result && getSecond() + .equals(other.getSecond()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFirst()) { + hash = (37 * hash) + FIRST_FIELD_NUMBER; + hash = (53 * hash) + getFirst().hashCode(); + } + if (hasSecond()) { + hash = (37 * hash) + SECOND_FIELD_NUMBER; + hash = (53 * hash) + getSecond().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BytesBytesPair} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + first_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + second_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_BytesBytesPair_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.first_ = first_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.second_ = second_; + 
result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()) return this; + if (other.hasFirst()) { + setFirst(other.getFirst()); + } + if (other.hasSecond()) { + setSecond(other.getSecond()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFirst()) { + + return false; + } + if (!hasSecond()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes first = 1; + private com.google.protobuf.ByteString first_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes first = 1; + */ + public boolean hasFirst() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes first = 1; + */ + public com.google.protobuf.ByteString getFirst() { + return first_; + } + /** + * required bytes first = 1; + */ + public Builder setFirst(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + first_ = value; + onChanged(); + return this; + } + /** + * required bytes first = 1; + */ + public Builder clearFirst() { + bitField0_ = (bitField0_ & ~0x00000001); + first_ = getDefaultInstance().getFirst(); + onChanged(); + return this; + } + + // required bytes second = 2; + private com.google.protobuf.ByteString second_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes second = 2; + */ + public boolean hasSecond() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes second = 2; + */ + public com.google.protobuf.ByteString getSecond() { + return second_; + } + /** + * required bytes second = 2; + */ + public Builder setSecond(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + second_ = value; + onChanged(); + return this; + } + /** + * required bytes second = 2; + */ + public Builder clearSecond() { + bitField0_ = (bitField0_ & ~0x00000002); + second_ = getDefaultInstance().getSecond(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BytesBytesPair) + } + + static { + defaultInstance = new BytesBytesPair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BytesBytesPair) + } + + public interface NameInt64PairOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // optional string name = 1; + /** + * optional string name = 1; + */ + boolean hasName(); + /** + * optional string name = 1; + */ + java.lang.String getName(); + /** + * optional string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // optional int64 value = 2; + /** + * optional int64 value = 2; + */ + boolean hasValue(); + /** + * optional int64 value = 2; + */ + long getValue(); + } + /** + * Protobuf type {@code hbase.pb.NameInt64Pair} + */ + public static final class NameInt64Pair extends + com.google.protobuf.GeneratedMessage + implements NameInt64PairOrBuilder { + // Use NameInt64Pair.newBuilder() to construct. + private NameInt64Pair(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private NameInt64Pair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final NameInt64Pair defaultInstance; + public static NameInt64Pair getDefaultInstance() { + return defaultInstance; + } + + public NameInt64Pair getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private NameInt64Pair( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + value_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NameInt64Pair parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NameInt64Pair(input, extensionRegistry); + } + }; + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private long value_; + /** + * optional int64 value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 value = 2; + */ + public long getValue() { + return value_; + } + + private void initFields() { + name_ = ""; + value_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && (getValue() + == other.getValue()); + } + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getValue()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder 
newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.NameInt64Pair} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64PairOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NameInt64Pair_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string name = 1; + private java.lang.Object name_ = ""; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // optional int64 value = 2; + private long value_ ; + /** + * optional int64 value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 value = 2; + */ + public long getValue() { + return value_; + } + /** + * optional int64 value = 2; + */ + public Builder setValue(long value) { + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * optional int64 value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.NameInt64Pair) + } + + static { + defaultInstance = new NameInt64Pair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.NameInt64Pair) + } + + public interface ProcedureDescriptionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + 
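
In the same spirit, a hedged sketch of the two pair messages defined just above: BytesBytesPair, whose two fields are both required, and NameInt64Pair, whose fields are both optional. The demo class and the literal values are invented for illustration; the builder methods are the ones shown in this hunk.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;

    public class PairMessagesDemo {  // hypothetical demo class
      public static void main(String[] args) throws Exception {
        // first and second are required, so isInitialized() stays false
        // (and build() throws) until both have been set.
        BytesBytesPair range = BytesBytesPair.newBuilder()
            .setFirst(ByteString.copyFromUtf8("startRow"))  // hypothetical values
            .setSecond(ByteString.copyFromUtf8("stopRow"))
            .build();

        // Both NameInt64Pair fields are optional; an unset value reads as 0L.
        NameInt64Pair counter = NameInt64Pair.newBuilder()
            .setName("requestCount")  // hypothetical name
            .setValue(42L)
            .build();

        BytesBytesPair parsed = BytesBytesPair.parseFrom(range.toByteArray());
        System.out.println(parsed.getFirst().toStringUtf8() + ".."
            + parsed.getSecond().toStringUtf8() + ", "
            + counter.getName() + "=" + counter.getValue());
      }
    }
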
// required string signature = 1; + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + boolean hasSignature(); + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + java.lang.String getSignature(); + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + com.google.protobuf.ByteString + getSignatureBytes(); + + // optional string instance = 2; + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + boolean hasInstance(); + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + java.lang.String getInstance(); + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + com.google.protobuf.ByteString + getInstanceBytes(); + + // optional int64 creation_time = 3 [default = 0]; + /** + * optional int64 creation_time = 3 [default = 0]; + */ + boolean hasCreationTime(); + /** + * optional int64 creation_time = 3 [default = 0]; + */ + long getCreationTime(); + + // repeated .hbase.pb.NameStringPair configuration = 4; + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + java.util.List + getConfigurationList(); + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + int getConfigurationCount(); + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + java.util.List + getConfigurationOrBuilderList(); + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ProcedureDescription} + * + *
+   * <pre>
+   **
+   * Description of the distributed procedure to take
+   * </pre>
+ */ + public static final class ProcedureDescription extends + com.google.protobuf.GeneratedMessage + implements ProcedureDescriptionOrBuilder { + // Use ProcedureDescription.newBuilder() to construct. + private ProcedureDescription(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureDescription(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureDescription defaultInstance; + public static ProcedureDescription getDefaultInstance() { + return defaultInstance; + } + + public ProcedureDescription getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureDescription( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + signature_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + instance_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + creationTime_ = input.readInt64(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureDescription parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureDescription(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string signature = 1; + public static final int SIGNATURE_FIELD_NUMBER = 1; + private java.lang.Object signature_; + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + public java.lang.String getSignature() { + java.lang.Object ref = signature_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + signature_ = s; + } + return s; + } + } + /** + * required string signature = 1; + * + *
+     * <pre>
+     * the unique signature of the procedure
+     * </pre>
+ */ + public com.google.protobuf.ByteString + getSignatureBytes() { + java.lang.Object ref = signature_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + signature_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string instance = 2; + public static final int INSTANCE_FIELD_NUMBER = 2; + private java.lang.Object instance_; + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + public java.lang.String getInstance() { + java.lang.Object ref = instance_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + instance_ = s; + } + return s; + } + } + /** + * optional string instance = 2; + * + *
+     * <pre>
+     * the procedure instance name
+     * </pre>
+ */ + public com.google.protobuf.ByteString + getInstanceBytes() { + java.lang.Object ref = instance_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instance_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 creation_time = 3 [default = 0]; + public static final int CREATION_TIME_FIELD_NUMBER = 3; + private long creationTime_; + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public long getCreationTime() { + return creationTime_; + } + + // repeated .hbase.pb.NameStringPair configuration = 4; + public static final int CONFIGURATION_FIELD_NUMBER = 4; + private java.util.List configuration_; + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public java.util.List getConfigurationList() { + return configuration_; + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public java.util.List + getConfigurationOrBuilderList() { + return configuration_; + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public int getConfigurationCount() { + return configuration_.size(); + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + return configuration_.get(index); + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + return configuration_.get(index); + } + + private void initFields() { + signature_ = ""; + instance_ = ""; + creationTime_ = 0L; + configuration_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSignature()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getSignatureBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getInstanceBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, creationTime_); + } + for (int i = 0; i < configuration_.size(); i++) { + output.writeMessage(4, configuration_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getSignatureBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getInstanceBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += 
com.google.protobuf.CodedOutputStream + .computeInt64Size(3, creationTime_); + } + for (int i = 0; i < configuration_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, configuration_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) obj; + + boolean result = true; + result = result && (hasSignature() == other.hasSignature()); + if (hasSignature()) { + result = result && getSignature() + .equals(other.getSignature()); + } + result = result && (hasInstance() == other.hasInstance()); + if (hasInstance()) { + result = result && getInstance() + .equals(other.getInstance()); + } + result = result && (hasCreationTime() == other.hasCreationTime()); + if (hasCreationTime()) { + result = result && (getCreationTime() + == other.getCreationTime()); + } + result = result && getConfigurationList() + .equals(other.getConfigurationList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSignature()) { + hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; + hash = (53 * hash) + getSignature().hashCode(); + } + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getInstance().hashCode(); + } + if (hasCreationTime()) { + hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCreationTime()); + } + if (getConfigurationCount() > 0) { + hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; + hash = (53 * hash) + getConfigurationList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ProcedureDescription} + * + *
+     * <pre>
+     **
+     * Description of the distributed procedure to take
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getConfigurationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + signature_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + instance_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + creationTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + configurationBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ProcedureDescription_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.signature_ = signature_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.instance_ = instance_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.creationTime_ = creationTime_; + if (configurationBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + 
bitField0_ = (bitField0_ & ~0x00000008); + } + result.configuration_ = configuration_; + } else { + result.configuration_ = configurationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) return this; + if (other.hasSignature()) { + bitField0_ |= 0x00000001; + signature_ = other.signature_; + onChanged(); + } + if (other.hasInstance()) { + bitField0_ |= 0x00000002; + instance_ = other.instance_; + onChanged(); + } + if (other.hasCreationTime()) { + setCreationTime(other.getCreationTime()); + } + if (configurationBuilder_ == null) { + if (!other.configuration_.isEmpty()) { + if (configuration_.isEmpty()) { + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureConfigurationIsMutable(); + configuration_.addAll(other.configuration_); + } + onChanged(); + } + } else { + if (!other.configuration_.isEmpty()) { + if (configurationBuilder_.isEmpty()) { + configurationBuilder_.dispose(); + configurationBuilder_ = null; + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000008); + configurationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getConfigurationFieldBuilder() : null; + } else { + configurationBuilder_.addAllMessages(other.configuration_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSignature()) { + + return false; + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string signature = 1; + private java.lang.Object signature_ = ""; + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public java.lang.String getSignature() { + java.lang.Object ref = signature_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + signature_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public com.google.protobuf.ByteString + getSignatureBytes() { + java.lang.Object ref = signature_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + signature_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public Builder setSignature( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + signature_ = value; + onChanged(); + return this; + } + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public Builder clearSignature() { + bitField0_ = (bitField0_ & ~0x00000001); + signature_ = getDefaultInstance().getSignature(); + onChanged(); + return this; + } + /** + * required string signature = 1; + * + *
+       * <pre>
+       * the unique signature of the procedure
+       * </pre>
+ */ + public Builder setSignatureBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + signature_ = value; + onChanged(); + return this; + } + + // optional string instance = 2; + private java.lang.Object instance_ = ""; + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public java.lang.String getInstance() { + java.lang.Object ref = instance_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + instance_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public com.google.protobuf.ByteString + getInstanceBytes() { + java.lang.Object ref = instance_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instance_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public Builder setInstance( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + instance_ = value; + onChanged(); + return this; + } + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000002); + instance_ = getDefaultInstance().getInstance(); + onChanged(); + return this; + } + /** + * optional string instance = 2; + * + *
+       * <pre>
+       * the procedure instance name
+       * </pre>
+ */ + public Builder setInstanceBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + instance_ = value; + onChanged(); + return this; + } + + // optional int64 creation_time = 3 [default = 0]; + private long creationTime_ ; + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public long getCreationTime() { + return creationTime_; + } + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public Builder setCreationTime(long value) { + bitField0_ |= 0x00000004; + creationTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public Builder clearCreationTime() { + bitField0_ = (bitField0_ & ~0x00000004); + creationTime_ = 0L; + onChanged(); + return this; + } + + // repeated .hbase.pb.NameStringPair configuration = 4; + private java.util.List configuration_ = + java.util.Collections.emptyList(); + private void ensureConfigurationIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = new java.util.ArrayList(configuration_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; + + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public java.util.List getConfigurationList() { + if (configurationBuilder_ == null) { + return java.util.Collections.unmodifiableList(configuration_); + } else { + return configurationBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public int getConfigurationCount() { + if (configurationBuilder_ == null) { + return configuration_.size(); + } else { + return configurationBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); + } else { + return configurationBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.NameStringPair configuration = 4; + */ + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.set(index, value); + onChanged(); + } else { + configurationBuilder_.setMessage(index, value); + } + return this; } /** * repeated .hbase.pb.NameStringPair configuration = 4; @@ -16848,279 +18418,661 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000002); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionServerInfo_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo getDefaultInstanceForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance(); + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionServerInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.infoPort_ = infoPort_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (versionInfoBuilder_ == null) { + result.versionInfo_ = versionInfo_; + } else { + result.versionInfo_ = versionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance()) return this; + if (other.hasInfoPort()) { + setInfoPort(other.getInfoPort()); + } + if (other.hasVersionInfo()) { + mergeVersionInfo(other.getVersionInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasVersionInfo()) { + if (!getVersionInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int32 infoPort = 1; + private int infoPort_ ; + /** + * optional int32 infoPort = 1; + */ + public boolean hasInfoPort() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int32 infoPort = 1; + */ + public int getInfoPort() { + return infoPort_; + } + /** + * 
optional int32 infoPort = 1; + */ + public Builder setInfoPort(int value) { + bitField0_ |= 0x00000001; + infoPort_ = value; + onChanged(); + return this; + } + /** + * optional int32 infoPort = 1; + */ + public Builder clearInfoPort() { + bitField0_ = (bitField0_ & ~0x00000001); + infoPort_ = 0; + onChanged(); + return this; + } + + // optional .hbase.pb.VersionInfo version_info = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder> versionInfoBuilder_; + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public boolean hasVersionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo getVersionInfo() { + if (versionInfoBuilder_ == null) { + return versionInfo_; + } else { + return versionInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public Builder setVersionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo value) { + if (versionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versionInfo_ = value; + onChanged(); + } else { + versionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public Builder setVersionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder builderForValue) { + if (versionInfoBuilder_ == null) { + versionInfo_ = builderForValue.build(); + onChanged(); + } else { + versionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public Builder mergeVersionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo value) { + if (versionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + versionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance()) { + versionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.newBuilder(versionInfo_).mergeFrom(value).buildPartial(); + } else { + versionInfo_ = value; + } + onChanged(); + } else { + versionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public Builder clearVersionInfo() { + if (versionInfoBuilder_ == null) { + versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance(); + onChanged(); + } else { + versionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder getVersionInfoBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getVersionInfoFieldBuilder().getBuilder(); } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo build() { - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() { + if (versionInfoBuilder_ != null) { + return versionInfoBuilder_.getMessageOrBuilder(); + } else { + return versionInfo_; } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.infoPort_ = infoPort_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } + /** + * optional .hbase.pb.VersionInfo version_info = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder> + getVersionInfoFieldBuilder() { if (versionInfoBuilder_ == null) { - result.versionInfo_ = versionInfo_; - } else { - result.versionInfo_ = versionInfoBuilder_.build(); + versionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder>( + versionInfo_, + getParentForChildren(), + isClean()); + versionInfo_ = null; } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + return versionInfoBuilder_; } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionServerInfo) + } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance()) return this; - if (other.hasInfoPort()) { - setInfoPort(other.getInfoPort()); - } - if (other.hasVersionInfo()) { - mergeVersionInfo(other.getVersionInfo()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } + static { + defaultInstance = new RegionServerInfo(true); + defaultInstance.initFields(); + } - public final boolean isInitialized() { - if (hasVersionInfo()) { - if (!getVersionInfo().isInitialized()) { - - return false; + // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerInfo) + } + + public interface SnapshotDescriptionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // optional string table = 
2; + /** + * optional string table = 2; + * + *
+     * <pre>
+     * not needed for delete, but checked for in taking snapshot
+     * </pre>
+ */ + boolean hasTable(); + /** + * optional string table = 2; + * + *
+     * <pre>
+     * not needed for delete, but checked for in taking snapshot
+     * </pre>
+ */ + java.lang.String getTable(); + /** + * optional string table = 2; + * + *
+     * <pre>
+     * not needed for delete, but checked for in taking snapshot
+     * </pre>
+ */ + com.google.protobuf.ByteString + getTableBytes(); + + // optional int64 creation_time = 3 [default = 0]; + /** + * optional int64 creation_time = 3 [default = 0]; + */ + boolean hasCreationTime(); + /** + * optional int64 creation_time = 3 [default = 0]; + */ + long getCreationTime(); + + // optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + boolean hasType(); + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType(); + + // optional int32 version = 5; + /** + * optional int32 version = 5; + */ + boolean hasVersion(); + /** + * optional int32 version = 5; + */ + int getVersion(); + + // optional string owner = 6; + /** + * optional string owner = 6; + */ + boolean hasOwner(); + /** + * optional string owner = 6; + */ + java.lang.String getOwner(); + /** + * optional string owner = 6; + */ + com.google.protobuf.ByteString + getOwnerBytes(); + + // optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + boolean hasUsersAndPermissions(); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions(); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder(); + + // optional int64 ttl = 8 [default = 0]; + /** + * optional int64 ttl = 8 [default = 0]; + */ + boolean hasTtl(); + /** + * optional int64 ttl = 8 [default = 0]; + */ + long getTtl(); + } + /** + * Protobuf type {@code hbase.pb.SnapshotDescription} + * + *
+   * <pre>
+   **
+   * Description of the snapshot to take
+   * </pre>
+ */ + public static final class SnapshotDescription extends + com.google.protobuf.GeneratedMessage + implements SnapshotDescriptionOrBuilder { + // Use SnapshotDescription.newBuilder() to construct. + private SnapshotDescription(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotDescription(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotDescription defaultInstance; + public static SnapshotDescription getDefaultInstance() { + return defaultInstance; + } + + public SnapshotDescription getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotDescription( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + table_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + creationTime_ = input.readInt64(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + type_ = value; + } + break; + } + case 40: { + bitField0_ |= 0x00000010; + version_ = input.readInt32(); + break; + } + case 50: { + bitField0_ |= 0x00000020; + owner_ = input.readBytes(); + break; + } + case 58: { + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = usersAndPermissions_.toBuilder(); + } + usersAndPermissions_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(usersAndPermissions_); + usersAndPermissions_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } + case 64: { + bitField0_ |= 0x00000080; + ttl_ = input.readInt64(); + break; + } } } - return true; + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; + } - public Builder mergeFrom( + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotDescription parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotDescription(input, extensionRegistry); } - private int bitField0_; + }; - // optional int32 infoPort = 1; - private int infoPort_ ; - /** - * optional int32 infoPort = 1; - */ - public boolean hasInfoPort() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int32 infoPort = 1; - */ - public int getInfoPort() { - return infoPort_; - } - /** - * optional int32 infoPort = 1; - */ - public Builder setInfoPort(int value) { - bitField0_ |= 0x00000001; - infoPort_ = value; - onChanged(); - return this; - } - /** - * optional int32 infoPort = 1; - */ - public Builder clearInfoPort() { - bitField0_ = (bitField0_ & ~0x00000001); - infoPort_ = 0; - onChanged(); - return this; - } + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } - // optional .hbase.pb.VersionInfo version_info = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder> versionInfoBuilder_; - /** - * optional .hbase.pb.VersionInfo version_info = 2; - */ - public boolean hasVersionInfo() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .hbase.pb.VersionInfo version_info = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo getVersionInfo() { - if (versionInfoBuilder_ == null) { - return versionInfo_; - } else { - return versionInfoBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.VersionInfo version_info = 2; - */ - public Builder setVersionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo value) { - if (versionInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - versionInfo_ = value; - onChanged(); - } else { - versionInfoBuilder_.setMessage(value); 
- } - bitField0_ |= 0x00000002; - return this; - } + /** + * Protobuf enum {@code hbase.pb.SnapshotDescription.Type} + */ + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { /** - * optional .hbase.pb.VersionInfo version_info = 2; + * DISABLED = 0; */ - public Builder setVersionInfo( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder builderForValue) { - if (versionInfoBuilder_ == null) { - versionInfo_ = builderForValue.build(); - onChanged(); - } else { - versionInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } + DISABLED(0, 0), /** - * optional .hbase.pb.VersionInfo version_info = 2; + * FLUSH = 1; */ - public Builder mergeVersionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo value) { - if (versionInfoBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - versionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance()) { - versionInfo_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.newBuilder(versionInfo_).mergeFrom(value).buildPartial(); - } else { - versionInfo_ = value; - } - onChanged(); - } else { - versionInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } + FLUSH(1, 1), /** - * optional .hbase.pb.VersionInfo version_info = 2; + * SKIPFLUSH = 2; */ - public Builder clearVersionInfo() { - if (versionInfoBuilder_ == null) { - versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.getDefaultInstance(); - onChanged(); - } else { - versionInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } + SKIPFLUSH(2, 2), + ; + /** - * optional .hbase.pb.VersionInfo version_info = 2; + * DISABLED = 0; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder getVersionInfoBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getVersionInfoFieldBuilder().getBuilder(); - } + public static final int DISABLED_VALUE = 0; /** - * optional .hbase.pb.VersionInfo version_info = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() { - if (versionInfoBuilder_ != null) { - return versionInfoBuilder_.getMessageOrBuilder(); - } else { - return versionInfo_; - } - } + * FLUSH = 1; + */ + public static final int FLUSH_VALUE = 1; /** - * optional .hbase.pb.VersionInfo version_info = 2; + * SKIPFLUSH = 2; */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder> - getVersionInfoFieldBuilder() { - if (versionInfoBuilder_ == null) { - versionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfoOrBuilder>( - versionInfo_, - getParentForChildren(), - isClean()); - versionInfo_ = null; + public static final int SKIPFLUSH_VALUE = 2; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 0: return DISABLED; + case 1: return FLUSH; + case 2: return SKIPFLUSH; + default: return null; } - return versionInfoBuilder_; } - // 
@@protoc_insertion_point(builder_scope:hbase.pb.RegionServerInfo) - } + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; - static { - defaultInstance = new RegionServerInfo(true); - defaultInstance.initFields(); - } + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0); + } - // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerInfo) - } + private static final Type[] VALUES = values(); - public interface SnapshotDescriptionOrBuilder - extends com.google.protobuf.MessageOrBuilder { + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotDescription.Type) + } + + private int bitField0_; // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; /** * required string name = 1; */ - boolean hasName(); + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } /** * required string name = 1; */ - java.lang.String getName(); + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } /** * required string name = 1; */ - com.google.protobuf.ByteString - getNameBytes(); + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } // optional string table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private java.lang.Object table_; /** * optional string table = 2; * @@ -17128,7 +19080,9 @@ public interface SnapshotDescriptionOrBuilder * not needed for delete, but checked for in taking snapshot * */ - boolean hasTable(); + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } /** * optional string table = 2; * @@ -17136,7 +19090,20 @@ public interface SnapshotDescriptionOrBuilder * not needed for delete, but checked for in taking snapshot * */ - java.lang.String getTable(); + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + 
com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + table_ = s; + } + return s; + } + } /** * optional string table = 2; * @@ -17144,1540 +19111,2015 @@ public interface SnapshotDescriptionOrBuilder * not needed for delete, but checked for in taking snapshot * */ - com.google.protobuf.ByteString - getTableBytes(); + public com.google.protobuf.ByteString + getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } // optional int64 creation_time = 3 [default = 0]; + public static final int CREATION_TIME_FIELD_NUMBER = 3; + private long creationTime_; /** * optional int64 creation_time = 3 [default = 0]; */ - boolean hasCreationTime(); + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } /** * optional int64 creation_time = 3 [default = 0]; */ - long getCreationTime(); + public long getCreationTime() { + return creationTime_; + } // optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + public static final int TYPE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_; /** * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; */ - boolean hasType(); + public boolean hasType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } /** * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { + return type_; + } // optional int32 version = 5; + public static final int VERSION_FIELD_NUMBER = 5; + private int version_; /** * optional int32 version = 5; */ - boolean hasVersion(); + public boolean hasVersion() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } /** * optional int32 version = 5; */ - int getVersion(); + public int getVersion() { + return version_; + } // optional string owner = 6; + public static final int OWNER_FIELD_NUMBER = 6; + private java.lang.Object owner_; /** * optional string owner = 6; */ - boolean hasOwner(); + public boolean hasOwner() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } /** * optional string owner = 6; */ - java.lang.String getOwner(); + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + owner_ = s; + } + return s; + } + } /** * optional string owner = 6; */ - com.google.protobuf.ByteString - getOwnerBytes(); + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } // optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + public static final int USERS_AND_PERMISSIONS_FIELD_NUMBER = 7; + private 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions usersAndPermissions_; /** * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; */ - boolean hasUsersAndPermissions(); + public boolean hasUsersAndPermissions() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } /** * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; */ - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions(); + public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions() { + return usersAndPermissions_; + } /** * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; */ - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder(); + public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder() { + return usersAndPermissions_; + } // optional int64 ttl = 8 [default = 0]; + public static final int TTL_FIELD_NUMBER = 8; + private long ttl_; /** * optional int64 ttl = 8 [default = 0]; */ - boolean hasTtl(); + public boolean hasTtl() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } /** * optional int64 ttl = 8 [default = 0]; */ - long getTtl(); - } - /** - * Protobuf type {@code hbase.pb.SnapshotDescription} - * - *
-   * <pre>
-   **
-   * Description of the snapshot to take
-   * </pre>
- */ - public static final class SnapshotDescription extends - com.google.protobuf.GeneratedMessage - implements SnapshotDescriptionOrBuilder { - // Use SnapshotDescription.newBuilder() to construct. - private SnapshotDescription(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); + public long getTtl() { + return ttl_; } - private SnapshotDescription(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SnapshotDescription defaultInstance; - public static SnapshotDescription getDefaultInstance() { - return defaultInstance; + private void initFields() { + name_ = ""; + table_ = ""; + creationTime_ = 0L; + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + version_ = 0; + owner_ = ""; + usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + ttl_ = 0L; } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; - public SnapshotDescription getDefaultInstanceForType() { - return defaultInstance; + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUsersAndPermissions()) { + if (!getUsersAndPermissions().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; } - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SnapshotDescription( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - table_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - creationTime_ = input.readInt64(); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - type_ = value; - } - break; - } - case 40: { - bitField0_ |= 0x00000010; - version_ = input.readInt32(); - break; - } - case 50: { - bitField0_ |= 0x00000020; - owner_ = input.readBytes(); - break; - } - case 58: { - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder subBuilder = null; - if (((bitField0_ & 0x00000040) == 0x00000040)) { - subBuilder = usersAndPermissions_.toBuilder(); - } - usersAndPermissions_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.PARSER, extensionRegistry); - if 
(subBuilder != null) { - subBuilder.mergeFrom(usersAndPermissions_); - usersAndPermissions_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000040; - break; - } - case 64: { - bitField0_ |= 0x00000080; - ttl_ = input.readInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTableBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, creationTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, type_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeInt32(5, version_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getOwnerBytes()); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, usersAndPermissions_); } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeInt64(8, ttl_); + } + getUnknownFields().writeTo(output); } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTableBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, creationTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, type_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, version_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getOwnerBytes()); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, usersAndPermissions_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, ttl_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class); + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SnapshotDescription parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SnapshotDescription(input, extensionRegistry); + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasCreationTime() == other.hasCreationTime()); + if (hasCreationTime()) { + result = result && (getCreationTime() + == other.getCreationTime()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasOwner() == other.hasOwner()); + if (hasOwner()) { + result = result && getOwner() + .equals(other.getOwner()); + } + result = result && (hasUsersAndPermissions() == other.hasUsersAndPermissions()); + if (hasUsersAndPermissions()) { + result = result && getUsersAndPermissions() + .equals(other.getUsersAndPermissions()); } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + result = result && (hasTtl() == other.hasTtl()); + if (hasTtl()) { + result = result && (getTtl() + == other.getTtl()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } - /** - * Protobuf enum {@code hbase.pb.SnapshotDescription.Type} - */ - public enum Type - implements com.google.protobuf.ProtocolMessageEnum { - /** - * DISABLED = 0; - */ - DISABLED(0, 0), - /** - * FLUSH = 1; - */ - FLUSH(1, 1), - /** - * SKIPFLUSH = 2; - */ - SKIPFLUSH(2, 2), - ; - - /** - * DISABLED = 0; - */ - public static final int DISABLED_VALUE = 0; - /** - * FLUSH = 1; - */ - public static final int FLUSH_VALUE = 1; - /** - * SKIPFLUSH = 2; - */ - public static final int SKIPFLUSH_VALUE = 2; - - - public final int getNumber() { return value; } - - public static Type valueOf(int value) { - switch (value) { - case 0: return DISABLED; - case 1: return FLUSH; - case 2: return SKIPFLUSH; - default: return null; - } + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; + int hash = 41; + hash = (19 * hash) + 
getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Type findValueByNumber(int number) { - return Type.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); + if (hasCreationTime()) { + hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCreationTime()); } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); } - - private static final Type[] VALUES = values(); - - public static Type valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); } - - private final int index; - private final int value; - - private Type(int index, int value) { - this.index = index; - this.value = value; + if (hasOwner()) { + hash = (37 * hash) + OWNER_FIELD_NUMBER; + hash = (53 * hash) + getOwner().hashCode(); } - - // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotDescription.Type) + if (hasUsersAndPermissions()) { + hash = (37 * hash) + USERS_AND_PERMISSIONS_FIELD_NUMBER; + hash = (53 * hash) + getUsersAndPermissions().hashCode(); + } + if (hasTtl()) { + hash = (37 * hash) + TTL_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTtl()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; } - private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - /** - * required string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - /** - * required string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - name_ = s; - } - return s; - } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - /** - * required string name = 1; - */ - public 
com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - - // optional string table = 2; - public static final int TABLE_FIELD_NUMBER = 2; - private java.lang.Object table_; - /** - * optional string table = 2; - * - *
-     * not needed for delete, but checked for in taking snapshot
-     * </pre>
- */ - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - /** - * optional string table = 2; - * - *
-     * not needed for delete, but checked for in taking snapshot
-     * </pre>
- */ - public java.lang.String getTable() { - java.lang.Object ref = table_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - table_ = s; - } - return s; - } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } /** - * optional string table = 2; + * Protobuf type {@code hbase.pb.SnapshotDescription} * *
-     * not needed for delete, but checked for in taking snapshot
+     **
+     * Description of the snapshot to take
      * </pre>
*/ - public com.google.protobuf.ByteString - getTableBytes() { - java.lang.Object ref = table_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - table_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; } - } - // optional int64 creation_time = 3 [default = 0]; - public static final int CREATION_TIME_FIELD_NUMBER = 3; - private long creationTime_; - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public long getCreationTime() { - return creationTime_; - } + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class); + } - // optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - public static final int TYPE_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_; - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { - return type_; - } + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } - // optional int32 version = 5; - public static final int VERSION_FIELD_NUMBER = 5; - private int version_; - /** - * optional int32 version = 5; - */ - public boolean hasVersion() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional int32 version = 5; - */ - public int getVersion() { - return version_; - } + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUsersAndPermissionsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } - // optional string owner = 6; - public static final int OWNER_FIELD_NUMBER = 6; - private java.lang.Object owner_; - /** - * optional string owner = 6; - */ - public boolean hasOwner() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional string owner = 6; - */ - public java.lang.String getOwner() { - java.lang.Object ref = owner_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - 
com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - owner_ = s; + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + table_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + creationTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + bitField0_ = (bitField0_ & ~0x00000008); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + owner_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); + if (usersAndPermissionsBuilder_ == null) { + usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + } else { + usersAndPermissionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + ttl_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.table_ = table_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.creationTime_ = creationTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.owner_ = owner_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (usersAndPermissionsBuilder_ == null) { + result.usersAndPermissions_ = usersAndPermissions_; + } else { + result.usersAndPermissions_ = usersAndPermissionsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.ttl_ = ttl_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasTable()) { + bitField0_ |= 0x00000002; + table_ = other.table_; + onChanged(); + } + if (other.hasCreationTime()) { + setCreationTime(other.getCreationTime()); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasOwner()) { + bitField0_ |= 0x00000020; + owner_ = other.owner_; + onChanged(); + } + if (other.hasUsersAndPermissions()) { + mergeUsersAndPermissions(other.getUsersAndPermissions()); + } + if (other.hasTtl()) { + setTtl(other.getTtl()); } - return s; - } - } - /** - * optional string owner = 6; - */ - public com.google.protobuf.ByteString - getOwnerBytes() { - java.lang.Object ref = owner_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - owner_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - } - - // optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; - public static final int USERS_AND_PERMISSIONS_FIELD_NUMBER = 7; - private org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions usersAndPermissions_; - /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; - */ - public boolean hasUsersAndPermissions() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions() { - return usersAndPermissions_; - } - /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder() { - return usersAndPermissions_; - } - // optional int64 ttl = 8 [default = 0]; - public static final int TTL_FIELD_NUMBER = 8; - private long ttl_; - /** - * optional int64 ttl = 8 [default = 0]; - */ - public boolean hasTtl() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional int64 ttl = 8 [default = 0]; - */ - public long getTtl() { - return ttl_; - } + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (hasUsersAndPermissions()) { + if (!getUsersAndPermissions().isInitialized()) { + + return false; + } + } + return true; + } - private void initFields() { - name_ = ""; - table_ = ""; - creationTime_ = 0L; - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - version_ = 0; - owner_ = ""; - usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); - ttl_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; - if (!hasName()) { - memoizedIsInitialized = 0; - return false; + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - if (hasUsersAndPermissions()) { - if (!getUsersAndPermissions().isInitialized()) { - memoizedIsInitialized = 0; - return false; + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; } } - memoizedIsInitialized = 1; - return true; - } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); + // optional string table = 2; + private java.lang.Object table_ = ""; + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getTableBytes()); + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, creationTime_); + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public com.google.protobuf.ByteString + getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public Builder setTable( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + table_ = value; + onChanged(); + return this; + } + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public Builder clearTable() { + bitField0_ = (bitField0_ & ~0x00000002); + table_ = getDefaultInstance().getTable(); + onChanged(); + return this; } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, type_.getNumber()); + /** + * optional string table = 2; + * + *
+       * not needed for delete, but checked for in taking snapshot
+       * </pre>
+ */ + public Builder setTableBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + table_ = value; + onChanged(); + return this; } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeInt32(5, version_); + + // optional int64 creation_time = 3 [default = 0]; + private long creationTime_ ; + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBytes(6, getOwnerBytes()); + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public long getCreationTime() { + return creationTime_; } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeMessage(7, usersAndPermissions_); + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public Builder setCreationTime(long value) { + bitField0_ |= 0x00000004; + creationTime_ = value; + onChanged(); + return this; } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeInt64(8, ttl_); + /** + * optional int64 creation_time = 3 [default = 0]; + */ + public Builder clearCreationTime() { + bitField0_ = (bitField0_ & ~0x00000004); + creationTime_ = 0L; + onChanged(); + return this; } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); + // optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000008) == 0x00000008); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getTableBytes()); + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { + return type_; } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, creationTime_); + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + type_ = value; + onChanged(); + return this; } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, type_.getNumber()); + /** + * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000008); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + onChanged(); + return this; } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(5, version_); + + // optional int32 version = 5; + private int version_ 
; + /** + * optional int32 version = 5; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getOwnerBytes()); + /** + * optional int32 version = 5; + */ + public int getVersion() { + return version_; } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, usersAndPermissions_); + /** + * optional int32 version = 5; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000010; + version_ = value; + onChanged(); + return this; } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(8, ttl_); + /** + * optional int32 version = 5; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000010); + version_ = 0; + onChanged(); + return this; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + // optional string owner = 6; + private java.lang.Object owner_ = ""; + /** + * optional string owner = 6; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) { - return super.equals(obj); + /** + * optional string owner = 6; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + owner_ = s; + return s; + } else { + return (java.lang.String) ref; + } } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj; - - boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + /** + * optional string owner = 6; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + /** + * optional string owner = 6; + */ + public Builder setOwner( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + owner_ = value; + onChanged(); + return this; } - result = result && (hasCreationTime() == other.hasCreationTime()); - if (hasCreationTime()) { - result = result && (getCreationTime() - == other.getCreationTime()); + /** + * optional string owner = 6; + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00000020); + owner_ = getDefaultInstance().getOwner(); + onChanged(); + return this; } - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() 
== other.getType()); + /** + * optional string owner = 6; + */ + public Builder setOwnerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + owner_ = value; + onChanged(); + return this; } - result = result && (hasVersion() == other.hasVersion()); - if (hasVersion()) { - result = result && (getVersion() - == other.getVersion()); + + // optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + private org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> usersAndPermissionsBuilder_; + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public boolean hasUsersAndPermissions() { + return ((bitField0_ & 0x00000040) == 0x00000040); } - result = result && (hasOwner() == other.hasOwner()); - if (hasOwner()) { - result = result && getOwner() - .equals(other.getOwner()); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions() { + if (usersAndPermissionsBuilder_ == null) { + return usersAndPermissions_; + } else { + return usersAndPermissionsBuilder_.getMessage(); + } } - result = result && (hasUsersAndPermissions() == other.hasUsersAndPermissions()); - if (hasUsersAndPermissions()) { - result = result && getUsersAndPermissions() - .equals(other.getUsersAndPermissions()); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public Builder setUsersAndPermissions(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions value) { + if (usersAndPermissionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + usersAndPermissions_ = value; + onChanged(); + } else { + usersAndPermissionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + return this; } - result = result && (hasTtl() == other.hasTtl()); - if (hasTtl()) { - result = result && (getTtl() - == other.getTtl()); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public Builder setUsersAndPermissions( + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder builderForValue) { + if (usersAndPermissionsBuilder_ == null) { + usersAndPermissions_ = builderForValue.build(); + onChanged(); + } else { + usersAndPermissionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + return this; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public Builder mergeUsersAndPermissions(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions value) { + if (usersAndPermissionsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + 
usersAndPermissions_ != org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance()) { + usersAndPermissions_ = + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.newBuilder(usersAndPermissions_).mergeFrom(value).buildPartial(); + } else { + usersAndPermissions_ = value; + } + onChanged(); + } else { + usersAndPermissionsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000040; + return this; } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public Builder clearUsersAndPermissions() { + if (usersAndPermissionsBuilder_ == null) { + usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + onChanged(); + } else { + usersAndPermissionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; } - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder getUsersAndPermissionsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getUsersAndPermissionsFieldBuilder().getBuilder(); } - if (hasCreationTime()) { - hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCreationTime()); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder() { + if (usersAndPermissionsBuilder_ != null) { + return usersAndPermissionsBuilder_.getMessageOrBuilder(); + } else { + return usersAndPermissions_; + } } - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); + /** + * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> + getUsersAndPermissionsFieldBuilder() { + if (usersAndPermissionsBuilder_ == null) { + usersAndPermissionsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder>( + usersAndPermissions_, + getParentForChildren(), + isClean()); + usersAndPermissions_ = null; + } + return usersAndPermissionsBuilder_; } - if (hasVersion()) { - hash = (37 * hash) + VERSION_FIELD_NUMBER; - hash = (53 * hash) + getVersion(); + + // optional int64 ttl = 8 [default = 0]; + private long ttl_ ; + /** + * optional int64 ttl = 8 [default = 0]; + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000080) == 0x00000080); } - if (hasOwner()) { - hash = (37 * hash) + OWNER_FIELD_NUMBER; - hash = (53 * hash) + getOwner().hashCode(); + /** + * optional int64 ttl = 8 
[default = 0]; + */ + public long getTtl() { + return ttl_; } - if (hasUsersAndPermissions()) { - hash = (37 * hash) + USERS_AND_PERMISSIONS_FIELD_NUMBER; - hash = (53 * hash) + getUsersAndPermissions().hashCode(); + /** + * optional int64 ttl = 8 [default = 0]; + */ + public Builder setTtl(long value) { + bitField0_ |= 0x00000080; + ttl_ = value; + onChanged(); + return this; } - if (hasTtl()) { - hash = (37 * hash) + TTL_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTtl()); + /** + * optional int64 ttl = 8 [default = 0]; + */ + public Builder clearTtl() { + bitField0_ = (bitField0_ & ~0x00000080); + ttl_ = 0L; + onChanged(); + return this; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotDescription) } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + static { + defaultInstance = new SnapshotDescription(true); + defaultInstance.initFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + + // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotDescription) + } + + public interface RegionLocationOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.RegionInfo region_info = 1; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + // optional .hbase.pb.ServerName server_name = 2; + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + boolean hasServerName(); + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + 
// required int64 seq_num = 3; + /** + * required int64 seq_num = 3; + */ + boolean hasSeqNum(); + /** + * required int64 seq_num = 3; + */ + long getSeqNum(); + } + /** + * Protobuf type {@code hbase.pb.RegionLocation} + */ + public static final class RegionLocation extends + com.google.protobuf.GeneratedMessage + implements RegionLocationOrBuilder { + // Use RegionLocation.newBuilder() to construct. + private RegionLocation(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); + private RegionLocation(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionLocation defaultInstance; + public static RegionLocation getDefaultInstance() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); + + public RegionLocation getDefaultInstanceForType() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + private RegionLocation( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = serverName_.toBuilder(); + } + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } + bitField0_ |= 
0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + seqNum_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) { - return newBuilder().mergeFrom(prototype); + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor; } - public Builder toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder.class); } - /** - * Protobuf type {@code hbase.pb.SnapshotDescription} - * - *
-     **
-     * Description of the snapshot to take
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getUsersAndPermissionsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - public Builder clear() { - super.clear(); - name_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - table_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - creationTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - bitField0_ = (bitField0_ & ~0x00000008); - version_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - owner_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); - if (usersAndPermissionsBuilder_ == null) { - usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); - } else { - usersAndPermissionsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - ttl_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - return this; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionLocation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionLocation(input, extensionRegistry); } + }; - public Builder clone() { - return create().mergeFrom(buildPartial()); - } + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_SnapshotDescription_descriptor; - } + private int bitField0_; + // required .hbase.pb.RegionInfo region_info = 1; + public static final int REGION_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_; + } + /** + * required 
.hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_; + } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); - } + // optional .hbase.pb.ServerName server_name = 2; + public static final int SERVER_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + public boolean hasServerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; + } + /** + * optional .hbase.pb.ServerName server_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + // required int64 seq_num = 3; + public static final int SEQ_NUM_FIELD_NUMBER = 3; + private long seqNum_; + /** + * required int64 seq_num = 3; + */ + public boolean hasSeqNum() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required int64 seq_num = 3; + */ + public long getSeqNum() { + return seqNum_; + } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.name_ = name_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.table_ = table_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.creationTime_ = creationTime_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.version_ = version_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.owner_ = owner_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - if (usersAndPermissionsBuilder_ == null) { - result.usersAndPermissions_ = usersAndPermissions_; - } else { - result.usersAndPermissions_ = usersAndPermissionsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; + private void initFields() { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + seqNum_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = 
memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSeqNum()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasServerName()) { + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; } - result.ttl_ = ttl_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; } + memoizedIsInitialized = 1; + return true; + } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other); - } else { - super.mergeFrom(other); - return this; - } + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, regionInfo_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, serverName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, seqNum_); + } + getUnknownFields().writeTo(output); + } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this; - if (other.hasName()) { - bitField0_ |= 0x00000001; - name_ = other.name_; - onChanged(); - } - if (other.hasTable()) { - bitField0_ |= 0x00000002; - table_ = other.table_; - onChanged(); - } - if (other.hasCreationTime()) { - setCreationTime(other.getCreationTime()); - } - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasVersion()) { - setVersion(other.getVersion()); - } - if (other.hasOwner()) { - bitField0_ |= 0x00000020; - owner_ = other.owner_; - onChanged(); - } - if (other.hasUsersAndPermissions()) { - mergeUsersAndPermissions(other.getUsersAndPermissions()); - } - if (other.hasTtl()) { - setTtl(other.getTtl()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, regionInfo_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, seqNum_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } - public final boolean isInitialized() { - if (!hasName()) { - - return false; - } - if (hasUsersAndPermissions()) { - if (!getUsersAndPermissions().isInitialized()) { - - return false; - } - } - return true; + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation)) { + return super.equals(obj); } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) obj; - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + boolean result = true; + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); } - private int bitField0_; + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasSeqNum() == other.hasSeqNum()); + if (hasSeqNum()) { + result = result && (getSeqNum() + == other.getSeqNum()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } - // required string name = 1; - private java.lang.Object name_ = ""; - /** - * required string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - /** - * required string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); } - /** - * required string name = 1; - */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + if (hasServerName()) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); } - /** - * required string name = 1; - */ - public Builder setName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; + if (hasSeqNum()) { + hash = (37 * hash) + SEQ_NUM_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSeqNum()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RegionLocation} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor; } - /** - * required string name = 1; - */ - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder.class); } - /** - * required string name = 1; - */ - public Builder setNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - // optional string table = 2; - private java.lang.Object table_ = ""; - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public java.lang.String getTable() { - java.lang.Object ref = table_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - table_ = s; - return s; - } else { - return (java.lang.String) ref; + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + getServerNameFieldBuilder(); } } - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public com.google.protobuf.ByteString - getTableBytes() { - java.lang.Object ref = table_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - table_ = b; - return b; + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } else { - return (com.google.protobuf.ByteString) ref; + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); } - } - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public Builder setTable( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - table_ = value; - onChanged(); - return this; - } - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public Builder clearTable() { bitField0_ = (bitField0_ & ~0x00000002); - table_ = getDefaultInstance().getTable(); - onChanged(); - return this; - } - /** - * optional string table = 2; - * - *
-       * not needed for delete, but checked for in taking snapshot
-       * </pre>
- */ - public Builder setTableBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - table_ = value; - onChanged(); + seqNum_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } - // optional int64 creation_time = 3 [default = 0]; - private long creationTime_ ; - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public Builder clone() { + return create().mergeFrom(buildPartial()); } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public long getCreationTime() { - return creationTime_; + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor; } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public Builder setCreationTime(long value) { - bitField0_ |= 0x00000004; - creationTime_ = value; - onChanged(); - return this; + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance(); } - /** - * optional int64 creation_time = 3 [default = 0]; - */ - public Builder clearCreationTime() { - bitField0_ = (bitField0_ & ~0x00000004); - creationTime_ = 0L; - onChanged(); - return this; + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - // optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.seqNum_ = seqNum_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { - return type_; + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation)other); + } else { + super.mergeFrom(other); + return this; + } } - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) { - if (value == null) { - throw new NullPointerException(); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()) return this; + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); } - bitField0_ |= 0x00000008; - type_ = value; - onChanged(); + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + if (other.hasSeqNum()) { + setSeqNum(other.getSeqNum()); + } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * optional .hbase.pb.SnapshotDescription.Type type = 4 [default = FLUSH]; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000008); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - onChanged(); + + public final boolean isInitialized() { + if (!hasRegionInfo()) { + + return false; + } + if (!hasSeqNum()) { + + return false; + } + if (!getRegionInfo().isInitialized()) { + + return false; + } + if (hasServerName()) { + if (!getServerName().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } + private int bitField0_; - // optional int32 version = 5; - private int version_ ; + // required .hbase.pb.RegionInfo region_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; /** - * optional int32 version = 5; + * required .hbase.pb.RegionInfo region_info = 1; */ - public boolean hasVersion() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional int32 version = 5; + * required .hbase.pb.RegionInfo region_info = 1; */ - public int getVersion() { - return version_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } } /** - * optional int32 version = 5; + * required 
.hbase.pb.RegionInfo region_info = 1; */ - public Builder setVersion(int value) { - bitField0_ |= 0x00000010; - version_ = value; - onChanged(); + public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; return this; } /** - * optional int32 version = 5; + * required .hbase.pb.RegionInfo region_info = 1; */ - public Builder clearVersion() { - bitField0_ = (bitField0_ & ~0x00000010); - version_ = 0; - onChanged(); + public Builder setRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; return this; } - - // optional string owner = 6; - private java.lang.Object owner_ = ""; - /** - * optional string owner = 6; - */ - public boolean hasOwner() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } /** - * optional string owner = 6; + * required .hbase.pb.RegionInfo region_info = 1; */ - public java.lang.String getOwner() { - java.lang.Object ref = owner_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - owner_ = s; - return s; + public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); } else { - return (java.lang.String) ref; + regionInfoBuilder_.mergeFrom(value); } + bitField0_ |= 0x00000001; + return this; } /** - * optional string owner = 6; + * required .hbase.pb.RegionInfo region_info = 1; */ - public com.google.protobuf.ByteString - getOwnerBytes() { - java.lang.Object ref = owner_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - owner_ = b; - return b; + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + regionInfoBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); + return this; } /** - * optional string owner = 6; + * required .hbase.pb.RegionInfo region_info = 1; */ - public Builder setOwner( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - owner_ = value; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000001; onChanged(); - return this; + return getRegionInfoFieldBuilder().getBuilder(); } /** - * optional string owner = 6; + * required .hbase.pb.RegionInfo region_info = 1; */ - public Builder clearOwner() { - bitField0_ = (bitField0_ & ~0x00000020); - owner_ = getDefaultInstance().getOwner(); - onChanged(); - return this; 
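The generated RegionLocation message marks region_info and seq_num as required, so build() fails unless both are set, while server_name may remain absent. A minimal round-trip sketch of the new message, assuming the TableProtos.TableName builder referenced elsewhere in this patch (all names and values below are illustrative):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

// RegionInfo itself has required fields: region_id and table_name.
RegionInfo info = RegionInfo.newBuilder()
    .setRegionId(1L)
    .setTableName(TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build())
    .build();

// region_info and seq_num are required; server_name stays unset here.
RegionLocation loc = RegionLocation.newBuilder()
    .setRegionInfo(info)
    .setSeqNum(42L)
    .build();
RegionLocation parsed = RegionLocation.parseFrom(loc.toByteArray());
assert parsed.hasRegionInfo() && !parsed.hasServerName();
assert parsed.getSeqNum() == 42L;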
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_; + } } /** - * optional string owner = 6; + * required .hbase.pb.RegionInfo region_info = 1; */ - public Builder setOwnerBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - owner_ = value; - onChanged(); - return this; + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; } - // optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; - private org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + // optional .hbase.pb.ServerName server_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> usersAndPermissionsBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public boolean hasUsersAndPermissions() { - return ((bitField0_ & 0x00000040) == 0x00000040); + public boolean hasServerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions getUsersAndPermissions() { - if (usersAndPermissionsBuilder_ == null) { - return usersAndPermissions_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; } else { - return usersAndPermissionsBuilder_.getMessage(); + return serverNameBuilder_.getMessage(); } } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public Builder setUsersAndPermissions(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions value) { - if 
(usersAndPermissionsBuilder_ == null) { + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - usersAndPermissions_ = value; + serverName_ = value; onChanged(); } else { - usersAndPermissionsBuilder_.setMessage(value); + serverNameBuilder_.setMessage(value); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public Builder setUsersAndPermissions( - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder builderForValue) { - if (usersAndPermissionsBuilder_ == null) { - usersAndPermissions_ = builderForValue.build(); + public Builder setServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); onChanged(); } else { - usersAndPermissionsBuilder_.setMessage(builderForValue.build()); + serverNameBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public Builder mergeUsersAndPermissions(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions value) { - if (usersAndPermissionsBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040) && - usersAndPermissions_ != org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance()) { - usersAndPermissions_ = - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.newBuilder(usersAndPermissions_).mergeFrom(value).buildPartial(); + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); } else { - usersAndPermissions_ = value; + serverName_ = value; } onChanged(); } else { - usersAndPermissionsBuilder_.mergeFrom(value); + serverNameBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public Builder clearUsersAndPermissions() { - if (usersAndPermissionsBuilder_ == null) { - usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { - usersAndPermissionsBuilder_.clear(); + serverNameBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder getUsersAndPermissionsBuilder() { - bitField0_ |= 0x00000040; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000002; onChanged(); - return getUsersAndPermissionsFieldBuilder().getBuilder(); + return getServerNameFieldBuilder().getBuilder(); } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder() { - if (usersAndPermissionsBuilder_ != null) { - return usersAndPermissionsBuilder_.getMessageOrBuilder(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); } else { - return usersAndPermissions_; + return serverName_; } } /** - * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; + * optional .hbase.pb.ServerName server_name = 2; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> - getUsersAndPermissionsFieldBuilder() { - if (usersAndPermissionsBuilder_ == null) { - usersAndPermissionsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder>( - usersAndPermissions_, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, getParentForChildren(), isClean()); - usersAndPermissions_ = null; + serverName_ = null; } - return usersAndPermissionsBuilder_; + return serverNameBuilder_; } - // optional int64 ttl = 8 [default = 0]; - private long ttl_ ; + // required int64 seq_num = 3; + private long seqNum_ ; /** - * optional int64 ttl = 8 [default = 0]; + * required int64 seq_num = 3; */ - public boolean hasTtl() { - return ((bitField0_ & 0x00000080) == 0x00000080); + public boolean hasSeqNum() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional int64 ttl = 8 [default = 0]; + * required int64 seq_num = 3; */ - public long getTtl() { - return ttl_; + public long getSeqNum() { + return seqNum_; } /** - * optional int64 ttl = 8 [default = 0]; + * required int64 seq_num = 3; */ - public Builder setTtl(long value) { - bitField0_ |= 0x00000080; - ttl_ = value; + public Builder setSeqNum(long value) { + bitField0_ |= 0x00000004; + seqNum_ = value; onChanged(); return this; } /** - * optional int64 ttl = 8 
[default = 0]; + * required int64 seq_num = 3; */ - public Builder clearTtl() { - bitField0_ = (bitField0_ & ~0x00000080); - ttl_ = 0L; + public Builder clearSeqNum() { + bitField0_ = (bitField0_ & ~0x00000004); + seqNum_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotDescription) + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionLocation) } static { - defaultInstance = new SnapshotDescription(true); + defaultInstance = new RegionLocation(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotDescription) + // @@protoc_insertion_point(class_scope:hbase.pb.RegionLocation) } private static com.google.protobuf.Descriptors.Descriptor @@ -18685,6 +21127,16 @@ public Builder clearTtl() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_TableSchema_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableDescriptor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ColumnFamilySchema_descriptor; private static @@ -18795,6 +21247,11 @@ public Builder clearTtl() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionLocation_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RegionLocation_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -18810,61 +21267,71 @@ public Builder clearTtl() { "leName\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.By" + "tesBytesPair\0225\n\017column_families\030\003 \003(\0132\034." + "hbase.pb.ColumnFamilySchema\022/\n\rconfigura" + - "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\201\001\n" + - "\022ColumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\natt" + - "ributes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022" + - "/\n\rconfiguration\030\003 \003(\0132\030.hbase.pb.NameSt", - "ringPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002" + - "(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" + - "ame\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022" + - "\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nrepli" + - "ca_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favore" + - "d_node\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017R" + - "egionSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb." 
+ - "RegionSpecifier.RegionSpecifierType\022\r\n\005v" + - "alue\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REG" + - "ION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tT", - "imeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Co" + - "lumnFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002" + - "(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRa" + - "nge\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004" + - "port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coproc" + - "essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" + - "\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP" + - "air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" + - "sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" + - "\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu", - "e\030\002 \001(\003\"\206\001\n\024ProcedureDescription\022\021\n\tsign" + - "ature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreatio" + - "n_time\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030" + - ".hbase.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" + - "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" + - "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" + - "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" + - "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Na" + - "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfi" + - "guration\030\002 \003(\0132\030.hbase.pb.NameStringPair", - "\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url" + - "\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014" + - "\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rve" + - "rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" + - "\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" + - "\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" + - "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + - "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" + - "\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDesc" + - "ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005", - "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" + - "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" + - "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" + - "\020\001\022\r\n\tSKIPFLUSH\020\002*r\n\013CompareType\022\010\n\004LESS" + - "\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_" + - "EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER" + - "\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS" + - "\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013" + - "\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004D" + - "AYS\020\007B>\n*org.apache.hadoop.hbase.protobu", - "f.generatedB\013HBaseProtosH\001\240\001\001" + "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\257\001\n" + + "\nTableState\022)\n\005state\030\001 
\002(\0162\032.hbase.pb.Ta" + + "bleState.State\022\"\n\005table\030\002 \002(\0132\023.hbase.pb" + + ".TableName\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013", + "\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002" + + "\022\014\n\010ENABLING\020\003\"l\n\017TableDescriptor\022%\n\006sch" + + "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\0222\n\005stat" + + "e\030\002 \001(\0162\032.hbase.pb.TableState.State:\007ENA" + + "BLED\"\201\001\n\022ColumnFamilySchema\022\014\n\004name\030\001 \002(" + + "\014\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.BytesBy" + + "tesPair\022/\n\rconfiguration\030\003 \003(\0132\030.hbase.p" + + "b.NameStringPair\"\243\001\n\nRegionInfo\022\021\n\tregio" + + "n_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.p" + + "b.TableName\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_ke", + "y\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022" + + "\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*" + + "\n\014favored_node\030\001 \003(\0132\024.hbase.pb.ServerNa" + + "me\"\236\001\n\017RegionSpecifier\022;\n\004type\030\001 \002(\0162-.h" + + "base.pb.RegionSpecifier.RegionSpecifierT" + + "ype\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierTyp" + + "e\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAM" + + "E\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001" + + "(\004\"W\n\025ColumnFamilyTimeRange\022\025\n\rcolumn_fa" + + "mily\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.p", + "b.TimeRange\"A\n\nServerName\022\021\n\thost_name\030\001" + + " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + + "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + + "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + + "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014" + + "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + + "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\003\"\206\001\n\024ProcedureDescription" + + "\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n" + + "\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfiguration", + "\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n\010Empt" + + "yMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDou" + + "bleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimal" + + "Msg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016l" + + "east_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002" + + "(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022" + + "/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.NameSt" + + "ringPair\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(" + + "\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user" + + "\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002", + "(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_min" + + "or\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort" + + "\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb." 
+ + "VersionInfo\"\243\002\n\023SnapshotDescription\022\014\n\004n" + + "ame\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_tim" + + "e\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.Snap" + + "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" + + " \001(\005\022\r\n\005owner\030\006 \001(\t\022<\n\025users_and_permiss" + + "ions\030\007 \001(\0132\035.hbase.pb.UsersAndPermission" + + "s\022\016\n\003ttl\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022", + "\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocat" + + "ion\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" + + "onInfo\022)\n\013server_name\030\002 \001(\0132\024.hbase.pb.S" + + "erverName\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareTyp" + + "e\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020" + + "\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013" + + "\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NA" + + "NOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISE" + + "CONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOU" + + "RS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbas", + "e.protobuf.generatedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -18877,138 +21344,156 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableSchema_descriptor, new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }); - internal_static_hbase_pb_ColumnFamilySchema_descriptor = + internal_static_hbase_pb_TableState_descriptor = getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_TableState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableState_descriptor, + new java.lang.String[] { "State", "Table", "Timestamp", }); + internal_static_hbase_pb_TableDescriptor_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableDescriptor_descriptor, + new java.lang.String[] { "Schema", "State", }); + internal_static_hbase_pb_ColumnFamilySchema_descriptor = + getDescriptor().getMessageTypes().get(3); internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ColumnFamilySchema_descriptor, new java.lang.String[] { "Name", "Attributes", "Configuration", }); internal_static_hbase_pb_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionInfo_descriptor, new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", }); internal_static_hbase_pb_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_FavoredNodes_descriptor, new java.lang.String[] { "FavoredNode", }); internal_static_hbase_pb_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionSpecifier_descriptor, new java.lang.String[] { "Type", "Value", }); internal_static_hbase_pb_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_hbase_pb_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TimeRange_descriptor, new java.lang.String[] { "From", "To", }); internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(8); internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor, new java.lang.String[] { "ColumnFamily", "TimeRange", }); internal_static_hbase_pb_ServerName_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(9); internal_static_hbase_pb_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }); internal_static_hbase_pb_Coprocessor_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Coprocessor_descriptor, new java.lang.String[] { "Name", }); internal_static_hbase_pb_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameStringPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameBytesPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BytesBytesPair_descriptor, new java.lang.String[] { "First", "Second", }); internal_static_hbase_pb_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameInt64Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_ProcedureDescription_descriptor = - getDescriptor().getMessageTypes().get(13); + 
getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ProcedureDescription_descriptor, new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", }); internal_static_hbase_pb_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EmptyMsg_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_LongMsg_descriptor, new java.lang.String[] { "LongMsg", }); internal_static_hbase_pb_DoubleMsg_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DoubleMsg_descriptor, new java.lang.String[] { "DoubleMsg", }); internal_static_hbase_pb_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BigDecimalMsg_descriptor, new java.lang.String[] { "BigdecimalMsg", }); internal_static_hbase_pb_UUID_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }); internal_static_hbase_pb_NamespaceDescriptor_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NamespaceDescriptor_descriptor, new java.lang.String[] { "Name", "Configuration", }); internal_static_hbase_pb_VersionInfo_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_VersionInfo_descriptor, new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", }); internal_static_hbase_pb_RegionServerInfo_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionServerInfo_descriptor, new java.lang.String[] { "InfoPort", "VersionInfo", }); internal_static_hbase_pb_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hbase_pb_SnapshotDescription_descriptor, new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", }); + internal_static_hbase_pb_RegionLocation_descriptor = + getDescriptor().getMessageTypes().get(25); + internal_static_hbase_pb_RegionLocation_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RegionLocation_descriptor, + new java.lang.String[] { "RegionInfo", "ServerName", "SeqNum", }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index d6eeb558b22f..2eaed114461f 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -49724,28 +49724,42 @@ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder // @@protoc_insertion_point(class_scope:hbase.pb.GetTableNamesResponse) } - public interface GetClusterStatusRequestOrBuilder + public interface GetTableStateRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table_name = 1; + /** + * required .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); } /** - * Protobuf type {@code hbase.pb.GetClusterStatusRequest} + * Protobuf type {@code hbase.pb.GetTableStateRequest} */ - public static final class GetClusterStatusRequest extends + public static final class GetTableStateRequest extends com.google.protobuf.GeneratedMessage - implements GetClusterStatusRequestOrBuilder { - // Use GetClusterStatusRequest.newBuilder() to construct. - private GetClusterStatusRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetTableStateRequestOrBuilder { + // Use GetTableStateRequest.newBuilder() to construct. 
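The new GetTableStateRequest carries a single required table_name, mirroring the TableState message added to HBaseProtos above. A minimal construction sketch, again assuming the TableProtos.TableName builder (the namespace and qualifier values are illustrative):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

GetTableStateRequest req = GetTableStateRequest.newBuilder()
    .setTableName(TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("my_table"))
        .build())
    .build();   // isInitialized() also verifies the nested required TableName
// Presence of the required field round-trips on the wire:
assert GetTableStateRequest.parseFrom(req.toByteArray()).hasTableName();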
+ private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetClusterStatusRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetClusterStatusRequest defaultInstance; - public static GetClusterStatusRequest getDefaultInstance() { + private static final GetTableStateRequest defaultInstance; + public static GetTableStateRequest getDefaultInstance() { return defaultInstance; } - public GetClusterStatusRequest getDefaultInstanceForType() { + public GetTableStateRequest getDefaultInstanceForType() { return defaultInstance; } @@ -49755,11 +49769,12 @@ public GetClusterStatusRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetClusterStatusRequest( + private GetTableStateRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -49777,6 +49792,19 @@ private GetClusterStatusRequest( } break; } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -49791,38 +49819,70 @@ private GetClusterStatusRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetClusterStatusRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { - return new GetClusterStatusRequest(input, extensionRegistry); + return new GetTableStateRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -49830,6 +49890,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } getUnknownFields().writeTo(output); } @@ -49839,6 +49902,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -49856,12 +49923,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -49875,58 +49947,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -49935,7 +50011,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -49947,24 +50023,24 @@ protected Builder newBuilderForType(
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.GetClusterStatusRequest}
+     * Protobuf type {@code hbase.pb.GetTableStateRequest}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequestOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
       }
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -49976,6 +50052,7 @@ private Builder(
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
         }
       }
       private static Builder create() {
@@ -49984,6 +50061,12 @@ private static Builder create() {
 
       public Builder clear() {
         super.clear();
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -49993,43 +50076,65 @@ public Builder clone() {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest(this);
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this;
+        if (other.hasTableName()) {
+          mergeTableName(other.getTableName());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
 
       public final boolean isInitialized() {
+        if (!hasTableName()) {
+
+          return false;
+        }
+        if (!getTableName().isInitialized()) {
+
+          return false;
+        }
         return true;
       }
 
@@ -50037,11 +50142,11 @@ public Builder mergeFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -50050,106 +50155,224 @@ public Builder mergeFrom(
         }
         return this;
       }
+      private int bitField0_;
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusRequest)
-    }
-
-    static {
-      defaultInstance = new GetClusterStatusRequest(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusRequest)
-  }
-
-  public interface GetClusterStatusResponseOrBuilder
-    extends com.google.protobuf.MessageOrBuilder {
-
-    // required .hbase.pb.ClusterStatus cluster_status = 1;
-    /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
-     */
-    boolean hasClusterStatus();
-    /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus();
-    /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.GetClusterStatusResponse}
-   */
-  public static final class GetClusterStatusResponse extends
-    com.google.protobuf.GeneratedMessage
-    implements GetClusterStatusResponseOrBuilder {
-    // Use GetClusterStatusResponse.newBuilder() to construct.
-    private GetClusterStatusResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private GetClusterStatusResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final GetClusterStatusResponse defaultInstance;
-    public static GetClusterStatusResponse getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public GetClusterStatusResponse getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private GetClusterStatusResponse(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = clusterStatus_.toBuilder();
-              }
-              clusterStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(clusterStatus_);
-                clusterStatus_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
+      // required .hbase.pb.TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      /**
+       * required .hbase.pb.TableName table_name = 1;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateRequest)
+    }
+
+    static {
+      defaultInstance = new GetTableStateRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateRequest)
+  }
+
+  public interface GetTableStateResponseOrBuilder
+    extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableState table_state = 1;
+    /**
+     * required .hbase.pb.TableState table_state = 1;
+     */
+    boolean hasTableState();
+    /**
+     * required .hbase.pb.TableState table_state = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState();
+    /**
+     * required .hbase.pb.TableState table_state = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetTableStateResponse}
+   */
+  public static final class GetTableStateResponse extends
+    com.google.protobuf.GeneratedMessage
+    implements GetTableStateResponseOrBuilder {
+    // Use GetTableStateResponse.newBuilder() to construct.
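For orientation, a minimal client-side sketch of the GetTableStateRequest message wired up above. It leans only on what this hunk shows (newBuilder()/setTableName()/build() and the generated parseFrom overloads), plus the assumption that TableProtos.TableName carries bytes namespace/qualifier fields, as in HBase's Table.proto of this era; the class and table names here are illustrative, not part of the patch.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

public class GetTableStateRequestSketch {
  public static void main(String[] args) throws Exception {
    // table_name is a required field, so it must be populated before build().
    // Assumption: TableName exposes setNamespace/setQualifier bytes setters.
    TableName tableName = TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("my_table"))
        .build();

    // build() runs the generated isInitialized() check; omitting
    // setTableName(...) would raise an UninitializedMessageException.
    GetTableStateRequest request = GetTableStateRequest.newBuilder()
        .setTableName(tableName)
        .build();

    // Round-trip through the generated parser, matching the parseFrom
    // overloads renamed in this hunk.
    GetTableStateRequest parsed = GetTableStateRequest.parseFrom(request.toByteArray());
    System.out.println(parsed.getTableName().getQualifier().toStringUtf8());
  }
}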
+    private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetTableStateResponse defaultInstance;
+    public static GetTableStateResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetTableStateResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetTableStateResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableState_.toBuilder();
+              }
+              tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableState_);
+                tableState_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
       } finally {
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
@@ -50157,67 +50380,67 @@ private GetClusterStatusResponse(
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
     }
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class);
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<GetClusterStatusResponse> PARSER =
-        new com.google.protobuf.AbstractParser<GetClusterStatusResponse>() {
-      public GetClusterStatusResponse parsePartialFrom(
+    public static com.google.protobuf.Parser<GetTableStateResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetTableStateResponse>() {
+      public GetTableStateResponse parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new GetClusterStatusResponse(input, extensionRegistry);
+        return new GetTableStateResponse(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<GetClusterStatusResponse> getParserForType() {
+    public com.google.protobuf.Parser<GetTableStateResponse> getParserForType() {
       return PARSER;
     }
 
     private int bitField0_;
-    // required .hbase.pb.ClusterStatus cluster_status = 1;
-    public static final int CLUSTER_STATUS_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_;
+    // required .hbase.pb.TableState table_state = 1;
+    public static final int TABLE_STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_;
     /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
+     * required .hbase.pb.TableState table_state = 1;
     */
-    public boolean hasClusterStatus() {
+    public boolean hasTableState() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
+     * required .hbase.pb.TableState table_state = 1;
     */
-    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() {
-      return clusterStatus_;
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+      return tableState_;
     }
     /**
-     * required .hbase.pb.ClusterStatus cluster_status = 1;
+     * required .hbase.pb.TableState table_state = 1;
     */
-    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() {
-      return clusterStatus_;
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+      return tableState_;
     }
 
     private void initFields() {
-      clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
+      tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
 
-      if (!hasClusterStatus()) {
+      if (!hasTableState()) {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!getClusterStatus().isInitialized()) {
+      if (!getTableState().isInitialized()) {
         memoizedIsInitialized = 0;
         return false;
       }
@@ -50229,7 +50452,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, clusterStatus_);
+        output.writeMessage(1, tableState_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -50242,7 +50465,7 @@ public int getSerializedSize() {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, clusterStatus_);
+          .computeMessageSize(1, tableState_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -50261,16 +50484,16 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj;
 
       boolean result = true;
-      result = result && (hasClusterStatus() == other.hasClusterStatus());
-      if (hasClusterStatus()) {
-        result = result && getClusterStatus()
-            .equals(other.getClusterStatus());
+      result = result && (hasTableState() == other.hasTableState());
+      if (hasTableState()) {
+        result = result && getTableState()
+            .equals(other.getTableState());
       }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
@@ -50285,62 +50508,62 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasClusterStatus()) {
-        hash = (37 * hash) + CLUSTER_STATUS_FIELD_NUMBER;
-        hash = (53 * hash) + getClusterStatus().hashCode();
+      if (hasTableState()) {
+        hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER;
+        hash = (53 * hash) + getTableState().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -50349,7 +50572,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster
 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
@@ -50361,24 +50584,24 @@ protected Builder newBuilderForType(
      return builder;
    }
    /**
-     * Protobuf type {@code hbase.pb.GetClusterStatusResponse}
+     * Protobuf type {@code hbase.pb.GetTableStateResponse}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponseOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
      }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
@@ -50390,7 +50613,7 @@ private Builder(
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getClusterStatusFieldBuilder();
+          getTableStateFieldBuilder();
        }
      }
      private static Builder create() {
@@ -50399,10 +50622,10 @@ private static Builder create() {
 
      public Builder clear() {
        super.clear();
-        if (clusterStatusBuilder_ == null) {
-          clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
        } else {
-          clusterStatusBuilder_.clear();
+          tableStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
@@ -50414,32 +50637,32 @@ public Builder clone() {
 
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse(this);
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
-        if (clusterStatusBuilder_ == null) {
-          result.clusterStatus_ = clusterStatus_;
+        if (tableStateBuilder_ == null) {
+          result.tableState_ = tableState_;
        } else {
-          result.clusterStatus_ = clusterStatusBuilder_.build();
+          result.tableState_ = tableStateBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
@@ -50447,29 +50670,29 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR
      }
 
      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()) return this;
-        if (other.hasClusterStatus()) {
-          mergeClusterStatus(other.getClusterStatus());
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this;
+        if (other.hasTableState()) {
+          mergeTableState(other.getTableState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
 
      public final boolean isInitialized() {
-        if (!hasClusterStatus()) {
+        if (!hasTableState()) {
 
          return false;
        }
-        if (!getClusterStatus().isInitialized()) {
+        if (!getTableState().isInitialized()) {
 
          return false;
        }
@@ -50480,11 +50703,11 @@ public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
@@ -50495,156 +50718,156 @@ public Builder mergeFrom(
      }
      private int bitField0_;
 
-      // required .hbase.pb.ClusterStatus cluster_status = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
+      // required .hbase.pb.TableState table_state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> clusterStatusBuilder_;
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_;
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public boolean hasClusterStatus() {
+      public boolean hasTableState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() {
-        if (clusterStatusBuilder_ == null) {
-          return clusterStatus_;
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+        if (tableStateBuilder_ == null) {
+          return tableState_;
        } else {
-          return clusterStatusBuilder_.getMessage();
+          return tableStateBuilder_.getMessage();
        }
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public Builder setClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) {
-        if (clusterStatusBuilder_ == null) {
+      public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
-          clusterStatus_ = value;
+          tableState_ = value;
          onChanged();
        } else {
-          clusterStatusBuilder_.setMessage(value);
+          tableStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public Builder setClusterStatus(
-          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder builderForValue) {
-        if (clusterStatusBuilder_ == null) {
-          clusterStatus_ = builderForValue.build();
+      public Builder setTableState(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) {
+        if (tableStateBuilder_ == null) {
+          tableState_ = builderForValue.build();
          onChanged();
        } else {
-          clusterStatusBuilder_.setMessage(builderForValue.build());
+          tableStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public Builder mergeClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) {
-        if (clusterStatusBuilder_ == null) {
+      public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              clusterStatus_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) {
-            clusterStatus_ =
-              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder(clusterStatus_).mergeFrom(value).buildPartial();
+              tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) {
+            tableState_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial();
          } else {
-            clusterStatus_ = value;
+            tableState_ = value;
          }
          onChanged();
        } else {
-          clusterStatusBuilder_.mergeFrom(value);
+          tableStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public Builder clearClusterStatus() {
-        if (clusterStatusBuilder_ == null) {
-          clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
+      public Builder clearTableState() {
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
          onChanged();
        } else {
-          clusterStatusBuilder_.clear();
+          tableStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder getClusterStatusBuilder() {
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
-        return getClusterStatusFieldBuilder().getBuilder();
+        return getTableStateFieldBuilder().getBuilder();
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() {
-        if (clusterStatusBuilder_ != null) {
-          return clusterStatusBuilder_.getMessageOrBuilder();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+        if (tableStateBuilder_ != null) {
+          return tableStateBuilder_.getMessageOrBuilder();
        } else {
-          return clusterStatus_;
+          return tableState_;
        }
      }
      /**
-       * required .hbase.pb.ClusterStatus cluster_status = 1;
+       * required .hbase.pb.TableState table_state = 1;
       */
      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder>
-          getClusterStatusFieldBuilder() {
-        if (clusterStatusBuilder_ == null) {
-          clusterStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder>(
-              clusterStatus_,
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>
+          getTableStateFieldBuilder() {
+        if (tableStateBuilder_ == null) {
+          tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>(
+              tableState_,
              getParentForChildren(),
              isClean());
-          clusterStatus_ = null;
+          tableState_ = null;
        }
-        return clusterStatusBuilder_;
+        return tableStateBuilder_;
      }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusResponse)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateResponse)
    }
 
    static {
-      defaultInstance = new GetClusterStatusResponse(true);
+      defaultInstance = new GetTableStateResponse(true);
      defaultInstance.initFields();
    }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusResponse)
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateResponse)
  }
 
-  public interface IsMasterRunningRequestOrBuilder
+  public interface GetClusterStatusRequestOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  }
  /**
-   * Protobuf type {@code hbase.pb.IsMasterRunningRequest}
+   * Protobuf type {@code hbase.pb.GetClusterStatusRequest}
   */
-  public static final class IsMasterRunningRequest extends
+  public static final class GetClusterStatusRequest extends
    com.google.protobuf.GeneratedMessage
-    implements IsMasterRunningRequestOrBuilder {
-    // Use IsMasterRunningRequest.newBuilder() to construct.
-    private IsMasterRunningRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements GetClusterStatusRequestOrBuilder {
+    // Use GetClusterStatusRequest.newBuilder() to construct.
+    private GetClusterStatusRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
-    private IsMasterRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private GetClusterStatusRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final IsMasterRunningRequest defaultInstance;
-    public static IsMasterRunningRequest getDefaultInstance() {
+    private static final GetClusterStatusRequest defaultInstance;
+    public static GetClusterStatusRequest getDefaultInstance() {
      return defaultInstance;
    }
 
-    public IsMasterRunningRequest getDefaultInstanceForType() {
+    public GetClusterStatusRequest getDefaultInstanceForType() {
      return defaultInstance;
    }
 
@@ -50654,7 +50877,7 @@ public IsMasterRunningRequest getDefaultInstanceForType() {
    getUnknownFields() {
      return this.unknownFields;
    }
-    private IsMasterRunningRequest(
+    private GetClusterStatusRequest(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
@@ -50690,28 +50913,28 @@ private IsMasterRunningRequest(
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
    }
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class);
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class);
    }
 
-    public static com.google.protobuf.Parser<IsMasterRunningRequest> PARSER =
-        new com.google.protobuf.AbstractParser<IsMasterRunningRequest>() {
-      public IsMasterRunningRequest parsePartialFrom(
+    public static com.google.protobuf.Parser<GetClusterStatusRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetClusterStatusRequest>() {
+      public GetClusterStatusRequest parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new IsMasterRunningRequest(input, extensionRegistry);
+        return new GetClusterStatusRequest(input, extensionRegistry);
      }
    };
 
    @java.lang.Override
-    public com.google.protobuf.Parser<IsMasterRunningRequest> getParserForType() {
+    public com.google.protobuf.Parser<GetClusterStatusRequest> getParserForType() {
      return PARSER;
    }
 
@@ -50755,10 +50978,10 @@ public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)) {
        return super.equals(obj);
      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) obj;
 
      boolean result = true;
      result = result &&
@@ -50779,53 +51002,53 @@ public int hashCode() {
      return hash;
    }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -50834,7 +51057,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRu
 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
@@ -50846,24 +51069,24 @@ protected Builder newBuilderForType(
      return builder;
    }
    /**
-     * Protobuf type {@code hbase.pb.IsMasterRunningRequest}
+     * Protobuf type {@code hbase.pb.GetClusterStatusRequest}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequestOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequestOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class);
      }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
@@ -50892,38 +51115,38 @@ public Builder clone() {
 
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance();
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest(this);
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest(this);
        onBuilt();
        return result;
      }
 
      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
@@ -50936,11 +51159,11 @@ public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
@@ -50950,49 +51173,53 @@ public Builder mergeFrom(
        return this;
      }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningRequest)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusRequest)
    }
 
    static {
-      defaultInstance = new IsMasterRunningRequest(true);
+      defaultInstance = new GetClusterStatusRequest(true);
      defaultInstance.initFields();
    }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningRequest)
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusRequest)
  }
 
-  public interface IsMasterRunningResponseOrBuilder
+  public interface GetClusterStatusResponseOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
 
-    // required bool is_master_running = 1;
+    // required .hbase.pb.ClusterStatus cluster_status = 1;
    /**
-     * required bool is_master_running = 1;
+     * required .hbase.pb.ClusterStatus cluster_status = 1;
     */
-    boolean hasIsMasterRunning();
+    boolean hasClusterStatus();
    /**
-     * required bool is_master_running = 1;
+     * required .hbase.pb.ClusterStatus cluster_status = 1;
     */
-    boolean getIsMasterRunning();
+    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus();
+    /**
+     * required .hbase.pb.ClusterStatus cluster_status = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder();
  }
  /**
-   * Protobuf type {@code hbase.pb.IsMasterRunningResponse}
+   * Protobuf type {@code hbase.pb.GetClusterStatusResponse}
   */
-  public static final class IsMasterRunningResponse extends
+  public static final class GetClusterStatusResponse extends
    com.google.protobuf.GeneratedMessage
-    implements IsMasterRunningResponseOrBuilder {
-    // Use IsMasterRunningResponse.newBuilder() to construct.
-    private IsMasterRunningResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements GetClusterStatusResponseOrBuilder {
+    // Use GetClusterStatusResponse.newBuilder() to construct.
+ private GetClusterStatusResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsMasterRunningResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterStatusResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsMasterRunningResponse defaultInstance; - public static IsMasterRunningResponse getDefaultInstance() { + private static final GetClusterStatusResponse defaultInstance; + public static GetClusterStatusResponse getDefaultInstance() { return defaultInstance; } - public IsMasterRunningResponse getDefaultInstanceForType() { + public GetClusterStatusResponse getDefaultInstanceForType() { return defaultInstance; } @@ -51002,7 +51229,7 @@ public IsMasterRunningResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsMasterRunningResponse( + private GetClusterStatusResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -51025,9 +51252,17 @@ private IsMasterRunningResponse( } break; } - case 8: { + case 10: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = clusterStatus_.toBuilder(); + } + clusterStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(clusterStatus_); + clusterStatus_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000001; - isMasterRunning_ = input.readBool(); break; } } @@ -51044,57 +51279,67 @@ private IsMasterRunningResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsMasterRunningResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetClusterStatusResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsMasterRunningResponse(input, 
extensionRegistry); + return new GetClusterStatusResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bool is_master_running = 1; - public static final int IS_MASTER_RUNNING_FIELD_NUMBER = 1; - private boolean isMasterRunning_; + // required .hbase.pb.ClusterStatus cluster_status = 1; + public static final int CLUSTER_STATUS_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_; /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean hasIsMasterRunning() { + public boolean hasClusterStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean getIsMasterRunning() { - return isMasterRunning_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { + return clusterStatus_; + } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { + return clusterStatus_; } private void initFields() { - isMasterRunning_ = false; + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasIsMasterRunning()) { + if (!hasClusterStatus()) { + memoizedIsInitialized = 0; + return false; + } + if (!getClusterStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -51106,7 +51351,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, isMasterRunning_); + output.writeMessage(1, clusterStatus_); } getUnknownFields().writeTo(output); } @@ -51119,7 +51364,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, isMasterRunning_); + .computeMessageSize(1, clusterStatus_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -51138,16 +51383,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) obj; boolean result = true; - result = result && (hasIsMasterRunning() == other.hasIsMasterRunning()); - if (hasIsMasterRunning()) { - result = result && (getIsMasterRunning() - == other.getIsMasterRunning()); + result = result && (hasClusterStatus() == 
other.hasClusterStatus()); + if (hasClusterStatus()) { + result = result && getClusterStatus() + .equals(other.getClusterStatus()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -51162,62 +51407,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIsMasterRunning()) { - hash = (37 * hash) + IS_MASTER_RUNNING_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsMasterRunning()); + if (hasClusterStatus()) { + hash = (37 * hash) + CLUSTER_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getClusterStatus().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -51226,7 +51471,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -51238,24 +51483,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsMasterRunningResponse} + * Protobuf type {@code hbase.pb.GetClusterStatusResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -51267,6 +51512,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getClusterStatusFieldBuilder(); } } private static Builder create() { @@ -51275,7 
+51521,11 @@ private static Builder create() { public Builder clear() { super.clear(); - isMasterRunning_ = false; + if (clusterStatusBuilder_ == null) { + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); + } else { + clusterStatusBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -51286,54 +51536,62 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.isMasterRunning_ = isMasterRunning_; + if (clusterStatusBuilder_ == null) { + result.clusterStatus_ = clusterStatus_; + } else { + result.clusterStatus_ = clusterStatusBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()) return this; - if (other.hasIsMasterRunning()) { - 
setIsMasterRunning(other.getIsMasterRunning()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()) return this; + if (other.hasClusterStatus()) { + mergeClusterStatus(other.getClusterStatus()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasIsMasterRunning()) { + if (!hasClusterStatus()) { + + return false; + } + if (!getClusterStatus().isInitialized()) { return false; } @@ -51344,11 +51602,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -51359,86 +51617,156 @@ public Builder mergeFrom( } private int bitField0_; - // required bool is_master_running = 1; - private boolean isMasterRunning_ ; + // required .hbase.pb.ClusterStatus cluster_status = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> clusterStatusBuilder_; /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean hasIsMasterRunning() { + public boolean hasClusterStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean getIsMasterRunning() { - return isMasterRunning_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { + if (clusterStatusBuilder_ == null) { + return clusterStatus_; + } else { + return clusterStatusBuilder_.getMessage(); + } } /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder setIsMasterRunning(boolean value) { + public Builder setClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { + if (clusterStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + clusterStatus_ = value; + onChanged(); + } else { + clusterStatusBuilder_.setMessage(value); + } bitField0_ |= 0x00000001; - isMasterRunning_ = value; - onChanged(); return this; } /** - * required bool is_master_running = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder 
clearIsMasterRunning() { + public Builder setClusterStatus( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder builderForValue) { + if (clusterStatusBuilder_ == null) { + clusterStatus_ = builderForValue.build(); + onChanged(); + } else { + clusterStatusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + public Builder mergeClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { + if (clusterStatusBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + clusterStatus_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) { + clusterStatus_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder(clusterStatus_).mergeFrom(value).buildPartial(); + } else { + clusterStatus_ = value; + } + onChanged(); + } else { + clusterStatusBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + public Builder clearClusterStatus() { + if (clusterStatusBuilder_ == null) { + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); + onChanged(); + } else { + clusterStatusBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - isMasterRunning_ = false; - onChanged(); return this; } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder getClusterStatusBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getClusterStatusFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { + if (clusterStatusBuilder_ != null) { + return clusterStatusBuilder_.getMessageOrBuilder(); + } else { + return clusterStatus_; + } + } + /** + * required .hbase.pb.ClusterStatus cluster_status = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> + getClusterStatusFieldBuilder() { + if (clusterStatusBuilder_ == null) { + clusterStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder>( + clusterStatus_, + getParentForChildren(), + isClean()); + clusterStatus_ = null; + } + return clusterStatusBuilder_; + } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusResponse) } static { - defaultInstance = new IsMasterRunningResponse(true); + defaultInstance = new GetClusterStatusResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusResponse) } - public interface 
ExecProcedureRequestOrBuilder + public interface IsMasterRunningRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.ProcedureDescription procedure = 1; - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - boolean hasProcedure(); - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); } /** - * Protobuf type {@code hbase.pb.ExecProcedureRequest} + * Protobuf type {@code hbase.pb.IsMasterRunningRequest} */ - public static final class ExecProcedureRequest extends + public static final class IsMasterRunningRequest extends com.google.protobuf.GeneratedMessage - implements ExecProcedureRequestOrBuilder { - // Use ExecProcedureRequest.newBuilder() to construct. - private ExecProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsMasterRunningRequestOrBuilder { + // Use IsMasterRunningRequest.newBuilder() to construct. + private IsMasterRunningRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ExecProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsMasterRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ExecProcedureRequest defaultInstance; - public static ExecProcedureRequest getDefaultInstance() { + private static final IsMasterRunningRequest defaultInstance; + public static IsMasterRunningRequest getDefaultInstance() { return defaultInstance; } - public ExecProcedureRequest getDefaultInstanceForType() { + public IsMasterRunningRequest getDefaultInstanceForType() { return defaultInstance; } @@ -51448,12 +51776,11 @@ public ExecProcedureRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ExecProcedureRequest( + private IsMasterRunningRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -51471,19 +51798,6 @@ private ExecProcedureRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = procedure_.toBuilder(); - } - procedure_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(procedure_); - procedure_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -51498,70 +51812,38 @@ private ExecProcedureRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ExecProcedureRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsMasterRunningRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ExecProcedureRequest(input, extensionRegistry); + return new IsMasterRunningRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required .hbase.pb.ProcedureDescription procedure = 1; - public static final int PROCEDURE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public boolean hasProcedure() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { - return procedure_; - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { - return procedure_; - } - private void initFields() { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasProcedure()) { - memoizedIsInitialized = 0; - return false; - } - if (!getProcedure().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -51569,9 +51851,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, procedure_); - } getUnknownFields().writeTo(output); } @@ -51581,10 +51860,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, procedure_); - } size += getUnknownFields().getSerializedSize(); 
memoizedSerializedSize = size; return size; @@ -51602,17 +51877,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) obj; boolean result = true; - result = result && (hasProcedure() == other.hasProcedure()); - if (hasProcedure()) { - result = result && getProcedure() - .equals(other.getProcedure()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -51626,62 +51896,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcedure()) { - hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; - hash = (53 * hash) + getProcedure().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -51690,7 +51956,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -51702,24 +51968,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ExecProcedureRequest} + * Protobuf type {@code hbase.pb.IsMasterRunningRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -51731,7 +51997,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getProcedureFieldBuilder(); } } private static Builder create() { @@ -51740,12 +52005,6 @@ private static Builder create() { public Builder clear() { super.clear(); - if (procedureBuilder_ == null) { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - } else { - procedureBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -51755,65 +52014,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (procedureBuilder_ == null) { - result.procedure_ = procedure_; - } else { - result.procedure_ = procedureBuilder_.build(); - } - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)other); + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance()) return this; - if (other.hasProcedure()) { - mergeProcedure(other.getProcedure()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasProcedure()) { - - return false; - } - if (!getProcedure().isInitialized()) { - - return false; - } return true; } @@ -51821,11 +52058,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -51834,178 +52071,50 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - // required .hbase.pb.ProcedureDescription procedure = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public boolean hasProcedure() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { - if (procedureBuilder_ == null) { - return procedure_; - } else { - return procedureBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (procedureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - procedure_ = value; - onChanged(); - } else { - procedureBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public Builder setProcedure( - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { - if (procedureBuilder_ == null) { - procedure_ = builderForValue.build(); - onChanged(); - } else { - procedureBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (procedureBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - procedure_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { - procedure_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(procedure_).mergeFrom(value).buildPartial(); - } else { - procedure_ = value; - } - onChanged(); - } else { - procedureBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public Builder clearProcedure() { - if (procedureBuilder_ == null) { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - onChanged(); - } else { - procedureBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getProcedureFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { - if (procedureBuilder_ != null) { - return procedureBuilder_.getMessageOrBuilder(); - } else { - return procedure_; - } - } - /** - * required .hbase.pb.ProcedureDescription procedure = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> - getProcedureFieldBuilder() { - if (procedureBuilder_ == null) { - procedureBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( - procedure_, - getParentForChildren(), - isClean()); - procedure_ = null; - } - return procedureBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningRequest) } static { - defaultInstance = new ExecProcedureRequest(true); + defaultInstance = new IsMasterRunningRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningRequest) } - public interface ExecProcedureResponseOrBuilder + public interface IsMasterRunningResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional int64 expected_timeout = 1; - /** - * optional int64 expected_timeout = 1; - */ - boolean 
hasExpectedTimeout(); - /** - * optional int64 expected_timeout = 1; - */ - long getExpectedTimeout(); - - // optional bytes return_data = 2; + // required bool is_master_running = 1; /** - * optional bytes return_data = 2; + * required bool is_master_running = 1; */ - boolean hasReturnData(); + boolean hasIsMasterRunning(); /** - * optional bytes return_data = 2; + * required bool is_master_running = 1; */ - com.google.protobuf.ByteString getReturnData(); + boolean getIsMasterRunning(); } /** - * Protobuf type {@code hbase.pb.ExecProcedureResponse} + * Protobuf type {@code hbase.pb.IsMasterRunningResponse} */ - public static final class ExecProcedureResponse extends + public static final class IsMasterRunningResponse extends com.google.protobuf.GeneratedMessage - implements ExecProcedureResponseOrBuilder { - // Use ExecProcedureResponse.newBuilder() to construct. - private ExecProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsMasterRunningResponseOrBuilder { + // Use IsMasterRunningResponse.newBuilder() to construct. + private IsMasterRunningResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ExecProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsMasterRunningResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ExecProcedureResponse defaultInstance; - public static ExecProcedureResponse getDefaultInstance() { + private static final IsMasterRunningResponse defaultInstance; + public static IsMasterRunningResponse getDefaultInstance() { return defaultInstance; } - public ExecProcedureResponse getDefaultInstanceForType() { + public IsMasterRunningResponse getDefaultInstanceForType() { return defaultInstance; } @@ -52015,7 +52124,7 @@ public ExecProcedureResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ExecProcedureResponse( + private IsMasterRunningResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -52040,12 +52149,7 @@ private ExecProcedureResponse( } case 8: { bitField0_ |= 0x00000001; - expectedTimeout_ = input.readInt64(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - returnData_ = input.readBytes(); + isMasterRunning_ = input.readBool(); break; } } @@ -52062,73 +52166,60 @@ private ExecProcedureResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ExecProcedureResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsMasterRunningResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ExecProcedureResponse(input, extensionRegistry); + return new IsMasterRunningResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional int64 expected_timeout = 1; - public static final int EXPECTED_TIMEOUT_FIELD_NUMBER = 1; - private long expectedTimeout_; + // required bool is_master_running = 1; + public static final int IS_MASTER_RUNNING_FIELD_NUMBER = 1; + private boolean isMasterRunning_; /** - * optional int64 expected_timeout = 1; + * required bool is_master_running = 1; */ - public boolean hasExpectedTimeout() { + public boolean hasIsMasterRunning() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional int64 expected_timeout = 1; - */ - public long getExpectedTimeout() { - return expectedTimeout_; - } - - // optional bytes return_data = 2; - public static final int RETURN_DATA_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString returnData_; - /** - * optional bytes return_data = 2; - */ - public boolean hasReturnData() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bytes return_data = 2; + * required bool is_master_running = 1; */ - public com.google.protobuf.ByteString getReturnData() { - return returnData_; + public boolean getIsMasterRunning() { + return isMasterRunning_; } private void initFields() { - expectedTimeout_ = 0L; - returnData_ = com.google.protobuf.ByteString.EMPTY; + isMasterRunning_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasIsMasterRunning()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -52137,10 +52228,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, expectedTimeout_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, returnData_); + output.writeBool(1, isMasterRunning_); } getUnknownFields().writeTo(output); } @@ -52153,11 +52241,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, expectedTimeout_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, returnData_); + .computeBoolSize(1, isMasterRunning_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -52176,21 +52260,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) obj; boolean result = true; - result = result && (hasExpectedTimeout() == other.hasExpectedTimeout()); - if (hasExpectedTimeout()) { - result = result && (getExpectedTimeout() - == other.getExpectedTimeout()); - } - result = result && (hasReturnData() == other.hasReturnData()); - if (hasReturnData()) { - result = result && getReturnData() - .equals(other.getReturnData()); + result = result && (hasIsMasterRunning() == other.hasIsMasterRunning()); + if (hasIsMasterRunning()) { + result = result && (getIsMasterRunning() + == other.getIsMasterRunning()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -52205,66 +52284,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasExpectedTimeout()) { - hash = (37 * hash) + EXPECTED_TIMEOUT_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getExpectedTimeout()); - } - if (hasReturnData()) { - hash = (37 * hash) + RETURN_DATA_FIELD_NUMBER; - hash = (53 * hash) + getReturnData().hashCode(); + if (hasIsMasterRunning()) { + hash = (37 * hash) + IS_MASTER_RUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsMasterRunning()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse 
parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -52273,7 +52348,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -52285,24 +52360,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ExecProcedureResponse} + * Protobuf type {@code hbase.pb.IsMasterRunningResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable 
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -52322,10 +52397,8 @@ private static Builder create() { public Builder clear() { super.clear(); - expectedTimeout_ = 0L; + isMasterRunning_ = false; bitField0_ = (bitField0_ & ~0x00000001); - returnData_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -52335,60 +52408,57 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.expectedTimeout_ = expectedTimeout_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.returnData_ = returnData_; + result.isMasterRunning_ = isMasterRunning_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()) return this; - if (other.hasExpectedTimeout()) { - setExpectedTimeout(other.getExpectedTimeout()); - } - if (other.hasReturnData()) { - setReturnData(other.getReturnData()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()) return this; + if (other.hasIsMasterRunning()) { + setIsMasterRunning(other.getIsMasterRunning()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasIsMasterRunning()) { + + return false; + } return true; } @@ -52396,11 +52466,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -52411,122 +52481,86 @@ public Builder mergeFrom( } private int bitField0_; - // optional int64 expected_timeout = 1; - private long expectedTimeout_ ; + // required bool is_master_running = 1; + private boolean isMasterRunning_ ; /** - * optional int64 expected_timeout = 1; + * required bool is_master_running = 1; */ - public boolean hasExpectedTimeout() { + public boolean hasIsMasterRunning() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional int64 expected_timeout = 1; + * required bool is_master_running = 1; */ - public long getExpectedTimeout() { - return expectedTimeout_; + public boolean getIsMasterRunning() { + return isMasterRunning_; } /** - * optional int64 expected_timeout = 1; + * required bool is_master_running = 1; */ - public Builder setExpectedTimeout(long value) { + public Builder setIsMasterRunning(boolean value) { bitField0_ |= 0x00000001; - expectedTimeout_ = value; + isMasterRunning_ = value; onChanged(); return this; } /** - * optional int64 expected_timeout = 1; + * required bool is_master_running = 1; */ - public Builder clearExpectedTimeout() { + public Builder clearIsMasterRunning() { bitField0_ = (bitField0_ & ~0x00000001); - expectedTimeout_ = 0L; - onChanged(); - return this; - } - - // optional bytes return_data = 2; - private com.google.protobuf.ByteString returnData_ = 
com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes return_data = 2; - */ - public boolean hasReturnData() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bytes return_data = 2; - */ - public com.google.protobuf.ByteString getReturnData() { - return returnData_; - } - /** - * optional bytes return_data = 2; - */ - public Builder setReturnData(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - returnData_ = value; - onChanged(); - return this; - } - /** - * optional bytes return_data = 2; - */ - public Builder clearReturnData() { - bitField0_ = (bitField0_ & ~0x00000002); - returnData_ = getDefaultInstance().getReturnData(); + isMasterRunning_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningResponse) } static { - defaultInstance = new ExecProcedureResponse(true); + defaultInstance = new IsMasterRunningResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningResponse) } - public interface IsProcedureDoneRequestOrBuilder + public interface ExecProcedureRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional .hbase.pb.ProcedureDescription procedure = 1; + // required .hbase.pb.ProcedureDescription procedure = 1; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ boolean hasProcedure(); /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} + * Protobuf type {@code hbase.pb.ExecProcedureRequest} */ - public static final class IsProcedureDoneRequest extends + public static final class ExecProcedureRequest extends com.google.protobuf.GeneratedMessage - implements IsProcedureDoneRequestOrBuilder { - // Use IsProcedureDoneRequest.newBuilder() to construct. - private IsProcedureDoneRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ExecProcedureRequestOrBuilder { + // Use ExecProcedureRequest.newBuilder() to construct. 
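/*
 * Editor's note: the hunks above look like protoc churn rather than hand
 * edits — the generated classes shifted because the messages were
 * reordered in Master.proto, so the diff pairs the old
 * ExecProcedureResponse slot with IsMasterRunningResponse. The one
 * semantic point worth noting: is_master_running is a required bool, so
 * isInitialized() reports false until it is set and Builder.build()
 * rejects an unset message.
 *
 * A minimal sketch of the contract this enforces (illustrative only, not
 * part of the patch):
 *
 *   IsMasterRunningResponse ok = IsMasterRunningResponse.newBuilder()
 *       .setIsMasterRunning(true)
 *       .build();                 // succeeds: the required field is set
 *
 *   IsMasterRunningResponse bad = IsMasterRunningResponse.newBuilder()
 *       .build();                 // throws UninitializedMessageException
 */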
+ private ExecProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsProcedureDoneRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ExecProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsProcedureDoneRequest defaultInstance; - public static IsProcedureDoneRequest getDefaultInstance() { + private static final ExecProcedureRequest defaultInstance; + public static ExecProcedureRequest getDefaultInstance() { return defaultInstance; } - public IsProcedureDoneRequest getDefaultInstanceForType() { + public ExecProcedureRequest getDefaultInstanceForType() { return defaultInstance; } @@ -52536,7 +52570,7 @@ public IsProcedureDoneRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsProcedureDoneRequest( + private ExecProcedureRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -52586,49 +52620,49 @@ private IsProcedureDoneRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsProcedureDoneRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ExecProcedureRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsProcedureDoneRequest(input, extensionRegistry); + return new ExecProcedureRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional .hbase.pb.ProcedureDescription procedure = 1; + // required .hbase.pb.ProcedureDescription procedure = 1; public static final int PROCEDURE_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public boolean hasProcedure() { return 
((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { return procedure_; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { return procedure_; @@ -52642,11 +52676,13 @@ public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasProcedure()) { - if (!getProcedure().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasProcedure()) { + memoizedIsInitialized = 0; + return false; + } + if (!getProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -52688,10 +52724,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) obj; boolean result = true; result = result && (hasProcedure() == other.hasProcedure()); @@ -52721,53 +52757,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(java.io.InputStream input) + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -52776,7 +52812,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedur public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -52788,24 +52824,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} + * Protobuf type {@code hbase.pb.ExecProcedureRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -52841,23 +52877,23 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -52874,16 +52910,16 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRe } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)other); } else { 
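/*
 * Editor's note: ExecProcedureRequest now occupies the slot the diff
 * previously showed for IsProcedureDoneRequest, and its `procedure`
 * field is promoted from optional to required. Accordingly, the
 * generated isInitialized() below first demands hasProcedure() and only
 * then validates the nested ProcedureDescription, instead of skipping
 * validation when the field is absent.
 *
 * Hedged sketch of a well-formed request (the signature string is an
 * example, not taken from this patch):
 *
 *   ExecProcedureRequest req = ExecProcedureRequest.newBuilder()
 *       .setProcedure(HBaseProtos.ProcedureDescription.newBuilder()
 *           .setSignature("flush-table-proc")  // ProcedureDescription's
 *           .build())                          // own required field
 *       .build();
 *
 *   // Without setProcedure(...), build() now throws where the optional
 *   // field previously let an empty request through.
 */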
super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance()) return this; if (other.hasProcedure()) { mergeProcedure(other.getProcedure()); } @@ -52892,11 +52928,13 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos } public final boolean isInitialized() { - if (hasProcedure()) { - if (!getProcedure().isInitialized()) { - - return false; - } + if (!hasProcedure()) { + + return false; + } + if (!getProcedure().isInitialized()) { + + return false; } return true; } @@ -52905,11 +52943,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -52920,18 +52958,18 @@ public Builder mergeFrom( } private int bitField0_; - // optional .hbase.pb.ProcedureDescription procedure = 1; + // required .hbase.pb.ProcedureDescription procedure = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public boolean hasProcedure() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { if (procedureBuilder_ == null) { @@ -52941,7 +52979,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti } } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { if (procedureBuilder_ == null) { @@ -52957,7 +52995,7 @@ public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProt return this; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required 
.hbase.pb.ProcedureDescription procedure = 1; */ public Builder setProcedure( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { @@ -52971,7 +53009,7 @@ public Builder setProcedure( return this; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { if (procedureBuilder_ == null) { @@ -52990,7 +53028,7 @@ public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBasePr return this; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public Builder clearProcedure() { if (procedureBuilder_ == null) { @@ -53003,7 +53041,7 @@ public Builder clearProcedure() { return this; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { bitField0_ |= 0x00000001; @@ -53011,7 +53049,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti return getProcedureFieldBuilder().getBuilder(); } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { if (procedureBuilder_ != null) { @@ -53021,7 +53059,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti } } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required .hbase.pb.ProcedureDescription procedure = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> @@ -53037,63 +53075,59 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti return procedureBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureRequest) } static { - defaultInstance = new IsProcedureDoneRequest(true); + defaultInstance = new ExecProcedureRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureRequest) } - public interface IsProcedureDoneResponseOrBuilder + public interface ExecProcedureResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional bool done = 1 [default = false]; + // optional int64 expected_timeout = 1; /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - boolean hasDone(); + boolean hasExpectedTimeout(); /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - boolean getDone(); + long getExpectedTimeout(); - // optional .hbase.pb.ProcedureDescription snapshot = 2; - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - boolean hasSnapshot(); + // optional bytes return_data = 2; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot(); + boolean hasReturnData(); /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder(); + com.google.protobuf.ByteString getReturnData(); } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} + * Protobuf type {@code hbase.pb.ExecProcedureResponse} */ - public static final class IsProcedureDoneResponse extends + public static final class ExecProcedureResponse extends com.google.protobuf.GeneratedMessage - implements IsProcedureDoneResponseOrBuilder { - // Use IsProcedureDoneResponse.newBuilder() to construct. - private IsProcedureDoneResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ExecProcedureResponseOrBuilder { + // Use ExecProcedureResponse.newBuilder() to construct. + private ExecProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsProcedureDoneResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ExecProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsProcedureDoneResponse defaultInstance; - public static IsProcedureDoneResponse getDefaultInstance() { + private static final ExecProcedureResponse defaultInstance; + public static ExecProcedureResponse getDefaultInstance() { return defaultInstance; } - public IsProcedureDoneResponse getDefaultInstanceForType() { + public ExecProcedureResponse getDefaultInstanceForType() { return defaultInstance; } @@ -53103,7 +53137,7 @@ public IsProcedureDoneResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsProcedureDoneResponse( + private ExecProcedureResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -53128,20 +53162,12 @@ private IsProcedureDoneResponse( } case 8: { bitField0_ |= 0x00000001; - done_ = input.readBool(); + expectedTimeout_ = input.readInt64(); break; } case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = snapshot_.toBuilder(); - } - snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(snapshot_); - snapshot_ = subBuilder.buildPartial(); - } bitField0_ |= 0x00000002; + returnData_ = input.readBytes(); break; } } @@ -53158,85 +53184,73 @@ private IsProcedureDoneResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsProcedureDoneResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ExecProcedureResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsProcedureDoneResponse(input, extensionRegistry); + return new ExecProcedureResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional bool done = 1 [default = false]; - public static final int DONE_FIELD_NUMBER = 1; - private boolean done_; + // optional int64 expected_timeout = 1; + public static final int EXPECTED_TIMEOUT_FIELD_NUMBER = 1; + private long expectedTimeout_; /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public boolean hasDone() { + public boolean hasExpectedTimeout() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public boolean getDone() { - return done_; + public long getExpectedTimeout() { + return expectedTimeout_; } - // optional .hbase.pb.ProcedureDescription snapshot = 2; - public static final int SNAPSHOT_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_; + // optional bytes return_data = 2; + public static final int RETURN_DATA_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString returnData_; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - public boolean hasSnapshot() { + public boolean hasReturnData() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { - return snapshot_; - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { - return snapshot_; + public com.google.protobuf.ByteString getReturnData() { + return returnData_; } private void initFields() { - done_ = false; - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + expectedTimeout_ = 0L; + returnData_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasSnapshot()) { - if (!getSnapshot().isInitialized()) { - 
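/*
 * Editor's note: the inverse move — ExecProcedureResponse takes over the
 * slot the diff previously showed for IsProcedureDoneResponse. Both of
 * its fields, expected_timeout (int64) and return_data (bytes), stay
 * optional, so the removed hasSnapshot()/isInitialized() recursion is
 * not replaced by any check: an empty ExecProcedureResponse is valid.
 *
 * Illustrative reader-side handling (names are examples only):
 *
 *   ExecProcedureResponse resp = ...;  // obtained from the master RPC
 *   long waitMillis = resp.hasExpectedTimeout()
 *       ? resp.getExpectedTimeout()
 *       : 0L;                          // absent optional -> default
 *   if (resp.hasReturnData()) {
 *       byte[] payload = resp.getReturnData().toByteArray();
 *   }
 */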
memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -53245,10 +53259,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, done_); + output.writeInt64(1, expectedTimeout_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, snapshot_); + output.writeBytes(2, returnData_); } getUnknownFields().writeTo(output); } @@ -53261,11 +53275,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, done_); + .computeInt64Size(1, expectedTimeout_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, snapshot_); + .computeBytesSize(2, returnData_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -53284,21 +53298,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) obj; boolean result = true; - result = result && (hasDone() == other.hasDone()); - if (hasDone()) { - result = result && (getDone() - == other.getDone()); + result = result && (hasExpectedTimeout() == other.hasExpectedTimeout()); + if (hasExpectedTimeout()) { + result = result && (getExpectedTimeout() + == other.getExpectedTimeout()); } - result = result && (hasSnapshot() == other.hasSnapshot()); - if (hasSnapshot()) { - result = result && getSnapshot() - .equals(other.getSnapshot()); + result = result && (hasReturnData() == other.hasReturnData()); + if (hasReturnData()) { + result = result && getReturnData() + .equals(other.getReturnData()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -53313,66 +53327,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasDone()) { - hash = (37 * hash) + DONE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getDone()); + if (hasExpectedTimeout()) { + hash = (37 * hash) + EXPECTED_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getExpectedTimeout()); } - if (hasSnapshot()) { - hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; - hash = (53 * hash) + getSnapshot().hashCode(); + if (hasReturnData()) { + hash = (37 * hash) + RETURN_DATA_FIELD_NUMBER; + hash = (53 * hash) + getReturnData().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -53381,7 +53395,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedur public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -53393,24 +53407,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} + * Protobuf type {@code hbase.pb.ExecProcedureResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -53422,7 +53436,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSnapshotFieldBuilder(); } } private static Builder create() { @@ -53431,13 +53444,9 @@ private static Builder create() { public Builder clear() { super.clear(); - done_ = false; + expectedTimeout_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - if (snapshotBuilder_ == null) { - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - } else { - snapshotBuilder_.clear(); - } + returnData_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -53448,70 +53457,60 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.done_ = done_; + result.expectedTimeout_ = expectedTimeout_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (snapshotBuilder_ == null) { - result.snapshot_ = snapshot_; - } else { - result.snapshot_ = snapshotBuilder_.build(); - } + result.returnData_ = returnData_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return this; - if (other.hasDone()) { - setDone(other.getDone()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()) return this; + if (other.hasExpectedTimeout()) { + setExpectedTimeout(other.getExpectedTimeout()); } - if (other.hasSnapshot()) { - mergeSnapshot(other.getSnapshot()); + if (other.hasReturnData()) { + setReturnData(other.getReturnData()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (hasSnapshot()) { - if (!getSnapshot().isInitialized()) { - - return false; - } - } return true; } @@ -53519,11 +53518,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -53534,199 +53533,122 @@ public Builder mergeFrom( } private int bitField0_; - // optional bool done = 1 [default = false]; - private boolean done_ ; + // optional int64 expected_timeout = 1; + private long expectedTimeout_ ; /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public boolean hasDone() { + public boolean hasExpectedTimeout() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public boolean getDone() { - return done_; + public long getExpectedTimeout() { + return expectedTimeout_; } /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public Builder setDone(boolean value) { + public Builder setExpectedTimeout(long value) { bitField0_ |= 0x00000001; - done_ = value; + expectedTimeout_ = value; onChanged(); return this; } /** - * optional bool done = 1 [default = false]; + * optional int64 expected_timeout = 1; */ - public Builder clearDone() { + public Builder clearExpectedTimeout() { bitField0_ = (bitField0_ & ~0x00000001); - done_ = false; + expectedTimeout_ = 0L; onChanged(); return this; } - // optional .hbase.pb.ProcedureDescription snapshot = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> snapshotBuilder_; + // optional bytes return_data = 2; + private com.google.protobuf.ByteString returnData_ = com.google.protobuf.ByteString.EMPTY; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - public boolean hasSnapshot() { + public boolean hasReturnData() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { - if (snapshotBuilder_ == null) { - return snapshot_; - } else { - return snapshotBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (snapshotBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - snapshot_ = value; - onChanged(); - } else { - snapshotBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional 
bytes return_data = 2; */ - public Builder setSnapshot( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { - if (snapshotBuilder_ == null) { - snapshot_ = builderForValue.build(); - onChanged(); - } else { - snapshotBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; + public com.google.protobuf.ByteString getReturnData() { + return returnData_; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (snapshotBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { - snapshot_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); - } else { - snapshot_ = value; - } - onChanged(); - } else { - snapshotBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; + public Builder setReturnData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + returnData_ = value; + onChanged(); return this; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional bytes return_data = 2; */ - public Builder clearSnapshot() { - if (snapshotBuilder_ == null) { - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - onChanged(); - } else { - snapshotBuilder_.clear(); - } + public Builder clearReturnData() { bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getSnapshotBuilder() { - bitField0_ |= 0x00000002; + returnData_ = getDefaultInstance().getReturnData(); onChanged(); - return getSnapshotFieldBuilder().getBuilder(); - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { - if (snapshotBuilder_ != null) { - return snapshotBuilder_.getMessageOrBuilder(); - } else { - return snapshot_; - } - } - /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> - getSnapshotFieldBuilder() { - if (snapshotBuilder_ == null) { - snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( - snapshot_, - getParentForChildren(), - isClean()); - snapshot_ = null; - } - return snapshotBuilder_; + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureResponse) } static { - defaultInstance = new IsProcedureDoneResponse(true); + defaultInstance = new 
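// A minimal usage sketch (an annotation, not part of the generated code) for the
// reshaped ExecProcedureResponse whose accessors appear above: the done/snapshot
// pair is replaced by `optional int64 expected_timeout = 1` and
// `optional bytes return_data = 2`. The concrete values are illustrative assumptions:
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

class ExecProcedureResponseSketch {                  // hypothetical demo class, not in HBase
  public static void main(String[] args) {
    MasterProtos.ExecProcedureResponse resp =
        MasterProtos.ExecProcedureResponse.newBuilder()
            .setExpectedTimeout(60000L)                    // optional int64 expected_timeout = 1
            .setReturnData(ByteString.copyFromUtf8("ok"))  // optional bytes return_data = 2
            .build();
    long waitMs = resp.hasExpectedTimeout() ? resp.getExpectedTimeout() : 0L;
    System.out.println("expected timeout ms: " + waitMs);
  }
}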
ExecProcedureResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureResponse) } - public interface GetProcedureResultRequestOrBuilder + public interface IsProcedureDoneRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required uint64 proc_id = 1; + // optional .hbase.pb.ProcedureDescription procedure = 1; /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - boolean hasProcId(); + boolean hasProcedure(); /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - long getProcId(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); } /** - * Protobuf type {@code hbase.pb.GetProcedureResultRequest} + * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} */ - public static final class GetProcedureResultRequest extends + public static final class IsProcedureDoneRequest extends com.google.protobuf.GeneratedMessage - implements GetProcedureResultRequestOrBuilder { - // Use GetProcedureResultRequest.newBuilder() to construct. - private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsProcedureDoneRequestOrBuilder { + // Use IsProcedureDoneRequest.newBuilder() to construct. + private IsProcedureDoneRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsProcedureDoneRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetProcedureResultRequest defaultInstance; - public static GetProcedureResultRequest getDefaultInstance() { + private static final IsProcedureDoneRequest defaultInstance; + public static IsProcedureDoneRequest getDefaultInstance() { return defaultInstance; } - public GetProcedureResultRequest getDefaultInstanceForType() { + public IsProcedureDoneRequest getDefaultInstanceForType() { return defaultInstance; } @@ -53736,7 +53658,7 @@ public GetProcedureResultRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetProcedureResultRequest( + private IsProcedureDoneRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -53759,9 +53681,17 @@ private GetProcedureResultRequest( } break; } - case 8: { + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = procedure_.toBuilder(); + } + procedure_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(procedure_); + procedure_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000001; - procId_ = input.readUInt64(); break; } } @@ -53778,59 +53708,67 @@ private GetProcedureResultRequest( } public static final 
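// The parser hunk above tracks the proto change for field 1 of
// IsProcedureDoneRequest: a varint proc_id (tag 8 = field 1, wire type 0)
// becomes an embedded ProcedureDescription message (tag 10 = field 1, wire
// type 2). A minimal construction sketch; the signature/instance strings and
// the ProcedureDescription setters are illustrative assumptions from HBaseProtos:
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

class IsProcedureDoneRequestSketch {                 // hypothetical demo class, not in HBase
  static MasterProtos.IsProcedureDoneRequest build() {
    HBaseProtos.ProcedureDescription desc =
        HBaseProtos.ProcedureDescription.newBuilder()
            .setSignature("online-snapshot")  // names the procedure type (illustrative value)
            .setInstance("snap-1")            // names this particular run (illustrative value)
            .build();
    return MasterProtos.IsProcedureDoneRequest.newBuilder()
        .setProcedure(desc)
        .build();
  }
}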
com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetProcedureResultRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsProcedureDoneRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetProcedureResultRequest(input, extensionRegistry); + return new IsProcedureDoneRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required uint64 proc_id = 1; - public static final int PROC_ID_FIELD_NUMBER = 1; - private long procId_; + // optional .hbase.pb.ProcedureDescription procedure = 1; + public static final int PROCEDURE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - public boolean hasProcId() { + public boolean hasProcedure() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - public long getProcId() { - return procId_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { + return procedure_; + } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { + return procedure_; } private void initFields() { - procId_ = 0L; + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasProcId()) { - memoizedIsInitialized = 0; - return false; + if (hasProcedure()) { + if (!getProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -53840,7 +53778,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws 
java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, procId_); + output.writeMessage(1, procedure_); } getUnknownFields().writeTo(output); } @@ -53853,7 +53791,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, procId_); + .computeMessageSize(1, procedure_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -53872,17 +53810,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) obj; boolean result = true; - result = result && (hasProcId() == other.hasProcId()); - if (hasProcId()) { - result = result && (getProcId() - == other.getProcId()); - } + result = result && (hasProcedure() == other.hasProcedure()); + if (hasProcedure()) { + result = result && getProcedure() + .equals(other.getProcedure()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -53896,62 +53834,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcId()) { - hash = (37 * hash) + PROC_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getProcId()); + if (hasProcedure()) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedure().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return 
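// Because `procedure` is now optional, isInitialized() above only fails when a
// present nested ProcedureDescription is itself incomplete. A round-trip sketch
// over the writeTo()/parseFrom() machinery shown above, reusing the hypothetical
// builder from the previous sketch:
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

class IsProcedureDoneRequestRoundTrip {              // hypothetical demo class, not in HBase
  public static void main(String[] args) throws Exception {
    MasterProtos.IsProcedureDoneRequest req = IsProcedureDoneRequestSketch.build();
    byte[] wire = req.toByteArray();                 // serializes via writeMessage(1, procedure_)
    MasterProtos.IsProcedureDoneRequest parsed =
        MasterProtos.IsProcedureDoneRequest.parseFrom(wire);
    // equals()/hashCode() above compare the nested message structurally
    assert parsed.equals(req) && parsed.hashCode() == req.hashCode();
  }
}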
PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -53960,7 +53898,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -53972,24 +53910,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetProcedureResultRequest} + * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -54001,6 +53939,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); } } private static Builder create() { @@ -54009,7 +53948,11 @@ private static Builder create() { public Builder clear() { super.clear(); - procId_ = 0L; + if (procedureBuilder_ == null) { + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + } else { + procedureBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -54020,56 +53963,62 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.procId_ = procId_; + if (procedureBuilder_ == null) { + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; - if (other.hasProcId()) { - setProcId(other.getProcId()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance()) return this; + if (other.hasProcedure()) { + mergeProcedure(other.getProcedure()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasProcId()) { - - return false; + if (hasProcedure()) { + if (!getProcedure().isInitialized()) { + + return false; + } } return true; } @@ -54078,11 +54027,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -54093,126 +54042,180 @@ public Builder mergeFrom( } private int bitField0_; - // required uint64 proc_id = 1; - private long procId_ ; + // optional .hbase.pb.ProcedureDescription procedure = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; /** - * required uint64 proc_id = 1; + * 
optional .hbase.pb.ProcedureDescription procedure = 1; */ - public boolean hasProcId() { + public boolean hasProcedure() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - public long getProcId() { - return procId_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { + if (procedureBuilder_ == null) { + return procedure_; + } else { + return procedureBuilder_.getMessage(); + } } /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - public Builder setProcId(long value) { + public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + procedure_ = value; + onChanged(); + } else { + procedureBuilder_.setMessage(value); + } bitField0_ |= 0x00000001; - procId_ = value; - onChanged(); return this; } /** - * required uint64 proc_id = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ - public Builder clearProcId() { + public Builder setProcedure( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { + if (procedureBuilder_ == null) { + procedure_ = builderForValue.build(); + onChanged(); + } else { + procedureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + procedure_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { + procedure_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(procedure_).mergeFrom(value).buildPartial(); + } else { + procedure_ = value; + } + onChanged(); + } else { + procedureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + onChanged(); + } else { + procedureBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - procId_ = 0L; - onChanged(); return this; } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProcedureFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilder(); + } else { + return procedure_; + } + } + /** + * optional .hbase.pb.ProcedureDescription procedure = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, 
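// mergeProcedure() above folds an incoming ProcedureDescription into any value
// already set, field by field via a nested builder, whereas setProcedure()
// replaces the value outright. A sketch of the merging path with two
// hypothetical requests:
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

class MergeSemanticsSketch {                         // hypothetical demo class, not in HBase
  static MasterProtos.IsProcedureDoneRequest merge(
      MasterProtos.IsProcedureDoneRequest reqA,
      MasterProtos.IsProcedureDoneRequest reqB) {
    return MasterProtos.IsProcedureDoneRequest.newBuilder(reqA)
        .mergeFrom(reqB)   // routes through mergeProcedure(), combining both descriptions
        .build();
  }
}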
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( + procedure_, + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneRequest) } static { - defaultInstance = new GetProcedureResultRequest(true); + defaultInstance = new IsProcedureDoneRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneRequest) } - public interface GetProcedureResultResponseOrBuilder + public interface IsProcedureDoneResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; - */ - boolean hasState(); - /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); - - // optional uint64 start_time = 2; + // optional bool done = 1 [default = false]; /** - * optional uint64 start_time = 2; + * optional bool done = 1 [default = false]; */ - boolean hasStartTime(); + boolean hasDone(); /** - * optional uint64 start_time = 2; + * optional bool done = 1 [default = false]; */ - long getStartTime(); + boolean getDone(); - // optional uint64 last_update = 3; - /** - * optional uint64 last_update = 3; - */ - boolean hasLastUpdate(); + // optional .hbase.pb.ProcedureDescription snapshot = 2; /** - * optional uint64 last_update = 3; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - long getLastUpdate(); - - // optional bytes result = 4; + boolean hasSnapshot(); /** - * optional bytes result = 4; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - boolean hasResult(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot(); /** - * optional bytes result = 4; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - com.google.protobuf.ByteString getResult(); - - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - boolean hasException(); - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder(); } /** - * Protobuf type {@code hbase.pb.GetProcedureResultResponse} + * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} */ - public static final class GetProcedureResultResponse extends + public static final class IsProcedureDoneResponse extends com.google.protobuf.GeneratedMessage 
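// Per the OrBuilder interface above, IsProcedureDoneResponse pairs an optional
// bool `done` (default false) with an optional ProcedureDescription `snapshot`.
// A sketch of how a caller might inspect a response; treating absence of `done`
// as "still running" and retry-on-false are assumed usage, not taken from the patch:
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

class IsProcedureDoneResponseSketch {                // hypothetical demo class, not in HBase
  static boolean finished(MasterProtos.IsProcedureDoneResponse resp) {
    if (resp.getDone()) {                            // getDone() defaults to false when unset
      HBaseProtos.ProcedureDescription snap = resp.getSnapshot();
      System.out.println("completed: " + snap);      // snapshot describes the finished procedure
      return true;
    }
    return false;                                    // not done yet; a caller would poll again
  }
}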
- implements GetProcedureResultResponseOrBuilder { - // Use GetProcedureResultResponse.newBuilder() to construct. - private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsProcedureDoneResponseOrBuilder { + // Use IsProcedureDoneResponse.newBuilder() to construct. + private IsProcedureDoneResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsProcedureDoneResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetProcedureResultResponse defaultInstance; - public static GetProcedureResultResponse getDefaultInstance() { + private static final IsProcedureDoneResponse defaultInstance; + public static IsProcedureDoneResponse getDefaultInstance() { return defaultInstance; } - public GetProcedureResultResponse getDefaultInstanceForType() { + public IsProcedureDoneResponse getDefaultInstanceForType() { return defaultInstance; } @@ -54222,7 +54225,7 @@ public GetProcedureResultResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetProcedureResultResponse( + private IsProcedureDoneResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -54246,42 +54249,21 @@ private GetProcedureResultResponse( break; } case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - case 16: { - bitField0_ |= 0x00000002; - startTime_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - lastUpdate_ = input.readUInt64(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - result_ = input.readBytes(); + bitField0_ |= 0x00000001; + done_ = input.readBool(); break; } - case 42: { - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; - if (((bitField0_ & 0x00000010) == 0x00000010)) { - subBuilder = exception_.toBuilder(); + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = snapshot_.toBuilder(); } - exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(exception_); - exception_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(snapshot_); + snapshot_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000002; break; } } @@ -54298,224 +54280,84 @@ private GetProcedureResultResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetProcedureResultResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsProcedureDoneResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetProcedureResultResponse(input, extensionRegistry); + return new IsProcedureDoneResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - /** - * Protobuf enum {@code hbase.pb.GetProcedureResultResponse.State} - */ - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - /** - * NOT_FOUND = 0; - */ - NOT_FOUND(0, 0), - /** - * RUNNING = 1; - */ - RUNNING(1, 1), - /** - * FINISHED = 2; - */ - FINISHED(2, 2), - ; - - /** - * NOT_FOUND = 0; - */ - public static final int NOT_FOUND_VALUE = 0; - /** - * RUNNING = 1; - */ - public static final int RUNNING_VALUE = 1; - /** - * FINISHED = 2; - */ - public static final int FINISHED_VALUE = 2; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return NOT_FOUND; - case 1: return RUNNING; - case 2: return FINISHED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = values(); - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - 
throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.GetProcedureResultResponse.State) - } - private int bitField0_; - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; + // optional bool done = 1 [default = false]; + public static final int DONE_FIELD_NUMBER = 1; + private boolean done_; /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public boolean hasState() { + public boolean hasDone() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { - return state_; + public boolean getDone() { + return done_; } - // optional uint64 start_time = 2; - public static final int START_TIME_FIELD_NUMBER = 2; - private long startTime_; + // optional .hbase.pb.ProcedureDescription snapshot = 2; + public static final int SNAPSHOT_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_; /** - * optional uint64 start_time = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public boolean hasStartTime() { + public boolean hasSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional uint64 start_time = 2; - */ - public long getStartTime() { - return startTime_; - } - - // optional uint64 last_update = 3; - public static final int LAST_UPDATE_FIELD_NUMBER = 3; - private long lastUpdate_; - /** - * optional uint64 last_update = 3; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { + return snapshot_; } /** - * optional uint64 last_update = 3; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public long getLastUpdate() { - return lastUpdate_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { + return snapshot_; } - // optional bytes result = 4; - public static final int RESULT_FIELD_NUMBER = 4; - private com.google.protobuf.ByteString result_; - /** - * optional bytes result = 4; - */ - public boolean hasResult() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes result = 4; - */ - public com.google.protobuf.ByteString getResult() { - return result_; + private void initFields() { + done_ = false; + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - public static final int EXCEPTION_FIELD_NUMBER = 5; - private 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public boolean hasException() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { - return exception_; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { - return exception_; - } - - private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; - startTime_ = 0L; - lastUpdate_ = 0L; - result_ = com.google.protobuf.ByteString.EMPTY; - exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; + if (hasSnapshot()) { + if (!getSnapshot().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -54525,19 +54367,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); + output.writeBool(1, done_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, startTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, lastUpdate_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, result_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeMessage(5, exception_); + output.writeMessage(2, snapshot_); } getUnknownFields().writeTo(output); } @@ -54550,23 +54383,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); + .computeBoolSize(1, done_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, startTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, lastUpdate_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, result_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, exception_); + .computeMessageSize(2, snapshot_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -54585,36 +54406,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj; boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && (hasStartTime() == other.hasStartTime()); - if (hasStartTime()) { - result = result && (getStartTime() - == other.getStartTime()); - } - result = result && (hasLastUpdate() == other.hasLastUpdate()); - if (hasLastUpdate()) { - result = result && (getLastUpdate() - == other.getLastUpdate()); - } - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && getResult() - .equals(other.getResult()); + result = result && (hasDone() == other.hasDone()); + if (hasDone()) { + result = result && (getDone() + == other.getDone()); } - result = result && (hasException() == other.hasException()); - if (hasException()) { - result = result && getException() - .equals(other.getException()); + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -54629,78 +54435,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - if (hasStartTime()) { - hash = (37 * hash) + START_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTime()); - } - if (hasLastUpdate()) { - hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLastUpdate()); - } - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + getResult().hashCode(); + if (hasDone()) { + hash = (37 * hash) + DONE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDone()); } - if (hasException()) { - hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; - hash = (53 * hash) + getException().hashCode(); + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -54709,7 +54503,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -54721,24 +54515,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetProcedureResultResponse} + * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { + 
implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -54750,7 +54544,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getExceptionFieldBuilder(); + getSnapshotFieldBuilder(); } } private static Builder create() { @@ -54759,20 +54553,14 @@ private static Builder create() { public Builder clear() { super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + done_ = false; bitField0_ = (bitField0_ & ~0x00000001); - startTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - lastUpdate_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - result_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000008); - if (exceptionBuilder_ == null) { - exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } else { - exceptionBuilder_.clear(); + snapshotBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -54782,48 +54570,36 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.state_ = state_; + result.done_ = done_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.startTime_ = startTime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.lastUpdate_ = lastUpdate_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.result_ = result_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - if (exceptionBuilder_ == null) { - result.exception_ = exception_; + if (snapshotBuilder_ == null) { + result.snapshot_ = snapshot_; } else { - result.exception_ = exceptionBuilder_.build(); + result.snapshot_ = snapshotBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); @@ -54831,39 +54607,32 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResul } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasStartTime()) { - setStartTime(other.getStartTime()); - } - if (other.hasLastUpdate()) { - setLastUpdate(other.getLastUpdate()); - } - if (other.hasResult()) { - setResult(other.getResult()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return 
this; + if (other.hasDone()) { + setDone(other.getDone()); } - if (other.hasException()) { - mergeException(other.getException()); + if (other.hasSnapshot()) { + mergeSnapshot(other.getSnapshot()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasState()) { - - return false; + if (hasSnapshot()) { + if (!getSnapshot().isInitialized()) { + + return false; + } } return true; } @@ -54872,11 +54641,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -54887,273 +54656,168 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + // optional bool done = 1 [default = false]; + private boolean done_ ; /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public boolean hasState() { + public boolean hasDone() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { - return state_; + public boolean getDone() { + return done_; } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { - if (value == null) { - throw new NullPointerException(); - } + public Builder setDone(boolean value) { bitField0_ |= 0x00000001; - state_ = value; + done_ = value; onChanged(); return this; } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * optional bool done = 1 [default = false]; */ - public Builder clearState() { + public Builder clearDone() { bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; - onChanged(); - return this; - } - - // optional uint64 start_time = 2; - private long startTime_ ; - /** - * optional uint64 start_time = 2; - */ - public boolean hasStartTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 start_time = 2; - */ - public long getStartTime() { - return startTime_; - } - /** - * optional uint64 start_time = 2; - */ - public Builder setStartTime(long value) { - bitField0_ |= 0x00000002; - startTime_ = value; - onChanged(); - return this; - } - /** - * optional 
uint64 start_time = 2; - */ - public Builder clearStartTime() { - bitField0_ = (bitField0_ & ~0x00000002); - startTime_ = 0L; - onChanged(); - return this; - } - - // optional uint64 last_update = 3; - private long lastUpdate_ ; - /** - * optional uint64 last_update = 3; - */ - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 last_update = 3; - */ - public long getLastUpdate() { - return lastUpdate_; - } - /** - * optional uint64 last_update = 3; - */ - public Builder setLastUpdate(long value) { - bitField0_ |= 0x00000004; - lastUpdate_ = value; - onChanged(); - return this; - } - /** - * optional uint64 last_update = 3; - */ - public Builder clearLastUpdate() { - bitField0_ = (bitField0_ & ~0x00000004); - lastUpdate_ = 0L; - onChanged(); - return this; - } - - // optional bytes result = 4; - private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes result = 4; - */ - public boolean hasResult() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes result = 4; - */ - public com.google.protobuf.ByteString getResult() { - return result_; - } - /** - * optional bytes result = 4; - */ - public Builder setResult(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - result_ = value; - onChanged(); - return this; - } - /** - * optional bytes result = 4; - */ - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000008); - result_ = getDefaultInstance().getResult(); + done_ = false; onChanged(); return this; } - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + // optional .hbase.pb.ProcedureDescription snapshot = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> snapshotBuilder_; /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public boolean hasException() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { - if (exceptionBuilder_ == null) { - return exception_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { + if (snapshotBuilder_ == null) { + 
return snapshot_; } else { - return exceptionBuilder_.getMessage(); + return snapshotBuilder_.getMessage(); } } /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { - if (exceptionBuilder_ == null) { + public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (snapshotBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - exception_ = value; + snapshot_ = value; onChanged(); } else { - exceptionBuilder_.setMessage(value); + snapshotBuilder_.setMessage(value); } - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder setException( - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { - if (exceptionBuilder_ == null) { - exception_ = builderForValue.build(); + public Builder setSnapshot( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { + if (snapshotBuilder_ == null) { + snapshot_ = builderForValue.build(); onChanged(); } else { - exceptionBuilder_.setMessage(builderForValue.build()); + snapshotBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { - if (exceptionBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { - exception_ = - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (snapshotBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { + snapshot_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); } else { - exception_ = value; + snapshot_ = value; } onChanged(); } else { - exceptionBuilder_.mergeFrom(value); + snapshotBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000002; return this; } /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder clearException() { - if (exceptionBuilder_ == null) { - exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + public Builder clearSnapshot() { + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); onChanged(); } else { - exceptionBuilder_.clear(); + snapshotBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ 
& ~0x00000002);
         return this;
       }
       /**
-       * <code>optional .hbase.pb.ForeignExceptionMessage exception = 5;</code>
+       * <code>optional .hbase.pb.ProcedureDescription snapshot = 2;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() {
-        bitField0_ |= 0x00000010;
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getSnapshotBuilder() {
+        bitField0_ |= 0x00000002;
         onChanged();
-        return getExceptionFieldBuilder().getBuilder();
+        return getSnapshotFieldBuilder().getBuilder();
       }
       /**
-       * <code>optional .hbase.pb.ForeignExceptionMessage exception = 5;</code>
+       * <code>optional .hbase.pb.ProcedureDescription snapshot = 2;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() {
-        if (exceptionBuilder_ != null) {
-          return exceptionBuilder_.getMessageOrBuilder();
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() {
+        if (snapshotBuilder_ != null) {
+          return snapshotBuilder_.getMessageOrBuilder();
         } else {
-          return exception_;
+          return snapshot_;
         }
       }
       /**
-       * <code>optional .hbase.pb.ForeignExceptionMessage exception = 5;</code>
+       * <code>optional .hbase.pb.ProcedureDescription snapshot = 2;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>
-          getExceptionFieldBuilder() {
-        if (exceptionBuilder_ == null) {
-          exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>(
-                  exception_,
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>
+          getSnapshotFieldBuilder() {
+        if (snapshotBuilder_ == null) {
+          snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>(
+                  snapshot_,
                   getParentForChildren(),
                   isClean());
-          exception_ = null;
+          snapshot_ = null;
        }
-        return exceptionBuilder_;
+        return snapshotBuilder_;
       }

-      // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultResponse)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneResponse)
     }

     static {
-      defaultInstance = new GetProcedureResultResponse(true);
+      defaultInstance = new IsProcedureDoneResponse(true);
       defaultInstance.initFields();
     }

-    // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultResponse)
+    // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneResponse)
   }
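The hunks above move the generated GetProcedureResultResponse body out of this slot and put the regenerated IsProcedureDoneResponse in its place, leaving exactly two fields: an optional bool done and an optional ProcedureDescription snapshot naming the procedure that was checked. A minimal round-trip through the regenerated message, as an illustrative sketch only (not part of the patch; it assumes the hbase-protocol classes are on the classpath, and the signature string is an arbitrary example value):

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;

public final class IsProcedureDoneRoundTrip {
  public static void main(String[] args) throws Exception {
    // ProcedureDescription's signature field is required, so it must be set
    // before build(); the value used here is only an example.
    ProcedureDescription desc = ProcedureDescription.newBuilder()
        .setSignature("flush-table-proc")
        .build();
    IsProcedureDoneResponse resp = IsProcedureDoneResponse.newBuilder()
        .setDone(true)       // optional bool done = 1 [default = false]
        .setSnapshot(desc)   // optional .hbase.pb.ProcedureDescription snapshot = 2
        .build();
    // Serialize, then parse back through the generated PARSER path.
    IsProcedureDoneResponse parsed =
        IsProcedureDoneResponse.parseFrom(resp.toByteArray());
    System.out.println(parsed.getDone() + " / " + parsed.getSnapshot().getSignature());
  }
}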
-  public interface AbortProcedureRequestOrBuilder
+  public interface GetProcedureResultRequestOrBuilder
     extends com.google.protobuf.MessageOrBuilder {

   // required uint64 proc_id = 1;
@@ -55165,36 +54829,26 @@ public interface AbortProcedureRequestOrBuilder
    * <code>required uint64 proc_id = 1;</code>
    */
   long getProcId();
-
-  // optional bool mayInterruptIfRunning = 2 [default = true];
-  /**
-   * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
-   */
-  boolean hasMayInterruptIfRunning();
-  /**
-   * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
-   */
-  boolean getMayInterruptIfRunning();
 }

 /**
- * Protobuf type {@code hbase.pb.AbortProcedureRequest}
+ * Protobuf type {@code hbase.pb.GetProcedureResultRequest}
  */
-  public static final class AbortProcedureRequest extends
+  public static final class GetProcedureResultRequest extends
     com.google.protobuf.GeneratedMessage
-    implements AbortProcedureRequestOrBuilder {
-    // Use AbortProcedureRequest.newBuilder() to construct.
-    private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements GetProcedureResultRequestOrBuilder {
+    // Use GetProcedureResultRequest.newBuilder() to construct.
+    private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

-    private static final AbortProcedureRequest defaultInstance;
-    public static AbortProcedureRequest getDefaultInstance() {
+    private static final GetProcedureResultRequest defaultInstance;
+    public static GetProcedureResultRequest getDefaultInstance() {
       return defaultInstance;
     }

-    public AbortProcedureRequest getDefaultInstanceForType() {
+    public GetProcedureResultRequest getDefaultInstanceForType() {
       return defaultInstance;
     }

@@ -55204,7 +54858,7 @@ public AbortProcedureRequest getDefaultInstanceForType() {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private AbortProcedureRequest(
+    private GetProcedureResultRequest(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -55232,11 +54886,6 @@ private AbortProcedureRequest(
               procId_ = input.readUInt64();
               break;
             }
-            case 16: {
-              bitField0_ |= 0x00000002;
-              mayInterruptIfRunning_ = input.readBool();
-              break;
-            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -55251,28 +54900,28 @@ private AbortProcedureRequest(
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor;
     }

     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class);
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AbortProcedureRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new AbortProcedureRequest(input, extensionRegistry); + return new GetProcedureResultRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -55293,25 +54942,8 @@ public long getProcId() { return procId_; } - // optional bool mayInterruptIfRunning = 2 [default = true]; - public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; - private boolean mayInterruptIfRunning_; - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean hasMayInterruptIfRunning() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean getMayInterruptIfRunning() { - return mayInterruptIfRunning_; - } - private void initFields() { procId_ = 0L; - mayInterruptIfRunning_ = true; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -55332,9 +54964,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, mayInterruptIfRunning_); - } getUnknownFields().writeTo(output); } @@ -55348,10 +54977,6 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, mayInterruptIfRunning_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -55369,10 +54994,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; boolean result = true; result = result && (hasProcId() == other.hasProcId()); @@ -55380,11 +55005,6 @@ public boolean equals(final java.lang.Object obj) { result = result && (getProcId() == other.getProcId()); } - result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); - if (hasMayInterruptIfRunning()) { - result = result && (getMayInterruptIfRunning() - == other.getMayInterruptIfRunning()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -55402,62 +55022,58 @@ public int hashCode() { hash = (37 * 
hash) + PROC_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getProcId()); } - if (hasMayInterruptIfRunning()) { - hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -55466,7 +55082,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProce public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -55478,24 +55094,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.AbortProcedureRequest} + * Protobuf type {@code hbase.pb.GetProcedureResultRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -55517,8 +55133,6 @@ public Builder clear() { super.clear(); procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - mayInterruptIfRunning_ = true; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -55528,55 +55142,48 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.procId_ = procId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.mayInterruptIfRunning_ = mayInterruptIfRunning_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; if (other.hasProcId()) { setProcId(other.getProcId()); } - if (other.hasMayInterruptIfRunning()) { - setMayInterruptIfRunning(other.getMayInterruptIfRunning()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -55593,11 +55200,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest 
parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -55641,82 +55248,93 @@ public Builder clearProcId() { return this; } - // optional bool mayInterruptIfRunning = 2 [default = true]; - private boolean mayInterruptIfRunning_ = true; - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean hasMayInterruptIfRunning() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean getMayInterruptIfRunning() { - return mayInterruptIfRunning_; - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public Builder setMayInterruptIfRunning(boolean value) { - bitField0_ |= 0x00000002; - mayInterruptIfRunning_ = value; - onChanged(); - return this; - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public Builder clearMayInterruptIfRunning() { - bitField0_ = (bitField0_ & ~0x00000002); - mayInterruptIfRunning_ = true; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultRequest) } static { - defaultInstance = new AbortProcedureRequest(true); + defaultInstance = new GetProcedureResultRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultRequest) } - public interface AbortProcedureResponseOrBuilder + public interface GetProcedureResultResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool is_procedure_aborted = 1; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - boolean hasIsProcedureAborted(); + boolean hasState(); /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - boolean getIsProcedureAborted(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); + + // optional uint64 start_time = 2; + /** + * optional uint64 start_time = 2; + */ + boolean hasStartTime(); + /** + * optional uint64 start_time = 2; + */ + long getStartTime(); + + // optional uint64 last_update = 3; + /** + * optional uint64 last_update = 3; + */ + boolean hasLastUpdate(); + /** + * optional uint64 last_update = 3; + */ + long getLastUpdate(); + + // optional bytes result = 4; + /** + * optional bytes result = 4; + */ + boolean hasResult(); + /** + * optional bytes result = 4; + */ + com.google.protobuf.ByteString getResult(); + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + boolean hasException(); + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .hbase.pb.ForeignExceptionMessage 
exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); } /** - * Protobuf type {@code hbase.pb.AbortProcedureResponse} + * Protobuf type {@code hbase.pb.GetProcedureResultResponse} */ - public static final class AbortProcedureResponse extends + public static final class GetProcedureResultResponse extends com.google.protobuf.GeneratedMessage - implements AbortProcedureResponseOrBuilder { - // Use AbortProcedureResponse.newBuilder() to construct. - private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetProcedureResultResponseOrBuilder { + // Use GetProcedureResultResponse.newBuilder() to construct. + private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final AbortProcedureResponse defaultInstance; - public static AbortProcedureResponse getDefaultInstance() { + private static final GetProcedureResultResponse defaultInstance; + public static GetProcedureResultResponse getDefaultInstance() { return defaultInstance; } - public AbortProcedureResponse getDefaultInstanceForType() { + public GetProcedureResultResponse getDefaultInstanceForType() { return defaultInstance; } @@ -55726,7 +55344,7 @@ public AbortProcedureResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private AbortProcedureResponse( + private GetProcedureResultResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -55750,8 +55368,42 @@ private AbortProcedureResponse( break; } case 8: { - bitField0_ |= 0x00000001; - isProcedureAborted_ = input.readBool(); + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 16: { + bitField0_ |= 0x00000002; + startTime_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastUpdate_ = input.readUInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + result_ = input.readBytes(); + break; + } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = exception_.toBuilder(); + } + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; break; } } @@ -55768,57 +55420,222 @@ private AbortProcedureResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AbortProcedureResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new AbortProcedureResponse(input, extensionRegistry); + return new GetProcedureResultResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + /** + * Protobuf enum {@code hbase.pb.GetProcedureResultResponse.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NOT_FOUND = 0; + */ + NOT_FOUND(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * FINISHED = 2; + */ + FINISHED(2, 2), + ; + + /** + * NOT_FOUND = 0; + */ + public static final int NOT_FOUND_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * FINISHED = 2; + */ + public static final int FINISHED_VALUE = 2; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return NOT_FOUND; + case 1: return RUNNING; + case 2: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw 
new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.GetProcedureResultResponse.State) + } + private int bitField0_; - // required bool is_procedure_aborted = 1; - public static final int IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1; - private boolean isProcedureAborted_; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean hasIsProcedureAborted() { + public boolean hasState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean getIsProcedureAborted() { - return isProcedureAborted_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; + } + + // optional uint64 start_time = 2; + public static final int START_TIME_FIELD_NUMBER = 2; + private long startTime_; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + + // optional uint64 last_update = 3; + public static final int LAST_UPDATE_FIELD_NUMBER = 3; + private long lastUpdate_; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional bytes result = 4; + public static final int RESULT_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + public static final int EXCEPTION_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; } private void initFields() { - isProcedureAborted_ = false; + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + startTime_ = 0L; + lastUpdate_ = 0L; + result_ = com.google.protobuf.ByteString.EMPTY; + exception_ = 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasIsProcedureAborted()) { + if (!hasState()) { memoizedIsInitialized = 0; return false; } @@ -55830,20 +55647,48 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, isProcedureAborted_); + output.writeEnum(1, state_.getNumber()); } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, exception_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, isProcedureAborted_); + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, exception_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -55862,16 +55707,36 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; boolean result = true; - result = result && (hasIsProcedureAborted() == other.hasIsProcedureAborted()); - if (hasIsProcedureAborted()) { - result = result && (getIsProcedureAborted() - == other.getIsProcedureAborted()); + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && 
(getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -55886,62 +55751,78 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIsProcedureAborted()) { - hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsProcedureAborted()); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -55950,7 +55831,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProce public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -55962,24 +55843,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.AbortProcedureResponse} + * Protobuf type {@code hbase.pb.GetProcedureResultResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -55991,6 +55872,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getExceptionFieldBuilder(); } } private static Builder create() { @@ -55999,8 +55881,20 @@ private static Builder create() { public Builder clear() { super.clear(); - isProcedureAborted_ = false; + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; bitField0_ = (bitField0_ & ~0x00000001); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -56010,54 +55904,86 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - 
result.isProcedureAborted_ = isProcedureAborted_; + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; + } else { + result.exception_ = exceptionBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this; - if (other.hasIsProcedureAborted()) { - setIsProcedureAborted(other.getIsProcedureAborted()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasException()) { + mergeException(other.getException()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasIsProcedureAborted()) { + if (!hasState()) { return false; } @@ -56068,11 +55994,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -56083,72 +56009,6026 @@ public Builder mergeFrom( } private int bitField0_; - // required bool is_procedure_aborted = 1; - private boolean isProcedureAborted_ ; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean hasIsProcedureAborted() { + public boolean hasState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean getIsProcedureAborted() { - return isProcedureAborted_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; } /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public Builder setIsProcedureAborted(boolean value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { + if (value == null) { + throw new NullPointerException(); + } bitField0_ |= 0x00000001; - isProcedureAborted_ = value; + state_ = value; onChanged(); return this; } /** - * required bool is_procedure_aborted = 1; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public Builder clearIsProcedureAborted() { + public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000001); - isProcedureAborted_ = false; + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureResponse) - } - - static { - defaultInstance = new AbortProcedureResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureResponse) - } - - public interface ListProceduresRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code hbase.pb.ListProceduresRequest} - */ - public static final class ListProceduresRequest extends - com.google.protobuf.GeneratedMessage - implements ListProceduresRequestOrBuilder { - // Use ListProceduresRequest.newBuilder() to construct. 
- private ListProceduresRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + // optional uint64 start_time = 2; + private long startTime_ ; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + /** + * optional uint64 start_time = 2; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000002; + startTime_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = 0L; + onChanged(); + return this; + } + + // optional uint64 last_update = 3; + private long lastUpdate_ ; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + /** + * optional uint64 last_update = 3; + */ + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000004; + lastUpdate_ = value; + onChanged(); + return this; + } + /** + * optional uint64 last_update = 3; + */ + public Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000004); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional bytes result = 4; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 4; + */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 4; + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000008); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; + } else { + return exceptionBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if 
(exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, + getParentForChildren(), + isClean()); + exception_ = null; + } + return exceptionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultResponse) + } + + static { + defaultInstance = new GetProcedureResultResponse(true); + defaultInstance.initFields(); + } + + // 
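The generated message above carries the full outcome of a master procedure: a required state plus optional timing, result payload, and foreign-exception fields. A minimal consumption sketch, using only accessors visible in this hunk (the helper name describe and the printed labels are illustrative, not part of the patch):

    // Sketch: inspect a GetProcedureResultResponse (assumed helper, not from the patch).
    static void describe(MasterProtos.GetProcedureResultResponse resp) {
      System.out.println("state=" + resp.getState());  // required; defaults to State.NOT_FOUND
      if (resp.hasStartTime() && resp.hasLastUpdate()) {
        // optional uint64 start_time = 2 and last_update = 3
        System.out.println("elapsed=" + (resp.getLastUpdate() - resp.getStartTime()));
      }
      if (resp.hasException()) {
        // optional .hbase.pb.ForeignExceptionMessage exception = 5
        System.out.println("failed: " + resp.getException());
      } else if (resp.hasResult()) {
        System.out.println("result=" + resp.getResult().size() + " bytes");  // optional bytes result = 4
      }
    }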
@@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultResponse) + } + + public interface AbortProcedureRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; + /** + * required uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 1; + */ + long getProcId(); + + // optional bool mayInterruptIfRunning = 2 [default = true]; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean hasMayInterruptIfRunning(); + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean getMayInterruptIfRunning(); + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureRequest} + */ + public static final class AbortProcedureRequest extends + com.google.protobuf.GeneratedMessage + implements AbortProcedureRequestOrBuilder { + // Use AbortProcedureRequest.newBuilder() to construct. + private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortProcedureRequest defaultInstance; + public static AbortProcedureRequest getDefaultInstance() { + return defaultInstance; + } + + public AbortProcedureRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortProcedureRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + } + + public static com.google.protobuf.Parser<AbortProcedureRequest> PARSER = + new
com.google.protobuf.AbstractParser<AbortProcedureRequest>() { + public AbortProcedureRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortProcedureRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<AbortProcedureRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; + private boolean mayInterruptIfRunning_; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + + private void initFields() { + procId_ = 0L; + mayInterruptIfRunning_ = true; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mayInterruptIfRunning_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mayInterruptIfRunning_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); + if (hasMayInterruptIfRunning()) { + result = result &&
(getMayInterruptIfRunning() + == other.getMayInterruptIfRunning()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasMayInterruptIfRunning()) { + hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + mayInterruptIfRunning_ = true; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mayInterruptIfRunning_ = mayInterruptIfRunning_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + }
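To put the builder above to work, a brief construction sketch; the procedure id 42 is a placeholder, while newBuilder, setProcId, setMayInterruptIfRunning, and build are the generated methods shown in this hunk:

    MasterProtos.AbortProcedureRequest request =
        MasterProtos.AbortProcedureRequest.newBuilder()
            .setProcId(42L)                  // required uint64 proc_id = 1
            .setMayInterruptIfRunning(true)  // optional bool, field 2; true is already the default
            .build();                        // build() throws if the required proc_id were unset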
+ + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + if (other.hasMayInterruptIfRunning()) { + setMayInterruptIfRunning(other.getMayInterruptIfRunning()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasProcId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 proc_id = 1; + private long procId_ ; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + private boolean mayInterruptIfRunning_ = true; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder setMayInterruptIfRunning(boolean value) { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = value; + onChanged(); + return this; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder clearMayInterruptIfRunning() { + bitField0_ = (bitField0_ & ~0x00000002); + mayInterruptIfRunning_ = true; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureRequest) + } + + static { + defaultInstance = new AbortProcedureRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureRequest) + } + + public interface AbortProcedureResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool is_procedure_aborted = 1; + /** + * 
required bool is_procedure_aborted = 1; + */ + boolean hasIsProcedureAborted(); + /** + * required bool is_procedure_aborted = 1; + */ + boolean getIsProcedureAborted(); + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureResponse} + */ + public static final class AbortProcedureResponse extends + com.google.protobuf.GeneratedMessage + implements AbortProcedureResponseOrBuilder { + // Use AbortProcedureResponse.newBuilder() to construct. + private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortProcedureResponse defaultInstance; + public static AbortProcedureResponse getDefaultInstance() { + return defaultInstance; + } + + public AbortProcedureResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortProcedureResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + isProcedureAborted_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + } + + public static com.google.protobuf.Parser<AbortProcedureResponse> PARSER = + new com.google.protobuf.AbstractParser<AbortProcedureResponse>() { + public AbortProcedureResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortProcedureResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<AbortProcedureResponse> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool is_procedure_aborted = 1; + public static final int
IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1; + private boolean isProcedureAborted_; + /** + * required bool is_procedure_aborted = 1; + */ + public boolean hasIsProcedureAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_procedure_aborted = 1; + */ + public boolean getIsProcedureAborted() { + return isProcedureAborted_; + } + + private void initFields() { + isProcedureAborted_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIsProcedureAborted()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, isProcedureAborted_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, isProcedureAborted_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj; + + boolean result = true; + result = result && (hasIsProcedureAborted() == other.hasIsProcedureAborted()); + if (hasIsProcedureAborted()) { + result = result && (getIsProcedureAborted() + == other.getIsProcedureAborted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasIsProcedureAborted()) { + hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsProcedureAborted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder() + private Builder() { +
maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + isProcedureAborted_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.isProcedureAborted_ = isProcedureAborted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this; + if (other.hasIsProcedureAborted()) { + setIsProcedureAborted(other.getIsProcedureAborted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIsProcedureAborted()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool is_procedure_aborted = 1; + private boolean isProcedureAborted_ ; + /** 
* required bool is_procedure_aborted = 1; + */ + public boolean hasIsProcedureAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_procedure_aborted = 1; + */ + public boolean getIsProcedureAborted() { + return isProcedureAborted_; + } + /** + * required bool is_procedure_aborted = 1; + */ + public Builder setIsProcedureAborted(boolean value) { + bitField0_ |= 0x00000001; + isProcedureAborted_ = value; + onChanged(); + return this; + } + /** + * required bool is_procedure_aborted = 1; + */ + public Builder clearIsProcedureAborted() { + bitField0_ = (bitField0_ & ~0x00000001); + isProcedureAborted_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureResponse) + } + + static { + defaultInstance = new AbortProcedureResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureResponse) + } + + public interface ListProceduresRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ListProceduresRequest} + */ + public static final class ListProceduresRequest extends + com.google.protobuf.GeneratedMessage + implements ListProceduresRequestOrBuilder { + // Use ListProceduresRequest.newBuilder() to construct. + private ListProceduresRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListProceduresRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListProceduresRequest defaultInstance; + public static ListProceduresRequest getDefaultInstance() { + return defaultInstance; + } + + public ListProceduresRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListProceduresRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( +
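The parseFrom overloads defined above pair with the serialization the message inherits from GeneratedMessage; a quick round-trip sketch (toByteString comes from the protobuf base class, not from this patch):

    MasterProtos.AbortProcedureResponse resp =
        MasterProtos.AbortProcedureResponse.newBuilder()
            .setIsProcedureAborted(true)     // required bool is_procedure_aborted = 1
            .build();
    com.google.protobuf.ByteString wire = resp.toByteString();
    MasterProtos.AbortProcedureResponse parsed =
        MasterProtos.AbortProcedureResponse.parseFrom(wire);
    assert parsed.getIsProcedureAborted();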
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListProceduresRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListProceduresRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListProceduresRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + 
return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresRequest) + } + + static { + defaultInstance = new ListProceduresRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresRequest) + } + + public interface ListProceduresResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.Procedure procedure = 1; + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> + getProcedureList(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + int getProcedureCount(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureOrBuilderList(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder
getProcedureOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListProceduresResponse} + */ + public static final class ListProceduresResponse extends + com.google.protobuf.GeneratedMessage + implements ListProceduresResponseOrBuilder { + // Use ListProceduresResponse.newBuilder() to construct. + private ListProceduresResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListProceduresResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListProceduresResponse defaultInstance; + public static ListProceduresResponse getDefaultInstance() { + return defaultInstance; + } + + public ListProceduresResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListProceduresResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure>(); + mutable_bitField0_ |= 0x00000001; + } + procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + } + + public static com.google.protobuf.Parser<ListProceduresResponse> PARSER = + new com.google.protobuf.AbstractParser<ListProceduresResponse>() { + public ListProceduresResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListProceduresResponse(input, extensionRegistry); + } + }; + + @java.lang.Override +
public com.google.protobuf.Parser<ListProceduresResponse> getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.Procedure procedure = 1; + public static final int PROCEDURE_FIELD_NUMBER = 1; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> procedure_; + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> getProcedureList() { + return procedure_; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureOrBuilderList() { + return procedure_; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public int getProcedureCount() { + return procedure_.size(); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + return procedure_.get(index); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + return procedure_.get(index); + } + + private void initFields() { + procedure_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < procedure_.size(); i++) { + output.writeMessage(1, procedure_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < procedure_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, procedure_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) obj; + + boolean result = true; + result = result && getProcedureList() + .equals(other.getProcedureList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getProcedureCount() > 0) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedureList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse
parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListProceduresResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; +
} + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + procedureBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse(this); + int from_bitField0_ = bitField0_; + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()) return this; + if (procedureBuilder_ == null) { + if (!other.procedure_.isEmpty()) { + if (procedure_.isEmpty()) { + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureProcedureIsMutable(); + 
procedure_.addAll(other.procedure_); + } + onChanged(); + } + } else { + if (!other.procedure_.isEmpty()) { + if (procedureBuilder_.isEmpty()) { + procedureBuilder_.dispose(); + procedureBuilder_ = null; + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000001); + procedureBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getProcedureFieldBuilder() : null; + } else { + procedureBuilder_.addAllMessages(other.procedure_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.Procedure procedure = 1; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> procedure_ = + java.util.Collections.emptyList(); + private void ensureProcedureIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure>(procedure_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> getProcedureList() { + if (procedureBuilder_ == null) { + return java.util.Collections.unmodifiableList(procedure_); + } else { + return procedureBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public int getProcedureCount() { + if (procedureBuilder_ == null) { + return procedure_.size(); + } else { + return procedureBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); + } else { + return procedureBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.set(index, value); + onChanged(); + } else { + procedureBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.set(index, builderForValue.build()); +
onChanged(); + } else { + procedureBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(value); + onChanged(); + } else { + procedureBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(index, value); + onChanged(); + } else { + procedureBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addAllProcedure( + java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure> values) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + super.addAll(values, procedure_); + onChanged(); + } else { + procedureBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + procedureBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder removeProcedure(int index) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.remove(index); + onChanged(); + } else { + procedureBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( + int index) { + return getProcedureFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); } else { + return procedureBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureOrBuilderList() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(procedure_); + } + } + /** + * repeated
.hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { + return getProcedureFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( + int index) { + return getProcedureFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder> + getProcedureBuilderList() { + return getProcedureFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + procedure_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresResponse) + } + + static { + defaultInstance = new ListProceduresResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresResponse) + } + + public interface SetQuotaRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string user_name = 1; + /** + * optional string user_name = 1; + */ + boolean hasUserName(); + /** + * optional string user_name = 1; + */ + java.lang.String getUserName(); + /** + * optional string user_name = 1; + */ + com.google.protobuf.ByteString + getUserNameBytes(); + + // optional string user_group = 2; + /** + * optional string user_group = 2; + */ + boolean hasUserGroup(); + /** + * optional string user_group = 2; + */ + java.lang.String getUserGroup(); + /** + * optional string user_group = 2; + */ + com.google.protobuf.ByteString + getUserGroupBytes(); + + // optional string namespace = 3; + /** + * optional string namespace = 3; + */ + boolean hasNamespace(); + /** + * optional string namespace = 3; + */ + java.lang.String getNamespace(); + /** + * optional string namespace = 3; + */ + com.google.protobuf.ByteString + getNamespaceBytes(); + + // optional .hbase.pb.TableName table_name = 4; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + boolean hasTableName(); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // optional bool remove_all = 5; + /** + * optional bool remove_all = 5; + */ + boolean hasRemoveAll(); + /** + * optional bool remove_all = 5; + */ + boolean getRemoveAll(); + + // optional bool bypass_globals = 6; + /** + * optional bool bypass_globals = 6; + */
+ boolean hasBypassGlobals(); + /** + * optional bool bypass_globals = 6; + */ + boolean getBypassGlobals(); + + // optional .hbase.pb.ThrottleRequest throttle = 7; + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + boolean hasThrottle(); + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle(); + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.SetQuotaRequest} + */ + public static final class SetQuotaRequest extends + com.google.protobuf.GeneratedMessage + implements SetQuotaRequestOrBuilder { + // Use SetQuotaRequest.newBuilder() to construct. + private SetQuotaRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SetQuotaRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetQuotaRequest defaultInstance; + public static SetQuotaRequest getDefaultInstance() { + return defaultInstance; + } + + public SetQuotaRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SetQuotaRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + userName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + userGroup_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + namespace_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 40: { + bitField0_ |= 0x00000010; + removeAll_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + bypassGlobals_ = input.readBool(); + break; + } + case 58: { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = throttle_.toBuilder(); + } + throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(throttle_); + throttle_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } + } + }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + public static com.google.protobuf.Parser<SetQuotaRequest> PARSER = + new com.google.protobuf.AbstractParser<SetQuotaRequest>() { + public SetQuotaRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetQuotaRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<SetQuotaRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string user_name = 1; + public static final int USER_NAME_FIELD_NUMBER = 1; + private java.lang.Object userName_; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userName_ = s; + } + return s; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string user_group = 2; + public static final int USER_GROUP_FIELD_NUMBER = 2; + private java.lang.Object userGroup_; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userGroup_ = s; + } + return s; + } + } + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = userGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b;
+ } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string namespace = 3; + public static final int NAMESPACE_FIELD_NUMBER = 3; + private java.lang.Object namespace_; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + namespace_ = s; + } + return s; + } + } + /** + * optional string namespace = 3; + */ + public com.google.protobuf.ByteString + getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .hbase.pb.TableName table_name = 4; + public static final int TABLE_NAME_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // optional bool remove_all = 5; + public static final int REMOVE_ALL_FIELD_NUMBER = 5; + private boolean removeAll_; + /** + * optional bool remove_all = 5; + */ + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool remove_all = 5; + */ + public boolean getRemoveAll() { + return removeAll_; + } + + // optional bool bypass_globals = 6; + public static final int BYPASS_GLOBALS_FIELD_NUMBER = 6; + private boolean bypassGlobals_; + /** + * optional bool bypass_globals = 6; + */ + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool bypass_globals = 6; + */ + public boolean getBypassGlobals() { + return bypassGlobals_; + } + + // optional .hbase.pb.ThrottleRequest throttle = 7; + public static final int THROTTLE_FIELD_NUMBER = 7; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_; + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public boolean hasThrottle() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { + return throttle_; + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { + return throttle_; + } + + private void initFields() { + userName_ = ""; + userGroup_ = ""; + namespace_ = ""; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + removeAll_ = false; + 
bypassGlobals_ = false; + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, throttle_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, throttle_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) obj; + + boolean result = true; + result = result && (hasUserName() == other.hasUserName()); + if (hasUserName()) { + result = result && getUserName() + .equals(other.getUserName()); + } + result = result && (hasUserGroup() == other.hasUserGroup()); 
+ if (hasUserGroup()) { + result = result && getUserGroup() + .equals(other.getUserGroup()); + } + result = result && (hasNamespace() == other.hasNamespace()); + if (hasNamespace()) { + result = result && getNamespace() + .equals(other.getNamespace()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasRemoveAll() == other.hasRemoveAll()); + if (hasRemoveAll()) { + result = result && (getRemoveAll() + == other.getRemoveAll()); + } + result = result && (hasBypassGlobals() == other.hasBypassGlobals()); + if (hasBypassGlobals()) { + result = result && (getBypassGlobals() + == other.getBypassGlobals()); + } + result = result && (hasThrottle() == other.hasThrottle()); + if (hasThrottle()) { + result = result && getThrottle() + .equals(other.getThrottle()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserName()) { + hash = (37 * hash) + USER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getUserName().hashCode(); + } + if (hasUserGroup()) { + hash = (37 * hash) + USER_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getUserGroup().hashCode(); + } + if (hasNamespace()) { + hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getNamespace().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasRemoveAll()) { + hash = (37 * hash) + REMOVE_ALL_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getRemoveAll()); + } + if (hasBypassGlobals()) { + hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBypassGlobals()); + } + if (hasThrottle()) { + hash = (37 * hash) + THROTTLE_FIELD_NUMBER; + hash = (53 * hash) + getThrottle().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SetQuotaRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + getThrottleFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + userName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + userGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + namespace_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableNameBuilder_ == null) { +
tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + removeAll_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + bypassGlobals_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + if (throttleBuilder_ == null) { + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + } else { + throttleBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.userName_ = userName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.userGroup_ = userGroup_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.namespace_ = namespace_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.removeAll_ = removeAll_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.bypassGlobals_ = bypassGlobals_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (throttleBuilder_ == null) { + result.throttle_ = throttle_; + } else { + result.throttle_ = throttleBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance()) return this; + if (other.hasUserName()) { + bitField0_ |= 0x00000001; + userName_ = other.userName_; + onChanged(); + } + if (other.hasUserGroup()) { + bitField0_ |= 
0x00000002; + userGroup_ = other.userGroup_; + onChanged(); + } + if (other.hasNamespace()) { + bitField0_ |= 0x00000004; + namespace_ = other.namespace_; + onChanged(); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasRemoveAll()) { + setRemoveAll(other.getRemoveAll()); + } + if (other.hasBypassGlobals()) { + setBypassGlobals(other.getBypassGlobals()); + } + if (other.hasThrottle()) { + mergeThrottle(other.getThrottle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string user_name = 1; + private java.lang.Object userName_ = ""; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user_name = 1; + */ + public Builder setUserName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder clearUserName() { + bitField0_ = (bitField0_ & ~0x00000001); + userName_ = getDefaultInstance().getUserName(); + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder setUserNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; + } + + // optional string user_group = 2; + private java.lang.Object userGroup_ = ""; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = 
((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = userGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user_group = 2; + */ + public Builder setUserGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; + } + /** + * optional string user_group = 2; + */ + public Builder clearUserGroup() { + bitField0_ = (bitField0_ & ~0x00000002); + userGroup_ = getDefaultInstance().getUserGroup(); + onChanged(); + return this; + } + /** + * optional string user_group = 2; + */ + public Builder setUserGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; + } + + // optional string namespace = 3; + private java.lang.Object namespace_ = ""; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + namespace_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string namespace = 3; + */ + public com.google.protobuf.ByteString + getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string namespace = 3; + */ + public Builder setNamespace( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); + return this; + } + /** + * optional string namespace = 3; + */ + public Builder clearNamespace() { + bitField0_ = (bitField0_ & ~0x00000004); + namespace_ = getDefaultInstance().getNamespace(); + onChanged(); + return this; + } + /** + * optional string namespace = 3; + */ + public Builder setNamespaceBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); + return this; + } + + // optional .hbase.pb.TableName table_name = 4; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public boolean hasTableName() { + return 
((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // optional bool remove_all = 5; + private boolean removeAll_ ; + /** + * optional bool 
remove_all = 5; + */ + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool remove_all = 5; + */ + public boolean getRemoveAll() { + return removeAll_; + } + /** + * optional bool remove_all = 5; + */ + public Builder setRemoveAll(boolean value) { + bitField0_ |= 0x00000010; + removeAll_ = value; + onChanged(); + return this; + } + /** + * optional bool remove_all = 5; + */ + public Builder clearRemoveAll() { + bitField0_ = (bitField0_ & ~0x00000010); + removeAll_ = false; + onChanged(); + return this; + } + + // optional bool bypass_globals = 6; + private boolean bypassGlobals_ ; + /** + * optional bool bypass_globals = 6; + */ + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool bypass_globals = 6; + */ + public boolean getBypassGlobals() { + return bypassGlobals_; + } + /** + * optional bool bypass_globals = 6; + */ + public Builder setBypassGlobals(boolean value) { + bitField0_ |= 0x00000020; + bypassGlobals_ = value; + onChanged(); + return this; + } + /** + * optional bool bypass_globals = 6; + */ + public Builder clearBypassGlobals() { + bitField0_ = (bitField0_ & ~0x00000020); + bypassGlobals_ = false; + onChanged(); + return this; + } + + // optional .hbase.pb.ThrottleRequest throttle = 7; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> throttleBuilder_; + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public boolean hasThrottle() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { + if (throttleBuilder_ == null) { + return throttle_; + } else { + return throttleBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { + if (throttleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttle_ = value; + onChanged(); + } else { + throttleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public Builder setThrottle( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder builderForValue) { + if (throttleBuilder_ == null) { + throttle_ = builderForValue.build(); + onChanged(); + } else { + throttleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { + if (throttleBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) { + throttle_ = + 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder(throttle_).mergeFrom(value).buildPartial(); + } else { + throttle_ = value; + } + onChanged(); + } else { + throttleBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public Builder clearThrottle() { + if (throttleBuilder_ == null) { + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + onChanged(); + } else { + throttleBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder getThrottleBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getThrottleFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { + if (throttleBuilder_ != null) { + return throttleBuilder_.getMessageOrBuilder(); + } else { + return throttle_; + } + } + /** + * optional .hbase.pb.ThrottleRequest throttle = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> + getThrottleFieldBuilder() { + if (throttleBuilder_ == null) { + throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>( + throttle_, + getParentForChildren(), + isClean()); + throttle_ = null; + } + return throttleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaRequest) + } + + static { + defaultInstance = new SetQuotaRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaRequest) + } + + public interface SetQuotaResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.SetQuotaResponse} + */ + public static final class SetQuotaResponse extends + com.google.protobuf.GeneratedMessage + implements SetQuotaResponseOrBuilder { + // Use SetQuotaResponse.newBuilder() to construct. 
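// Usage sketch (editorial comment, not generated code): building the
// SetQuotaRequest defined above. The user name and throttle numbers are
// illustrative assumptions; ThrottleRequest/TimedQuota come from QuotaProtos
// and TimeUnit from HBaseProtos, both already referenced by this file.
//
//   SetQuotaRequest req = SetQuotaRequest.newBuilder()
//       .setUserName("alice")                                // user_name = 1
//       .setBypassGlobals(false)                             // bypass_globals = 6
//       .setThrottle(QuotaProtos.ThrottleRequest.newBuilder()
//           .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER)
//           .setTimedQuota(QuotaProtos.TimedQuota.newBuilder()
//               .setTimeUnit(HBaseProtos.TimeUnit.SECONDS)   // required field of TimedQuota
//               .setSoftLimit(100)))                         // ~100 req/s, illustrative
//       .build();
//
// Only table_name and throttle participate in SetQuotaRequest's isInitialized(),
// since they are the only submessages that themselves contain required fields.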
+    private SetQuotaResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SetQuotaResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SetQuotaResponse defaultInstance;
+    public static SetQuotaResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SetQuotaResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SetQuotaResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SetQuotaResponse> PARSER =
+        new com.google.protobuf.AbstractParser<SetQuotaResponse>() {
+      public SetQuotaResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SetQuotaResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SetQuotaResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SetQuotaResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaResponse) + } + + static { + defaultInstance = new SetQuotaResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaResponse) + } + + public interface MajorCompactionTimestampRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table_name = 1; + /** + * required .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + */ + public static final class MajorCompactionTimestampRequest extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampRequestOrBuilder { + // Use MajorCompactionTimestampRequest.newBuilder() to construct. 
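// SetQuotaResponse above is deliberately empty: a bare acknowledgement whose
// equals()/hashCode() reduce to the unknown-field set and whose isInitialized()
// is always true. A client round trip through the master stub would look
// roughly like the sketch below (comment only; the stub variable and null
// controller are assumptions, not part of this patch):
//
//   MasterProtos.MasterService.BlockingInterface master = ...;
//   SetQuotaRequest req = ...;                          // as sketched earlier
//   SetQuotaResponse ack = master.setQuota(null, req);  // empty message on success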
+ private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampRequest defaultInstance; + public static MajorCompactionTimestampRequest getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + 
public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); 
+ } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + 
onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampRequest) + } + + static { + defaultInstance = new MajorCompactionTimestampRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampRequest) + } + + public interface MajorCompactionTimestampForRegionRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.RegionSpecifier region = 1; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + */ + public static final class MajorCompactionTimestampForRegionRequest extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampForRegionRequestOrBuilder { + // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct. 
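// The table-level MajorCompactionTimestampRequest above feeds the master's
// getLastMajorCompactionTimestamp call. A construction sketch, assuming an
// illustrative "default:t1" table (the TableName message carries bytes, not
// strings, for both namespace and qualifier):
//
//   MajorCompactionTimestampRequest req = MajorCompactionTimestampRequest.newBuilder()
//       .setTableName(TableProtos.TableName.newBuilder()
//           .setNamespace(ByteString.copyFromUtf8("default"))
//           .setQualifier(ByteString.copyFromUtf8("t1")))
//       .build();  // table_name is required: build() throws if it is never set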
+ private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampForRegionRequest defaultInstance; + public static MajorCompactionTimestampForRegionRequest getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampForRegionRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampForRegionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() 
{ + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + 
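// The region-scoped variant carries a required RegionSpecifier instead of a
// TableName. A sketch of filling it from an encoded region name; the name used
// here is purely hypothetical:
//
//   MajorCompactionTimestampForRegionRequest req =
//       MajorCompactionTimestampForRegionRequest.newBuilder()
//           .setRegion(HBaseProtos.RegionSpecifier.newBuilder()
//               .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
//               .setValue(ByteString.copyFromUtf8("d1f8a7c3...")))  // hypothetical encoded name
//           .build();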
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ 
= value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + } + + static { + defaultInstance = new MajorCompactionTimestampForRegionRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + } + + public interface MajorCompactionTimestampResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int64 compaction_timestamp = 1; + /** + * required int64 compaction_timestamp = 1; + */ + boolean hasCompactionTimestamp(); + /** + * required int64 compaction_timestamp = 1; + */ + long getCompactionTimestamp(); + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + */ + public static final class MajorCompactionTimestampResponse extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampResponseOrBuilder { + // Use MajorCompactionTimestampResponse.newBuilder() to construct. 
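[reviewer note] To sanity-check the MajorCompactionTimestampForRegionRequest message that ends above: a minimal, hypothetical client-side sketch, assuming the standard HBaseProtos.RegionSpecifier API; the class name and region name below are invented for illustration. build() enforces the same required-field rule that the generated isInitialized() implements above.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;

    public class MajorCompactionRequestSketch {
      public static void main(String[] args) {
        // The request has one required field: a RegionSpecifier identifying the region.
        RegionSpecifier region = RegionSpecifier.newBuilder()
            .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
            .setValue(ByteString.copyFromUtf8("hypothetical-region-name"))
            .build();
        // build() would throw an UninitializedMessageException if region were unset,
        // per the generated isInitialized() checks shown above.
        MajorCompactionTimestampForRegionRequest request =
            MajorCompactionTimestampForRegionRequest.newBuilder()
                .setRegion(region)
                .build();
        System.out.println(request);
      }
    }
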
+ private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampResponse defaultInstance; + public static MajorCompactionTimestampResponse getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + compactionTimestamp_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int64 compaction_timestamp = 1; + public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1; + private long compactionTimestamp_; + /** + * required int64 compaction_timestamp = 1; + */ + public boolean hasCompactionTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 compaction_timestamp = 1; + */ + public long getCompactionTimestamp() { + return 
compactionTimestamp_; + } + + private void initFields() { + compactionTimestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCompactionTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, compactionTimestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, compactionTimestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj; + + boolean result = true; + result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp()); + if (hasCompactionTimestamp()) { + result = result && (getCompactionTimestamp() + == other.getCompactionTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasCompactionTimestamp()) { + hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompactionTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent 
parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + compactionTimestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.compactionTimestamp_ = compactionTimestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this; + if (other.hasCompactionTimestamp()) { + setCompactionTimestamp(other.getCompactionTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCompactionTimestamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int64 compaction_timestamp = 1; + private 
long compactionTimestamp_ ; + /** + * required int64 compaction_timestamp = 1; + */ + public boolean hasCompactionTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 compaction_timestamp = 1; + */ + public long getCompactionTimestamp() { + return compactionTimestamp_; + } + /** + * required int64 compaction_timestamp = 1; + */ + public Builder setCompactionTimestamp(long value) { + bitField0_ |= 0x00000001; + compactionTimestamp_ = value; + onChanged(); + return this; + } + /** + * required int64 compaction_timestamp = 1; + */ + public Builder clearCompactionTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + compactionTimestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampResponse) + } + + static { + defaultInstance = new MajorCompactionTimestampResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampResponse) + } + + public interface SecurityCapabilitiesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} + */ + public static final class SecurityCapabilitiesRequest extends + com.google.protobuf.GeneratedMessage + implements SecurityCapabilitiesRequestOrBuilder { + // Use SecurityCapabilitiesRequest.newBuilder() to construct. + private SecurityCapabilitiesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SecurityCapabilitiesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SecurityCapabilitiesRequest defaultInstance; + public static SecurityCapabilitiesRequest getDefaultInstance() { + return defaultInstance; + } + + public SecurityCapabilitiesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SecurityCapabilitiesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SecurityCapabilitiesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SecurityCapabilitiesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.newBuilder() + private Builder() { + 
maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesRequest) + } + + static { + defaultInstance = new SecurityCapabilitiesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesRequest) + } + + public interface SecurityCapabilitiesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + /** + * repeated 
.hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + java.util.List getCapabilitiesList(); + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + int getCapabilitiesCount(); + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index); + } + /** + * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} + */ + public static final class SecurityCapabilitiesResponse extends + com.google.protobuf.GeneratedMessage + implements SecurityCapabilitiesResponseOrBuilder { + // Use SecurityCapabilitiesResponse.newBuilder() to construct. + private SecurityCapabilitiesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ListProceduresRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SecurityCapabilitiesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ListProceduresRequest defaultInstance; - public static ListProceduresRequest getDefaultInstance() { + private static final SecurityCapabilitiesResponse defaultInstance; + public static SecurityCapabilitiesResponse getDefaultInstance() { return defaultInstance; } - public ListProceduresRequest getDefaultInstanceForType() { + public SecurityCapabilitiesResponse getDefaultInstanceForType() { return defaultInstance; } @@ -56158,11 +62038,12 @@ public ListProceduresRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ListProceduresRequest( + private SecurityCapabilitiesResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -56180,6 +62061,39 @@ private ListProceduresRequest( } break; } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + break; + } + case 10: { + int length = input.readRawVarint32(); + int oldLimit = input.pushLimit(length); + while(input.getBytesUntilLimit() > 0) { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + } + input.popLimit(oldLimit); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -56188,38 +62102,173 
@@ private ListProceduresRequest( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SecurityCapabilitiesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SecurityCapabilitiesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); - } + /** + * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} + */ + public enum Capability + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SIMPLE_AUTHENTICATION = 0; + */ + SIMPLE_AUTHENTICATION(0, 0), + /** + * SECURE_AUTHENTICATION = 1; + */ + SECURE_AUTHENTICATION(1, 1), + /** + * AUTHORIZATION = 2; + */ + AUTHORIZATION(2, 2), + /** + * CELL_AUTHORIZATION = 3; + */ + CELL_AUTHORIZATION(3, 3), + /** + * CELL_VISIBILITY = 4; + */ + CELL_VISIBILITY(4, 4), + ; + + /** + * SIMPLE_AUTHENTICATION = 0; + */ + public static final int SIMPLE_AUTHENTICATION_VALUE = 0; + /** + * SECURE_AUTHENTICATION = 1; + */ + public static final int SECURE_AUTHENTICATION_VALUE = 1; + /** + * AUTHORIZATION = 2; + */ + public static final int AUTHORIZATION_VALUE = 2; + /** + * CELL_AUTHORIZATION = 3; + */ + public static final int CELL_AUTHORIZATION_VALUE = 3; + /** + * CELL_VISIBILITY = 4; + */ + public static final int CELL_VISIBILITY_VALUE = 4; + + + public final int getNumber() { return value; } + + public static Capability valueOf(int value) { + switch (value) { + case 0: return SIMPLE_AUTHENTICATION; + case 1: return SECURE_AUTHENTICATION; + case 2: return AUTHORIZATION; + case 3: return CELL_AUTHORIZATION; + case 4: return CELL_VISIBILITY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return 
internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Capability findValueByNumber(int number) { + return Capability.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final Capability[] VALUES = values(); + + public static Capability valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ListProceduresRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ListProceduresRequest(input, extensionRegistry); + private Capability(int index, int value) { + this.index = index; + this.value = value; } - }; - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) + } + + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + public static final int CAPABILITIES_FIELD_NUMBER = 1; + private java.util.List capabilities_; + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public java.util.List getCapabilitiesList() { + return capabilities_; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public int getCapabilitiesCount() { + return capabilities_.size(); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); } private void initFields() { + capabilities_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -56233,6 +62282,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < capabilities_.size(); i++) { + output.writeEnum(1, capabilities_.get(i).getNumber()); + } getUnknownFields().writeTo(output); } @@ -56242,6 +62294,15 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + { + int dataSize = 0; + for (int i = 0; i < capabilities_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); + } + size += dataSize; + size += 1 * capabilities_.size(); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -56259,12 +62320,14 @@ public boolean 
equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) obj; boolean result = true; + result = result && getCapabilitiesList() + .equals(other.getCapabilitiesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -56278,58 +62341,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getCapabilitiesCount() > 0) { + hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; + hash = (53 * hash) + hashEnumList(getCapabilitiesList()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -56338,7 +62405,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -56350,24 +62417,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ListProceduresRequest} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -56387,6 +62454,8 @@ private static Builder create() { public Builder clear() { super.clear(); + capabilities_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -56396,38 +62465,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.capabilities_ = capabilities_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other) { - if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; + if (!other.capabilities_.isEmpty()) { + if (capabilities_.isEmpty()) { + capabilities_ = other.capabilities_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCapabilitiesIsMutable(); + capabilities_.addAll(other.capabilities_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -56440,11 +62525,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -56453,65 +62538,138 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresRequest) + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + private java.util.List capabilities_ = + java.util.Collections.emptyList(); + private void ensureCapabilitiesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(capabilities_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public java.util.List getCapabilitiesList() { + return java.util.Collections.unmodifiableList(capabilities_); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public int getCapabilitiesCount() { + return capabilities_.size(); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder setCapabilities( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.set(index, value); + onChanged(); + return this; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.add(value); + onChanged(); + return this; + } + /** + * repeated 
.hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder addAllCapabilities( + java.lang.Iterable values) { + ensureCapabilitiesIsMutable(); + super.addAll(values, capabilities_); + onChanged(); + return this; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder clearCapabilities() { + capabilities_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) } static { - defaultInstance = new ListProceduresRequest(true); + defaultInstance = new SecurityCapabilitiesResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } - public interface ListProceduresResponseOrBuilder + public interface ClearDeadServersRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.Procedure procedure = 1; + // repeated .hbase.pb.ServerName server_name = 1; /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - java.util.List - getProcedureList(); + java.util.List + getServerNameList(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - int getProcedureCount(); + int getServerNameCount(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - java.util.List - getProcedureOrBuilderList(); + java.util.List + getServerNameOrBuilderList(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( int index); } /** - * Protobuf type {@code hbase.pb.ListProceduresResponse} + * Protobuf type {@code hbase.pb.ClearDeadServersRequest} */ - public static final class ListProceduresResponse extends + public static final class ClearDeadServersRequest extends com.google.protobuf.GeneratedMessage - implements ListProceduresResponseOrBuilder { - // Use ListProceduresResponse.newBuilder() to construct. - private ListProceduresResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ClearDeadServersRequestOrBuilder { + // Use ClearDeadServersRequest.newBuilder() to construct. 
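[reviewer note] For the SecurityCapabilitiesRequest/Response pair completed above, a short round-trip sketch using only methods generated in this hunk plus the inherited toByteArray(); the chosen capability values are arbitrary. Note that an unknown enum number from a newer server is diverted into unknownFields by the parsing loop above rather than surfacing in getCapabilitiesList().

    import java.util.List;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;

    public class SecurityCapabilitiesSketch {
      public static void main(String[] args) throws Exception {
        // The request message carries no fields at all.
        SecurityCapabilitiesRequest request = SecurityCapabilitiesRequest.newBuilder().build();
        // The response holds a repeated Capability enum, written unpacked on the wire
        // (writeEnum per element), though the parser also accepts the packed form.
        SecurityCapabilitiesResponse response = SecurityCapabilitiesResponse.newBuilder()
            .addCapabilities(Capability.SECURE_AUTHENTICATION)
            .addCapabilities(Capability.CELL_VISIBILITY)
            .build();
        // Serialize, then parse back through the generated PARSER-backed parseFrom().
        byte[] wire = response.toByteArray();
        SecurityCapabilitiesResponse parsed = SecurityCapabilitiesResponse.parseFrom(wire);
        List<Capability> caps = parsed.getCapabilitiesList();
        System.out.println("cell visibility: " + caps.contains(Capability.CELL_VISIBILITY));
      }
    }
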
+ private ClearDeadServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ListProceduresResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ClearDeadServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ListProceduresResponse defaultInstance; - public static ListProceduresResponse getDefaultInstance() { + private static final ClearDeadServersRequest defaultInstance; + public static ClearDeadServersRequest getDefaultInstance() { return defaultInstance; } - public ListProceduresResponse getDefaultInstanceForType() { + public ClearDeadServersRequest getDefaultInstanceForType() { return defaultInstance; } @@ -56521,7 +62679,7 @@ public ListProceduresResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ListProceduresResponse( + private ClearDeadServersRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -56546,10 +62704,10 @@ private ListProceduresResponse( } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = new java.util.ArrayList(); + serverName_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } - procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); + serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); break; } } @@ -56561,7 +62719,7 @@ private ListProceduresResponse( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = java.util.Collections.unmodifiableList(procedure_); + serverName_ = java.util.Collections.unmodifiableList(serverName_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -56569,77 +62727,77 @@ private ListProceduresResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ListProceduresResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public 
ClearDeadServersRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ListProceduresResponse(input, extensionRegistry); + return new ClearDeadServersRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.Procedure procedure = 1; - public static final int PROCEDURE_FIELD_NUMBER = 1; - private java.util.List procedure_; + // repeated .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private java.util.List serverName_; /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List getProcedureList() { - return procedure_; + public java.util.List getServerNameList() { + return serverName_; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List - getProcedureOrBuilderList() { - return procedure_; + public java.util.List + getServerNameOrBuilderList() { + return serverName_; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public int getProcedureCount() { - return procedure_.size(); + public int getServerNameCount() { + return serverName_.size(); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { - return procedure_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + return serverName_.get(index); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( int index) { - return procedure_.get(index); + return serverName_.get(index); } private void initFields() { - procedure_ = java.util.Collections.emptyList(); + serverName_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getProcedureCount(); i++) { - if (!getProcedure(i).isInitialized()) { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -56651,8 +62809,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < procedure_.size(); i++) { - output.writeMessage(1, procedure_.get(i)); + for (int i = 0; i < serverName_.size(); i++) { + output.writeMessage(1, serverName_.get(i)); } getUnknownFields().writeTo(output); } @@ -56663,9 +62821,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < procedure_.size(); i++) { + for (int i = 0; i < serverName_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, procedure_.get(i)); + .computeMessageSize(1, 
serverName_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -56684,14 +62842,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) obj; boolean result = true; - result = result && getProcedureList() - .equals(other.getProcedureList()); + result = result && getServerNameList() + .equals(other.getServerNameList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -56705,62 +62863,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getProcedureCount() > 0) { - hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; - hash = (53 * hash) + getProcedureList().hashCode(); + if (getServerNameCount() > 0) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerNameList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static 
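The parseFrom overloads in this region follow the standard protobuf 2.5 message surface, so a ClearDeadServersRequest survives a byte-array round trip. A self-contained sketch; the host name, port, and start code are made-up values:

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest;

public final class ClearDeadServersRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    ClearDeadServersRequest request = ClearDeadServersRequest.newBuilder()
        .addServerName(ServerName.newBuilder()
            .setHostName("rs1.example.com") // hypothetical dead region server
            .setPort(16020)
            .setStartCode(1500000000000L)
            .build())
        .build();
    byte[] wire = request.toByteArray();                               // serialize
    ClearDeadServersRequest copy = ClearDeadServersRequest.parseFrom(wire); // one of the overloads above
    assert copy.getServerNameCount() == 1;
  }
}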
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -56769,7 +62927,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -56781,24 +62939,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ListProceduresResponse} + * Protobuf type {@code hbase.pb.ClearDeadServersRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -56810,7 +62968,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getProcedureFieldBuilder(); + getServerNameFieldBuilder(); } } private static Builder create() { @@ -56819,11 +62977,11 @@ private static Builder create() { public Builder clear() { super.clear(); - if (procedureBuilder_ == null) { - procedure_ = java.util.Collections.emptyList(); + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - procedureBuilder_.clear(); + serverNameBuilder_.clear(); } return this; } @@ -56834,71 +62992,71 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest(this); int from_bitField0_ = bitField0_; - if (procedureBuilder_ == null) { + if (serverNameBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = java.util.Collections.unmodifiableList(procedure_); + serverName_ = java.util.Collections.unmodifiableList(serverName_); bitField0_ = (bitField0_ & 
~0x00000001); } - result.procedure_ = procedure_; + result.serverName_ = serverName_; } else { - result.procedure_ = procedureBuilder_.build(); + result.serverName_ = serverNameBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()) return this; - if (procedureBuilder_ == null) { - if (!other.procedure_.isEmpty()) { - if (procedure_.isEmpty()) { - procedure_ = other.procedure_; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance()) return this; + if (serverNameBuilder_ == null) { + if (!other.serverName_.isEmpty()) { + if (serverName_.isEmpty()) { + serverName_ = other.serverName_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureProcedureIsMutable(); - procedure_.addAll(other.procedure_); + ensureServerNameIsMutable(); + serverName_.addAll(other.serverName_); } onChanged(); } } else { - if (!other.procedure_.isEmpty()) { - if (procedureBuilder_.isEmpty()) { - procedureBuilder_.dispose(); - procedureBuilder_ = null; - procedure_ = other.procedure_; + if (!other.serverName_.isEmpty()) { + if (serverNameBuilder_.isEmpty()) { + serverNameBuilder_.dispose(); + serverNameBuilder_ = null; + serverName_ = other.serverName_; bitField0_ = (bitField0_ & ~0x00000001); - procedureBuilder_ = + serverNameBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getProcedureFieldBuilder() : null; + getServerNameFieldBuilder() : null; } else { - procedureBuilder_.addAllMessages(other.procedure_); + serverNameBuilder_.addAllMessages(other.serverName_); } } } @@ -56907,8 +63065,8 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos } public final boolean isInitialized() { - for (int i = 0; i < getProcedureCount(); i++) { - if (!getProcedure(i).isInitialized()) { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { return false; } @@ -56920,11 +63078,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -56935,372 +63093,304 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.Procedure procedure = 1; - private java.util.List procedure_ = + // repeated .hbase.pb.ServerName server_name = 1; + private java.util.List serverName_ = java.util.Collections.emptyList(); - private void ensureProcedureIsMutable() { + private void ensureServerNameIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = new java.util.ArrayList(procedure_); + serverName_ = new java.util.ArrayList(serverName_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List getProcedureList() { - if (procedureBuilder_ == null) { - return java.util.Collections.unmodifiableList(procedure_); + public java.util.List getServerNameList() { + if (serverNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverName_); } else { - return procedureBuilder_.getMessageList(); + return serverNameBuilder_.getMessageList(); } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public int getProcedureCount() { - if (procedureBuilder_ == null) { - return procedure_.size(); + public int getServerNameCount() { + if (serverNameBuilder_ == null) { + return serverName_.size(); } else { - return procedureBuilder_.getCount(); + return serverNameBuilder_.getCount(); } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { - if (procedureBuilder_ == null) { - return procedure_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); } else { - return procedureBuilder_.getMessage(index); + return serverNameBuilder_.getMessage(index); } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder setProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureProcedureIsMutable(); - procedure_.set(index, value); + ensureServerNameIsMutable(); + serverName_.set(index, value); onChanged(); } else { - procedureBuilder_.setMessage(index, value); + serverNameBuilder_.setMessage(index, value); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder setProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.set(index, builderForValue.build()); + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.set(index, builderForValue.build()); onChanged(); } else { - procedureBuilder_.setMessage(index, builderForValue.build()); + serverNameBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { + public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureProcedureIsMutable(); - procedure_.add(value); + ensureServerNameIsMutable(); + serverName_.add(value); onChanged(); } else { - procedureBuilder_.addMessage(value); + serverNameBuilder_.addMessage(value); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureProcedureIsMutable(); - procedure_.add(index, value); + ensureServerNameIsMutable(); + serverName_.add(index, value); onChanged(); } else { - procedureBuilder_.addMessage(index, value); + serverNameBuilder_.addMessage(index, value); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addProcedure( - 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.add(builderForValue.build()); + public Builder addServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(builderForValue.build()); onChanged(); } else { - procedureBuilder_.addMessage(builderForValue.build()); + serverNameBuilder_.addMessage(builderForValue.build()); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.add(index, builderForValue.build()); + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(index, builderForValue.build()); onChanged(); } else { - procedureBuilder_.addMessage(index, builderForValue.build()); + serverNameBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addAllProcedure( - java.lang.Iterable values) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - super.addAll(values, procedure_); + public Builder addAllServerName( + java.lang.Iterable values) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + super.addAll(values, serverName_); onChanged(); } else { - procedureBuilder_.addAllMessages(values); + serverNameBuilder_.addAllMessages(values); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder clearProcedure() { - if (procedureBuilder_ == null) { - procedure_ = java.util.Collections.emptyList(); + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - procedureBuilder_.clear(); + serverNameBuilder_.clear(); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder removeProcedure(int index) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.remove(index); + public Builder removeServerName(int index) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.remove(index); onChanged(); } else { - procedureBuilder_.remove(index); + serverNameBuilder_.remove(index); } return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( int index) { - return getProcedureFieldBuilder().getBuilder(index); + return getServerNameFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( int index) { - if (procedureBuilder_ == null) { - return procedure_.get(index); } else { - return procedureBuilder_.getMessageOrBuilder(index); + if (serverNameBuilder_ == null) { + return serverName_.get(index); } else { + return serverNameBuilder_.getMessageOrBuilder(index); } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List - getProcedureOrBuilderList() { - if (procedureBuilder_ != null) { - return procedureBuilder_.getMessageOrBuilderList(); + public java.util.List + getServerNameOrBuilderList() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(procedure_); + return java.util.Collections.unmodifiableList(serverName_); } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { - return getProcedureFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { + return getServerNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( int index) { - return getProcedureFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + return getServerNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List - getProcedureBuilderList() { - return getProcedureFieldBuilder().getBuilderList(); + public java.util.List + getServerNameBuilderList() { + return getServerNameFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> - getProcedureFieldBuilder() { - if (procedureBuilder_ == null) { - procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( - procedure_, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ 
= new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - procedure_ = null; + serverName_ = null; } - return procedureBuilder_; + return serverNameBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresResponse) - } - - static { - defaultInstance = new ListProceduresResponse(true); - defaultInstance.initFields(); + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersRequest) } - // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresResponse) - } - - public interface SetQuotaRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string user_name = 1; - /** - * optional string user_name = 1; - */ - boolean hasUserName(); - /** - * optional string user_name = 1; - */ - java.lang.String getUserName(); - /** - * optional string user_name = 1; - */ - com.google.protobuf.ByteString - getUserNameBytes(); - - // optional string user_group = 2; - /** - * optional string user_group = 2; - */ - boolean hasUserGroup(); - /** - * optional string user_group = 2; - */ - java.lang.String getUserGroup(); - /** - * optional string user_group = 2; - */ - com.google.protobuf.ByteString - getUserGroupBytes(); - - // optional string namespace = 3; - /** - * optional string namespace = 3; - */ - boolean hasNamespace(); - /** - * optional string namespace = 3; - */ - java.lang.String getNamespace(); - /** - * optional string namespace = 3; - */ - com.google.protobuf.ByteString - getNamespaceBytes(); - - // optional .hbase.pb.TableName table_name = 4; - /** - * optional .hbase.pb.TableName table_name = 4; - */ - boolean hasTableName(); - /** - * optional .hbase.pb.TableName table_name = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); - /** - * optional .hbase.pb.TableName table_name = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + static { + defaultInstance = new ClearDeadServersRequest(true); + defaultInstance.initFields(); + } - // optional bool remove_all = 5; - /** - * optional bool remove_all = 5; - */ - boolean hasRemoveAll(); - /** - * optional bool remove_all = 5; - */ - boolean getRemoveAll(); + // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersRequest) + } - // optional bool bypass_globals = 6; + public interface ClearDeadServersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.ServerName server_name = 1; /** - * optional bool bypass_globals = 6; + * repeated .hbase.pb.ServerName server_name = 1; */ - boolean hasBypassGlobals(); + java.util.List + getServerNameList(); /** - * optional bool bypass_globals = 6; + * repeated .hbase.pb.ServerName server_name = 1; */ - boolean getBypassGlobals(); - - // optional .hbase.pb.ThrottleRequest throttle = 7; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - boolean hasThrottle(); + int getServerNameCount(); /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - 
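ClearDeadServersResponse, declared next, echoes the same repeated server_name field. A short handling sketch; the interpretation that the returned list holds the servers the master could not clear mirrors HBase's Admin#clearDeadServers contract and is an assumption here:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse;

public final class ClearDeadServersResult {
  // Prints any servers echoed back by the master; per the assumed Admin API
  // contract, these are the servers that could NOT be cleared.
  public static void report(ClearDeadServersResponse response) {
    for (ServerName sn : response.getServerNameList()) {
      System.out.println("not cleared: " + sn.getHostName() + ":" + sn.getPort());
    }
  }
}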
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle(); + java.util.List + getServerNameOrBuilderList(); /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.SetQuotaRequest} + * Protobuf type {@code hbase.pb.ClearDeadServersResponse} */ - public static final class SetQuotaRequest extends + public static final class ClearDeadServersResponse extends com.google.protobuf.GeneratedMessage - implements SetQuotaRequestOrBuilder { - // Use SetQuotaRequest.newBuilder() to construct. - private SetQuotaRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ClearDeadServersResponseOrBuilder { + // Use ClearDeadServersResponse.newBuilder() to construct. + private ClearDeadServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetQuotaRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ClearDeadServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetQuotaRequest defaultInstance; - public static SetQuotaRequest getDefaultInstance() { + private static final ClearDeadServersResponse defaultInstance; + public static ClearDeadServersResponse getDefaultInstance() { return defaultInstance; } - public SetQuotaRequest getDefaultInstanceForType() { + public ClearDeadServersResponse getDefaultInstanceForType() { return defaultInstance; } @@ -57310,7 +63400,7 @@ public SetQuotaRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetQuotaRequest( + private ClearDeadServersResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -57334,54 +63424,11 @@ private SetQuotaRequest( break; } case 10: { - bitField0_ |= 0x00000001; - userName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - userGroup_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - namespace_ = input.readBytes(); - break; - } - case 34: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; - break; - } - case 40: { - bitField0_ |= 0x00000010; - removeAll_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - bypassGlobals_ = input.readBool(); - break; - } - case 58: { - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder subBuilder = null; - if (((bitField0_ & 0x00000040) == 0x00000040)) { - subBuilder = throttle_.toBuilder(); - } - throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.PARSER, extensionRegistry); - if (subBuilder != null) { - 
subBuilder.mergeFrom(throttle_); - throttle_ = subBuilder.buildPartial(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - bitField0_ |= 0x00000040; + serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); break; } } @@ -57392,265 +63439,86 @@ private SetQuotaRequest( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetQuotaRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SetQuotaRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string user_name = 1; - public static final int USER_NAME_FIELD_NUMBER = 1; - private java.lang.Object userName_; - /** - * optional string user_name = 1; - */ - public boolean hasUserName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string user_name = 1; - */ - public java.lang.String getUserName() { - java.lang.Object ref = userName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userName_ = s; - } - return s; - } - } - /** - * optional string user_name = 1; - */ - public com.google.protobuf.ByteString - getUserNameBytes() { - java.lang.Object ref = userName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string user_group = 2; - public static final int USER_GROUP_FIELD_NUMBER = 2; - private java.lang.Object userGroup_; - /** - * optional string user_group = 2; - */ - public boolean hasUserGroup() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string user_group = 2; - */ - public java.lang.String getUserGroup() { - java.lang.Object ref = userGroup_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userGroup_ = s; - } - return s; - } - } - /** - * optional string user_group = 
2; - */ - public com.google.protobuf.ByteString - getUserGroupBytes() { - java.lang.Object ref = userGroup_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userGroup_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string namespace = 3; - public static final int NAMESPACE_FIELD_NUMBER = 3; - private java.lang.Object namespace_; - /** - * optional string namespace = 3; - */ - public boolean hasNamespace() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string namespace = 3; - */ - public java.lang.String getNamespace() { - java.lang.Object ref = namespace_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - namespace_ = s; + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = java.util.Collections.unmodifiableList(serverName_); } - return s; + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } } - /** - * optional string namespace = 3; - */ - public com.google.protobuf.ByteString - getNamespaceBytes() { - java.lang.Object ref = namespace_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - namespace_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; } - // optional .hbase.pb.TableName table_name = 4; - public static final int TABLE_NAME_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); } - // optional bool remove_all = 5; - public static final int REMOVE_ALL_FIELD_NUMBER = 5; - private boolean removeAll_; - /** - * optional bool remove_all = 5; - */ - public boolean hasRemoveAll() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool remove_all = 5; - */ - public boolean getRemoveAll() { - return removeAll_; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearDeadServersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClearDeadServersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - // optional bool bypass_globals = 6; - public static final int BYPASS_GLOBALS_FIELD_NUMBER = 6; - private boolean bypassGlobals_; + // repeated .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private java.util.List serverName_; /** - * optional bool bypass_globals = 6; + * repeated .hbase.pb.ServerName server_name = 1; */ - public boolean hasBypassGlobals() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public java.util.List getServerNameList() { + return serverName_; } /** - * optional bool bypass_globals = 6; + * repeated .hbase.pb.ServerName server_name = 1; */ - public boolean getBypassGlobals() { - return bypassGlobals_; + public java.util.List + getServerNameOrBuilderList() { + return serverName_; } - - // optional .hbase.pb.ThrottleRequest throttle = 7; - public static final int THROTTLE_FIELD_NUMBER = 7; - private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_; /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - public boolean hasThrottle() { - return ((bitField0_ & 0x00000040) == 0x00000040); + public int getServerNameCount() { + return serverName_.size(); } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { - return throttle_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + return serverName_.get(index); } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { - return throttle_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + return serverName_.get(index); } private void initFields() { - userName_ = ""; - userGroup_ = ""; - namespace_ = ""; - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - removeAll_ = false; - bypassGlobals_ = false; - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + serverName_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasTableName()) { - if (!getTableName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasThrottle()) { - if (!getThrottle().isInitialized()) { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -57662,26 +63530,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getUserNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, 
getUserGroupBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getNamespaceBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, tableName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, removeAll_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBool(6, bypassGlobals_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeMessage(7, throttle_); + for (int i = 0; i < serverName_.size(); i++) { + output.writeMessage(1, serverName_.get(i)); } getUnknownFields().writeTo(output); } @@ -57692,33 +63542,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getUserNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getUserGroupBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getNamespaceBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tableName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, removeAll_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(6, bypassGlobals_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { + for (int i = 0; i < serverName_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, throttle_); + .computeMessageSize(1, serverName_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -57737,47 +63563,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) obj; boolean result = true; - result = result && (hasUserName() == other.hasUserName()); - if (hasUserName()) { - result = result && getUserName() - .equals(other.getUserName()); - } - result = result && (hasUserGroup() == other.hasUserGroup()); - if (hasUserGroup()) { - result = result && getUserGroup() - .equals(other.getUserGroup()); - } - result = result && (hasNamespace() == other.hasNamespace()); - if (hasNamespace()) { - result = result && getNamespace() - .equals(other.getNamespace()); - } - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } - result = result && (hasRemoveAll() == other.hasRemoveAll()); - if (hasRemoveAll()) { - result = result && (getRemoveAll() - == other.getRemoveAll()); - } - result = result && (hasBypassGlobals() == other.hasBypassGlobals()); - if (hasBypassGlobals()) { - result = result && (getBypassGlobals() - == 
other.getBypassGlobals()); - } - result = result && (hasThrottle() == other.hasThrottle()); - if (hasThrottle()) { - result = result && getThrottle() - .equals(other.getThrottle()); - } + result = result && getServerNameList() + .equals(other.getServerNameList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -57789,88 +63582,64 @@ public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasUserName()) { - hash = (37 * hash) + USER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getUserName().hashCode(); - } - if (hasUserGroup()) { - hash = (37 * hash) + USER_GROUP_FIELD_NUMBER; - hash = (53 * hash) + getUserGroup().hashCode(); - } - if (hasNamespace()) { - hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; - hash = (53 * hash) + getNamespace().hashCode(); - } - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } - if (hasRemoveAll()) { - hash = (37 * hash) + REMOVE_ALL_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getRemoveAll()); - } - if (hasBypassGlobals()) { - hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getBypassGlobals()); - } - if (hasThrottle()) { - hash = (37 * hash) + THROTTLE_FIELD_NUMBER; - hash = (53 * hash) + getThrottle().hashCode(); - } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getServerNameCount() > 0) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerNameList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -57879,7 +63648,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -57891,24 +63660,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetQuotaRequest} + * Protobuf type {@code hbase.pb.ClearDeadServersResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( 
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -57918,741 +63687,948 @@ private Builder( super(parent); maybeForceBuilderInitialization(); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); - getThrottleFieldBuilder(); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + serverNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse(this); + int from_bitField0_ = bitField0_; + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = java.util.Collections.unmodifiableList(serverName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()) return this; + if (serverNameBuilder_ == null) { + if (!other.serverName_.isEmpty()) { 
+ if (serverName_.isEmpty()) { + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureServerNameIsMutable(); + serverName_.addAll(other.serverName_); + } + onChanged(); + } + } else { + if (!other.serverName_.isEmpty()) { + if (serverNameBuilder_.isEmpty()) { + serverNameBuilder_.dispose(); + serverNameBuilder_ = null; + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + serverNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerNameFieldBuilder() : null; + } else { + serverNameBuilder_.addAllMessages(other.serverName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.ServerName server_name = 1; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> serverName_ = + java.util.Collections.emptyList(); + private void ensureServerNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(serverName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServerNameList() { + if (serverNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverName_); + } else { + return serverNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public int getServerNameCount() { + if (serverNameBuilder_ == null) { + return serverName_.size(); + } else { + return serverNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); + } else { + return serverNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.set(index, value); + onChanged(); + } else { + serverNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.set(index, builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.add(value); + onChanged(); + } else { + serverNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.add(index, value); + onChanged(); + } else { + serverNameBuilder_.addMessage(index, value); } + return this; } - private static Builder create() { - return new Builder(); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.addMessage(builderForValue.build()); + } + return this; } - - public Builder clear() { - super.clear(); - userName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - userGroup_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - namespace_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(index, builderForValue.build()); + onChanged(); } else { - tableNameBuilder_.clear(); + serverNameBuilder_.addMessage(index, builderForValue.build()); } - bitField0_ = (bitField0_ & ~0x00000008); - removeAll_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - bypassGlobals_ = false; - bitField0_ = (bitField0_ & ~0x00000020); - if (throttleBuilder_ == null) { - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addAllServerName( + java.lang.Iterable values) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + super.addAll(values, serverName_); + onChanged(); } else { - throttleBuilder_.clear(); + serverNameBuilder_.addAllMessages(values); } - bitField0_ = (bitField0_ & ~0x00000040); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + return this; } - 
- public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder removeServerName(int index) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.remove(index); + onChanged(); + } else { + serverNameBuilder_.remove(index); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( + int index) { + return getServerNameFieldBuilder().getBuilder(index); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); } else { + return serverNameBuilder_.getMessageOrBuilder(index); } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.userName_ = userName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.userGroup_ = userGroup_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.namespace_ = namespace_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameOrBuilderList() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilderList(); } else { - result.tableName_ = tableNameBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.removeAll_ = removeAll_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.bypassGlobals_ = bypassGlobals_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; + return java.util.Collections.unmodifiableList(serverName_); } - if (throttleBuilder_ == null) { - result.throttle_ = throttle_; - } else { - result.throttle_ = throttleBuilder_.build(); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { + return getServerNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * 
repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( + int index) { + return getServerNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameBuilderList() { + return getServerNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + serverName_ = null; } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + return serverNameBuilder_; } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersResponse) + } + + static { + defaultInstance = new ClearDeadServersResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersResponse) + } + + public interface SetSnapshotCleanupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool enabled = 1; + /** + * required bool enabled = 1; + */ + boolean hasEnabled(); + /** + * required bool enabled = 1; + */ + boolean getEnabled(); + + // optional bool synchronous = 2; + /** + * optional bool synchronous = 2; + */ + boolean hasSynchronous(); + /** + * optional bool synchronous = 2; + */ + boolean getSynchronous(); + } + /** + * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} + */ + public static final class SetSnapshotCleanupRequest extends + com.google.protobuf.GeneratedMessage + implements SetSnapshotCleanupRequestOrBuilder { + // Use SetSnapshotCleanupRequest.newBuilder() to construct. 
+ private SetSnapshotCleanupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SetSnapshotCleanupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetSnapshotCleanupRequest defaultInstance; + public static SetSnapshotCleanupRequest getDefaultInstance() { + return defaultInstance; + } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance()) return this; - if (other.hasUserName()) { - bitField0_ |= 0x00000001; - userName_ = other.userName_; - onChanged(); - } - if (other.hasUserGroup()) { - bitField0_ |= 0x00000002; - userGroup_ = other.userGroup_; - onChanged(); - } - if (other.hasNamespace()) { - bitField0_ |= 0x00000004; - namespace_ = other.namespace_; - onChanged(); - } - if (other.hasTableName()) { - mergeTableName(other.getTableName()); - } - if (other.hasRemoveAll()) { - setRemoveAll(other.getRemoveAll()); - } - if (other.hasBypassGlobals()) { - setBypassGlobals(other.getBypassGlobals()); - } - if (other.hasThrottle()) { - mergeThrottle(other.getThrottle()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } + public SetSnapshotCleanupRequest getDefaultInstanceForType() { + return defaultInstance; + } - public final boolean isInitialized() { - if (hasTableName()) { - if (!getTableName().isInitialized()) { - - return false; - } - } - if (hasThrottle()) { - if (!getThrottle().isInitialized()) { - - return false; + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SetSnapshotCleanupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + enabled_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + synchronous_ = input.readBool(); + break; + } } } - return true; + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; + } - public Builder mergeFrom( + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); + } + + public static com.google.protobuf.Parser<SetSnapshotCleanupRequest> PARSER = + new com.google.protobuf.AbstractParser<SetSnapshotCleanupRequest>() { + public SetSnapshotCleanupRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetSnapshotCleanupRequest(input, extensionRegistry); } - private int bitField0_; + }; - // optional string user_name = 1; - private java.lang.Object userName_ = ""; - /** - * optional string user_name = 1; - */ - public boolean hasUserName() { - return ((bitField0_ & 0x00000001) == 0x00000001); + @java.lang.Override + public com.google.protobuf.Parser<SetSnapshotCleanupRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; + /** + * required bool enabled = 1; + */ + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool enabled = 1; + */ + public boolean getEnabled() { + return enabled_; + } + + // optional bool synchronous = 2; + public static final int SYNCHRONOUS_FIELD_NUMBER = 2; + private boolean synchronous_; + /** + * optional bool synchronous = 2; + */ + public boolean hasSynchronous() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool synchronous = 2; + */ + public boolean getSynchronous() { + return synchronous_; + } + + private void initFields() { + enabled_ = false; + synchronous_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEnabled()) { + memoizedIsInitialized = 0; + return false; } - /** - * optional string user_name = 1; - */ - public java.lang.String getUserName() { - java.lang.Object ref = userName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - userName_ = s; - return s; - } else { - return (java.lang.String) ref; - } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, enabled_); } - /** - * optional string user_name = 1; - */ - public com.google.protobuf.ByteString - getUserNameBytes() { - java.lang.Object ref = userName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + if (((bitField0_ & 0x00000002) == 0x00000002)) { +
output.writeBool(2, synchronous_); } - /** - * optional string user_name = 1; - */ - public Builder setUserName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - userName_ = value; - onChanged(); - return this; + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, enabled_); } - /** - * optional string user_name = 1; - */ - public Builder clearUserName() { - bitField0_ = (bitField0_ & ~0x00000001); - userName_ = getDefaultInstance().getUserName(); - onChanged(); - return this; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, synchronous_); } - /** - * optional string user_name = 1; - */ - public Builder setUserNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - userName_ = value; - onChanged(); - return this; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)) { + return super.equals(obj); } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) obj; - // optional string user_group = 2; - private java.lang.Object userGroup_ = ""; - /** - * optional string user_group = 2; - */ - public boolean hasUserGroup() { - return ((bitField0_ & 0x00000002) == 0x00000002); + boolean result = true; + result = result && (hasEnabled() == other.hasEnabled()); + if (hasEnabled()) { + result = result && (getEnabled() + == other.getEnabled()); } - /** - * optional string user_group = 2; - */ - public java.lang.String getUserGroup() { - java.lang.Object ref = userGroup_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - userGroup_ = s; - return s; - } else { - return (java.lang.String) ref; - } + result = result && (hasSynchronous() == other.hasSynchronous()); + if (hasSynchronous()) { + result = result && (getSynchronous() + == other.getSynchronous()); } - /** - * optional string user_group = 2; - */ - public com.google.protobuf.ByteString - getUserGroupBytes() { - java.lang.Object ref = userGroup_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userGroup_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - /** - * optional string user_group = 2; - */ - public Builder setUserGroup( - 
java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userGroup_ = value; - onChanged(); - return this; + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasEnabled()) { + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabled()); } - /** - * optional string user_group = 2; - */ - public Builder clearUserGroup() { - bitField0_ = (bitField0_ & ~0x00000002); - userGroup_ = getDefaultInstance().getUserGroup(); - onChanged(); - return this; + if (hasSynchronous()) { + hash = (37 * hash) + SYNCHRONOUS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSynchronous()); } - /** - * optional string user_group = 2; - */ - public Builder setUserGroupBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userGroup_ = value; - onChanged(); - return this; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; } - // optional string namespace = 3; - private java.lang.Object namespace_ = ""; - /** - * optional string namespace = 3; - */ - public boolean hasNamespace() { - return ((bitField0_ & 0x00000004) == 0x00000004); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); } - /** - * optional string namespace = 3; - */ - public java.lang.String getNamespace() { - java.lang.Object ref = namespace_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - namespace_ = s; - return s; - } else { - return (java.lang.String) ref; - } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - /** - * optional string namespace = 3; - */ - public com.google.protobuf.ByteString - getNamespaceBytes() { - java.lang.Object ref = namespace_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - namespace_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - /** - * optional string namespace = 3; - */ - public Builder setNamespace( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - namespace_ = value; - onChanged(); - return this; + private static Builder create() { + return new Builder(); } - /** - * optional string namespace = 3; - */ - public Builder clearNamespace() { - bitField0_ = (bitField0_ & ~0x00000004); - namespace_ = getDefaultInstance().getNamespace(); - 
onChanged(); + + public Builder clear() { + super.clear(); + enabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + synchronous_ = false; + bitField0_ = (bitField0_ & ~0x00000002); return this; } - /** - * optional string namespace = 3; - */ - public Builder setNamespaceBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - namespace_ = value; - onChanged(); - return this; + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - // optional .hbase.pb.TableName table_name = 4; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - if (tableNameBuilder_ == null) { - return tableName_; - } else { - return tableNameBuilder_.getMessage(); - } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance(); } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - tableName_ = value; - onChanged(); - } else { - tableNameBuilder_.setMessage(value); + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - bitField0_ |= 0x00000008; - return this; + return result; } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public Builder setTableName( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableNameBuilder_ == null) { - tableName_ = builderForValue.build(); - onChanged(); - } else { - tableNameBuilder_.setMessage(builderForValue.build()); + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public Builder 
mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - tableName_ = - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); - } else { - tableName_ = value; - } - onChanged(); - } else { - tableNameBuilder_.mergeFrom(value); + result.enabled_ = enabled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } - bitField0_ |= 0x00000008; - return this; + result.synchronous_ = synchronous_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)other); } else { - tableNameBuilder_.clear(); + super.mergeFrom(other); + return this; } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getTableNameFieldBuilder().getBuilder(); } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); - } else { - return tableName_; + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); } - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, - getParentForChildren(), - isClean()); - tableName_ = null; + if (other.hasSynchronous()) { + setSynchronous(other.getSynchronous()); } - return tableNameBuilder_; + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - // optional bool remove_all = 5; - private boolean removeAll_ ; - /** - * optional bool remove_all = 5; - */ - public boolean hasRemoveAll() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool remove_all = 5; - */ - public boolean getRemoveAll() { - return 
removeAll_; - } - /** - * optional bool remove_all = 5; - */ - public Builder setRemoveAll(boolean value) { - bitField0_ |= 0x00000010; - removeAll_ = value; - onChanged(); - return this; + public final boolean isInitialized() { + if (!hasEnabled()) { + + return false; + } + return true; } - /** - * optional bool remove_all = 5; - */ - public Builder clearRemoveAll() { - bitField0_ = (bitField0_ & ~0x00000010); - removeAll_ = false; - onChanged(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } + private int bitField0_; - // optional bool bypass_globals = 6; - private boolean bypassGlobals_ ; + // required bool enabled = 1; + private boolean enabled_ ; /** - * optional bool bypass_globals = 6; + * required bool enabled = 1; */ - public boolean hasBypassGlobals() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional bool bypass_globals = 6; + * required bool enabled = 1; */ - public boolean getBypassGlobals() { - return bypassGlobals_; + public boolean getEnabled() { + return enabled_; } /** - * optional bool bypass_globals = 6; + * required bool enabled = 1; */ - public Builder setBypassGlobals(boolean value) { - bitField0_ |= 0x00000020; - bypassGlobals_ = value; + public Builder setEnabled(boolean value) { + bitField0_ |= 0x00000001; + enabled_ = value; onChanged(); return this; } /** - * optional bool bypass_globals = 6; + * required bool enabled = 1; */ - public Builder clearBypassGlobals() { - bitField0_ = (bitField0_ & ~0x00000020); - bypassGlobals_ = false; + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; onChanged(); return this; } - // optional .hbase.pb.ThrottleRequest throttle = 7; - private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> throttleBuilder_; - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public boolean hasThrottle() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { - if (throttleBuilder_ == null) { - return throttle_; - } else { - return throttleBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { - if (throttleBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - throttle_ = value; - onChanged(); - } else { - throttleBuilder_.setMessage(value); - } - bitField0_ |= 0x00000040; - return this; - } + // optional bool synchronous = 2; + private boolean synchronous_ ; /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * optional bool synchronous = 2; */ - public Builder setThrottle( - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder builderForValue) { - if (throttleBuilder_ == null) { - throttle_ = builderForValue.build(); - onChanged(); - } else { - throttleBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000040; - return this; + public boolean hasSynchronous() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * optional bool synchronous = 2; */ - public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { - if (throttleBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040) && - throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) { - throttle_ = - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder(throttle_).mergeFrom(value).buildPartial(); - } else { - throttle_ = value; - } - onChanged(); - } else { - throttleBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000040; - return this; + public boolean getSynchronous() { + return synchronous_; } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * optional bool synchronous = 2; */ - public Builder clearThrottle() { - if (throttleBuilder_ == null) { - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); - onChanged(); - } else { - throttleBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); + public Builder setSynchronous(boolean value) { + bitField0_ |= 0x00000002; + synchronous_ = value; + onChanged(); return this; } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * optional bool synchronous = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder getThrottleBuilder() { - bitField0_ |= 0x00000040; + public Builder clearSynchronous() { + bitField0_ = (bitField0_ & ~0x00000002); + synchronous_ = false; onChanged(); - return getThrottleFieldBuilder().getBuilder(); - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { - if (throttleBuilder_ != null) { - return throttleBuilder_.getMessageOrBuilder(); - } else { - return throttle_; - } - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> - getThrottleFieldBuilder() { - if (throttleBuilder_ == null) { - throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>( - throttle_, - getParentForChildren(), - isClean()); - throttle_ = null; - } - return throttleBuilder_; + return this; } - // 
@@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupRequest) } static { - defaultInstance = new SetQuotaRequest(true); + defaultInstance = new SetSnapshotCleanupRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupRequest) } - public interface SetQuotaResponseOrBuilder + public interface SetSnapshotCleanupResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required bool prev_snapshot_cleanup = 1; + /** + * required bool prev_snapshot_cleanup = 1; + */ + boolean hasPrevSnapshotCleanup(); + /** + * required bool prev_snapshot_cleanup = 1; + */ + boolean getPrevSnapshotCleanup(); } /** - * Protobuf type {@code hbase.pb.SetQuotaResponse} + * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse} */ - public static final class SetQuotaResponse extends + public static final class SetSnapshotCleanupResponse extends com.google.protobuf.GeneratedMessage - implements SetQuotaResponseOrBuilder { - // Use SetQuotaResponse.newBuilder() to construct. - private SetQuotaResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements SetSnapshotCleanupResponseOrBuilder { + // Use SetSnapshotCleanupResponse.newBuilder() to construct. + private SetSnapshotCleanupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetQuotaResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SetSnapshotCleanupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetQuotaResponse defaultInstance; - public static SetQuotaResponse getDefaultInstance() { + private static final SetSnapshotCleanupResponse defaultInstance; + public static SetSnapshotCleanupResponse getDefaultInstance() { return defaultInstance; } - public SetQuotaResponse getDefaultInstanceForType() { + public SetSnapshotCleanupResponse getDefaultInstanceForType() { return defaultInstance; } @@ -58662,11 +64638,12 @@ public SetQuotaResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetQuotaResponse( + private SetSnapshotCleanupResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -58684,6 +64661,11 @@ private SetQuotaResponse( } break; } + case 8: { + bitField0_ |= 0x00000001; + prevSnapshotCleanup_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -58698,38 +64680,60 @@ private SetQuotaResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); } - public static com.google.protobuf.Parser<SetQuotaResponse> PARSER = - new com.google.protobuf.AbstractParser<SetQuotaResponse>() { - public SetQuotaResponse parsePartialFrom( + public static com.google.protobuf.Parser<SetSnapshotCleanupResponse> PARSER = + new com.google.protobuf.AbstractParser<SetSnapshotCleanupResponse>() { + public SetSnapshotCleanupResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SetQuotaResponse(input, extensionRegistry); + return new SetSnapshotCleanupResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser<SetQuotaResponse> getParserForType() { + public com.google.protobuf.Parser<SetSnapshotCleanupResponse> getParserForType() { return PARSER; } + private int bitField0_; + // required bool prev_snapshot_cleanup = 1; + public static final int PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER = 1; + private boolean prevSnapshotCleanup_; + /** + * required bool prev_snapshot_cleanup = 1; + */ + public boolean hasPrevSnapshotCleanup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool prev_snapshot_cleanup = 1; + */ + public boolean getPrevSnapshotCleanup() { + return prevSnapshotCleanup_; + } + private void initFields() { + prevSnapshotCleanup_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasPrevSnapshotCleanup()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -58737,6 +64741,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, prevSnapshotCleanup_); + } getUnknownFields().writeTo(output); } @@ -58746,6 +64753,10 @@ public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, prevSnapshotCleanup_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -58763,12 +64774,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) obj; - - boolean result = true; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other =
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) obj; + + boolean result = true; + result = result && (hasPrevSnapshotCleanup() == other.hasPrevSnapshotCleanup()); + if (hasPrevSnapshotCleanup()) { + result = result && (getPrevSnapshotCleanup() + == other.getPrevSnapshotCleanup()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -58782,58 +64798,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPrevSnapshotCleanup()) { + hash = (37 * hash) + PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPrevSnapshotCleanup()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom( 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -58842,7 +64862,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -58854,24 +64874,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetQuotaResponse} + * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -58891,6 +64911,8 @@ private static Builder create() { public Builder clear() { super.clear(); + prevSnapshotCleanup_ = false; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -58900,43 +64922,57 @@ public Builder clone() { public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.prevSnapshotCleanup_ = prevSnapshotCleanup_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()) return this; + if (other.hasPrevSnapshotCleanup()) { + setPrevSnapshotCleanup(other.getPrevSnapshotCleanup()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasPrevSnapshotCleanup()) { + + return false; + } return true; } @@ -58944,11 +64980,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -58957,54 +64993,74 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaResponse) + // required bool prev_snapshot_cleanup = 1; + private boolean prevSnapshotCleanup_ ; + /** + * required bool prev_snapshot_cleanup = 1; + */ + public boolean hasPrevSnapshotCleanup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool prev_snapshot_cleanup = 1; + */ + public boolean getPrevSnapshotCleanup() { + return prevSnapshotCleanup_; + } + /** + * required bool prev_snapshot_cleanup = 1; + */ + public Builder setPrevSnapshotCleanup(boolean value) { + bitField0_ |= 0x00000001; + prevSnapshotCleanup_ = value; + onChanged(); + return this; + } + /** + * required bool prev_snapshot_cleanup = 1; + */ + public Builder clearPrevSnapshotCleanup() { + bitField0_ = (bitField0_ & ~0x00000001); + prevSnapshotCleanup_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupResponse) } static { - defaultInstance = new SetQuotaResponse(true); + defaultInstance = new SetSnapshotCleanupResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupResponse) } - public interface MajorCompactionTimestampRequestOrBuilder + public interface IsSnapshotCleanupEnabledRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.TableName table_name = 1; - /** - * required .hbase.pb.TableName table_name = 1; - */ - boolean hasTableName(); - /** - * required .hbase.pb.TableName table_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); - /** - * required .hbase.pb.TableName table_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} */ - public static final class MajorCompactionTimestampRequest extends + public static final class IsSnapshotCleanupEnabledRequest extends com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampRequestOrBuilder { - // Use MajorCompactionTimestampRequest.newBuilder() to construct. - private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsSnapshotCleanupEnabledRequestOrBuilder { + // Use IsSnapshotCleanupEnabledRequest.newBuilder() to construct. 
+ private IsSnapshotCleanupEnabledRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsSnapshotCleanupEnabledRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampRequest defaultInstance; - public static MajorCompactionTimestampRequest getDefaultInstance() { + private static final IsSnapshotCleanupEnabledRequest defaultInstance; + public static IsSnapshotCleanupEnabledRequest getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampRequest getDefaultInstanceForType() { + public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { return defaultInstance; } @@ -59014,12 +65070,11 @@ public MajorCompactionTimestampRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampRequest( + private IsSnapshotCleanupEnabledRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -59037,19 +65092,6 @@ private MajorCompactionTimestampRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -59064,70 +65106,38 @@ private MajorCompactionTimestampRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = 
+ new com.google.protobuf.AbstractParser() { + public IsSnapshotCleanupEnabledRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampRequest(input, extensionRegistry); + return new IsSnapshotCleanupEnabledRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required .hbase.pb.TableName table_name = 1; - public static final int TABLE_NAME_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; - /** - * required .hbase.pb.TableName table_name = 1; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; - } - private void initFields() { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getTableName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -59135,9 +65145,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, tableName_); - } getUnknownFields().writeTo(output); } @@ -59147,10 +65154,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, tableName_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -59168,17 +65171,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) obj; boolean result = true; - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -59192,62 +65190,58 @@ public int hashCode() { } int hash = 
41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59256,7 +65250,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59268,24 +65262,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -59297,7 +65291,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); } } private static Builder create() { @@ -59306,12 +65299,6 @@ private static Builder create() { public Builder clear() { super.clear(); - if (tableNameBuilder_ == null) { - tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -59321,65 +65308,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; - } else { - result.tableName_ = tableNameBuilder_.build(); - } - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this; - if (other.hasTableName()) { - mergeTableName(other.getTableName()); - } + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasTableName()) { - - return false; - } - if (!getTableName().isInitialized()) { - - return false; - } return true; } @@ -59387,11 +65352,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -59400,172 +65365,50 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - // required .hbase.pb.TableName table_name = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; - /** - * required .hbase.pb.TableName table_name = 1; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - if (tableNameBuilder_ == null) { - return tableName_; - } else { - return tableNameBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - tableName_ = value; - onChanged(); - } else { - tableNameBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder setTableName( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableNameBuilder_ == null) { - tableName_ = builderForValue.build(); - onChanged(); - } else { - tableNameBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - tableName_ = - 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); - } else { - tableName_ = value; - } - onChanged(); - } else { - tableNameBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTableNameFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); - } else { - return tableName_; - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, - getParentForChildren(), - isClean()); - tableName_ = null; - } - return tableNameBuilder_; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) } static { - defaultInstance = new MajorCompactionTimestampRequest(true); + defaultInstance = new IsSnapshotCleanupEnabledRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) } - public interface MajorCompactionTimestampForRegionRequestOrBuilder + public interface IsSnapshotCleanupEnabledResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.RegionSpecifier region = 1; - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - boolean hasRegion(); + // required bool enabled = 1; /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + boolean hasEnabled(); /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + boolean getEnabled(); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} */ - public static final class MajorCompactionTimestampForRegionRequest extends + public static final class IsSnapshotCleanupEnabledResponse extends 
com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampForRegionRequestOrBuilder { - // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct. - private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsSnapshotCleanupEnabledResponseOrBuilder { + // Use IsSnapshotCleanupEnabledResponse.newBuilder() to construct. + private IsSnapshotCleanupEnabledResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsSnapshotCleanupEnabledResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampForRegionRequest defaultInstance; - public static MajorCompactionTimestampForRegionRequest getDefaultInstance() { + private static final IsSnapshotCleanupEnabledResponse defaultInstance; + public static IsSnapshotCleanupEnabledResponse getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { return defaultInstance; } @@ -59575,7 +65418,7 @@ public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampForRegionRequest( + private IsSnapshotCleanupEnabledResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -59598,17 +65441,9 @@ private MajorCompactionTimestampForRegionRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = region_.toBuilder(); - } - region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(region_); - region_ = subBuilder.buildPartial(); - } + case 8: { bitField0_ |= 0x00000001; + enabled_ = input.readBool(); break; } } @@ -59625,67 +65460,57 @@ private MajorCompactionTimestampForRegionRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampForRegionRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsSnapshotCleanupEnabledResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry); + return new IsSnapshotCleanupEnabledResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.RegionSpecifier region = 1; - public static final int REGION_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - public boolean hasRegion() { + public boolean hasEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { - return region_; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { - return region_; + public boolean getEnabled() { + return enabled_; } private void initFields() { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + enabled_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasRegion()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegion().isInitialized()) { + if (!hasEnabled()) { memoizedIsInitialized = 0; return false; } @@ -59697,7 +65522,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, region_); + output.writeBool(1, enabled_); } getUnknownFields().writeTo(output); } @@ -59710,7 +65535,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, region_); + .computeBoolSize(1, enabled_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -59729,16 +65554,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)) { return super.equals(obj); } - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) obj; boolean result = true; - result = result && (hasRegion() == other.hasRegion()); - if (hasRegion()) { - result = result && getRegion() - .equals(other.getRegion()); + result = result && (hasEnabled() == other.hasEnabled()); + if (hasEnabled()) { + result = result && (getEnabled() + == other.getEnabled()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -59753,62 +65578,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegion()) { - hash = (37 * hash) + REGION_FIELD_NUMBER; - hash = (53 * hash) + getRegion().hashCode(); + if (hasEnabled()) { + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabled()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59817,7 +65642,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59829,24 +65654,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -59858,7 +65683,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegionFieldBuilder(); } } private static Builder create() { @@ -59867,11 +65691,7 @@ private static Builder create() { public Builder clear() { super.clear(); - if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - } else { - regionBuilder_.clear(); - } + enabled_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -59882,62 +65702,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) 
{ to_bitField0_ |= 0x00000001; } - if (regionBuilder_ == null) { - result.region_ = region_; - } else { - result.region_ = regionBuilder_.build(); - } + result.enabled_ = enabled_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this; - if (other.hasRegion()) { - mergeRegion(other.getRegion()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasRegion()) { - - return false; - } - if (!getRegion().isInitialized()) { + if (!hasEnabled()) { return false; } @@ -59948,11 +65760,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -59963,166 +65775,76 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.RegionSpecifier region = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + // required bool enabled = 1; + private boolean enabled_ ; /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - public boolean hasRegion() { + public boolean hasEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.RegionSpecifier region = 1; - 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { - if (regionBuilder_ == null) { - return region_; - } else { - return regionBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - region_ = value; - onChanged(); - } else { - regionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBuilder_ == null) { - region_ = builderForValue.build(); - onChanged(); - } else { - regionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - region_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); - } else { - region_ = value; - } - onChanged(); - } else { - regionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder clearRegion() { - if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - onChanged(); - } else { - regionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegionFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.RegionSpecifier region = 1; + * required bool enabled = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { - if (regionBuilder_ != null) { - return regionBuilder_.getMessageOrBuilder(); - } else { - return region_; - } + public boolean getEnabled() { + return enabled_; } /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionFieldBuilder() { - if (regionBuilder_ == null) { - regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - region_, - getParentForChildren(), - isClean()); - region_ = null; - } - return regionBuilder_; + * required bool enabled = 1; + */ + public Builder setEnabled(boolean value) { + bitField0_ |= 0x00000001; + enabled_ = value; + 
onChanged(); + return this; + } + /** + * required bool enabled = 1; + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) } static { - defaultInstance = new MajorCompactionTimestampForRegionRequest(true); + defaultInstance = new IsSnapshotCleanupEnabledResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) } - public interface MajorCompactionTimestampResponseOrBuilder + public interface GetClusterIdRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required int64 compaction_timestamp = 1; - /** - * required int64 compaction_timestamp = 1; - */ - boolean hasCompactionTimestamp(); - /** - * required int64 compaction_timestamp = 1; - */ - long getCompactionTimestamp(); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + * Protobuf type {@code hbase.pb.GetClusterIdRequest} + * + *
<pre> +   ** Request and response to get the clusterID for this cluster
+   * </pre>
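A minimal caller-side sketch of the new message pair (assuming MasterProtos members are imported and that some service declares rpc GetClusterId(GetClusterIdRequest) returns (GetClusterIdResponse); the stub below is hypothetical, as the service wiring is outside this hunk):

    // "stub" is a hypothetical blocking stub for the service carrying this RPC.
    GetClusterIdRequest request = GetClusterIdRequest.newBuilder().build();
    GetClusterIdResponse response = stub.getClusterId(null, request);
    // cluster_id is optional: it is left unset when the ID could not be determined.
    String clusterId = response.hasClusterId() ? response.getClusterId() : null;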
*/ - public static final class MajorCompactionTimestampResponse extends + public static final class GetClusterIdRequest extends com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampResponseOrBuilder { - // Use MajorCompactionTimestampResponse.newBuilder() to construct. - private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterIdRequestOrBuilder { + // Use GetClusterIdRequest.newBuilder() to construct. + private GetClusterIdRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterIdRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampResponse defaultInstance; - public static MajorCompactionTimestampResponse getDefaultInstance() { + private static final GetClusterIdRequest defaultInstance; + public static GetClusterIdRequest getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampResponse getDefaultInstanceForType() { + public GetClusterIdRequest getDefaultInstanceForType() { return defaultInstance; } @@ -60132,12 +65854,11 @@ public MajorCompactionTimestampResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampResponse( + private GetClusterIdRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -60155,11 +65876,6 @@ private MajorCompactionTimestampResponse( } break; } - case 8: { - bitField0_ |= 0x00000001; - compactionTimestamp_ = input.readInt64(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -60174,60 +65890,38 @@ private MajorCompactionTimestampResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER 
= + new com.google.protobuf.AbstractParser() { + public GetClusterIdRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampResponse(input, extensionRegistry); + return new GetClusterIdRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required int64 compaction_timestamp = 1; - public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1; - private long compactionTimestamp_; - /** - * required int64 compaction_timestamp = 1; - */ - public boolean hasCompactionTimestamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int64 compaction_timestamp = 1; - */ - public long getCompactionTimestamp() { - return compactionTimestamp_; - } - private void initFields() { - compactionTimestamp_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasCompactionTimestamp()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -60235,9 +65929,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, compactionTimestamp_); - } getUnknownFields().writeTo(output); } @@ -60247,10 +65938,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, compactionTimestamp_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -60268,17 +65955,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) obj; boolean result = true; - result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp()); - if (hasCompactionTimestamp()) { - result = result && (getCompactionTimestamp() - == other.getCompactionTimestamp()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -60292,62 +65974,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCompactionTimestamp()) { - hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCompactionTimestamp()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( 
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { @@ -60356,7 +66034,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60368,24 +66046,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + * Protobuf type {@code hbase.pb.GetClusterIdRequest} + * + *
<pre> +     ** Request and response to get the clusterID for this cluster
+     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60405,8 +66087,6 @@ private static Builder create() { public Builder clear() { super.clear(); - compactionTimestamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -60416,57 +66096,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.compactionTimestamp_ = compactionTimestamp_; - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this; - if (other.hasCompactionTimestamp()) { - setCompactionTimestamp(other.getCompactionTimestamp()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasCompactionTimestamp()) { - - return false; - } return true; } @@ -60474,11 +66140,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -60487,74 +66153,67 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - // required int64 compaction_timestamp = 1; - private long compactionTimestamp_ ; - /** - * required int64 compaction_timestamp = 1; - */ - public boolean hasCompactionTimestamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int64 compaction_timestamp = 1; - */ - public long getCompactionTimestamp() { - return compactionTimestamp_; - } - /** - * required int64 compaction_timestamp = 1; - */ - public Builder setCompactionTimestamp(long value) { - bitField0_ |= 0x00000001; - compactionTimestamp_ = value; - onChanged(); - return this; - } - /** - * required int64 compaction_timestamp = 1; - */ - 
public Builder clearCompactionTimestamp() { - bitField0_ = (bitField0_ & ~0x00000001); - compactionTimestamp_ = 0L; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdRequest) } static { - defaultInstance = new MajorCompactionTimestampResponse(true); + defaultInstance = new GetClusterIdRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdRequest) } - public interface SecurityCapabilitiesRequestOrBuilder + public interface GetClusterIdResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional string cluster_id = 1; + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
+ */ + boolean hasClusterId(); + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
+ */ + java.lang.String getClusterId(); + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
+ */ + com.google.protobuf.ByteString + getClusterIdBytes(); } /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} + * Protobuf type {@code hbase.pb.GetClusterIdResponse} */ - public static final class SecurityCapabilitiesRequest extends + public static final class GetClusterIdResponse extends com.google.protobuf.GeneratedMessage - implements SecurityCapabilitiesRequestOrBuilder { - // Use SecurityCapabilitiesRequest.newBuilder() to construct. - private SecurityCapabilitiesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterIdResponseOrBuilder { + // Use GetClusterIdResponse.newBuilder() to construct. + private GetClusterIdResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SecurityCapabilitiesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterIdResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SecurityCapabilitiesRequest defaultInstance; - public static SecurityCapabilitiesRequest getDefaultInstance() { + private static final GetClusterIdResponse defaultInstance; + public static GetClusterIdResponse getDefaultInstance() { return defaultInstance; } - public SecurityCapabilitiesRequest getDefaultInstanceForType() { + public GetClusterIdResponse getDefaultInstanceForType() { return defaultInstance; } @@ -60564,11 +66223,12 @@ public SecurityCapabilitiesRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SecurityCapabilitiesRequest( + private GetClusterIdResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -60586,6 +66246,11 @@ private SecurityCapabilitiesRequest( } break; } + case 10: { + bitField0_ |= 0x00000001; + clusterId_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -60600,32 +66265,89 @@ private SecurityCapabilitiesRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { 
- public SecurityCapabilitiesRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetClusterIdResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SecurityCapabilitiesRequest(input, extensionRegistry); + return new GetClusterIdResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string cluster_id = 1; + public static final int CLUSTER_ID_FIELD_NUMBER = 1; + private java.lang.Object clusterId_; + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
+ */ + public boolean hasClusterId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
+ */ + public java.lang.String getClusterId() { + java.lang.Object ref = clusterId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + clusterId_ = s; + } + return s; + } + } + /** + * optional string cluster_id = 1; + * + *
<pre> +     ** Not set if cluster ID could not be determined.
+     * </pre>
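A short sketch of the optional cluster_id semantics these accessors implement (assuming MasterProtos members are imported; the UUID literal is purely illustrative):

    GetClusterIdResponse withId = GetClusterIdResponse.newBuilder()
        .setClusterId("d7a8c47a-5db5-4d56-9b3f-4b0a6a8f3c21")  // hypothetical cluster ID
        .build();
    assert withId.hasClusterId();

    // When the ID cannot be determined, the field is simply left unset:
    GetClusterIdResponse withoutId = GetClusterIdResponse.getDefaultInstance();
    assert !withoutId.hasClusterId();
    assert withoutId.getClusterId().isEmpty();  // initFields() defaults it to ""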
+ */ + public com.google.protobuf.ByteString + getClusterIdBytes() { + java.lang.Object ref = clusterId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; } private void initFields() { + clusterId_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -60639,6 +66361,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClusterIdBytes()); + } getUnknownFields().writeTo(output); } @@ -60648,6 +66373,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClusterIdBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -60665,12 +66394,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) obj; boolean result = true; + result = result && (hasClusterId() == other.hasClusterId()); + if (hasClusterId()) { + result = result && getClusterId() + .equals(other.getClusterId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -60684,58 +66418,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasClusterId()) { + hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER; + hash = (53 * hash) + getClusterId().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse 
parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -60744,7 +66482,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60755,398 +66493,332 @@ protected Builder newBuilderForType( Builder builder = new Builder(parent); return builder; } - /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - 
implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesRequest) - } - - static { - defaultInstance = new SecurityCapabilitiesRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesRequest) - } - - public interface SecurityCapabilitiesResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - java.util.List getCapabilitiesList(); - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - int getCapabilitiesCount(); - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index); - } - /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} - */ - public static final class SecurityCapabilitiesResponse extends - com.google.protobuf.GeneratedMessage - implements SecurityCapabilitiesResponseOrBuilder { - // Use SecurityCapabilitiesResponse.newBuilder() to construct. - private SecurityCapabilitiesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SecurityCapabilitiesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SecurityCapabilitiesResponse defaultInstance; - public static SecurityCapabilitiesResponse getDefaultInstance() { - return defaultInstance; - } - - public SecurityCapabilitiesResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SecurityCapabilitiesResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - 
capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } - break; - } - case 10: { - int length = input.readRawVarint32(); - int oldLimit = input.pushLimit(length); - while(input.getBytesUntilLimit() > 0) { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } - } - input.popLimit(oldLimit); - break; - } - } + /** + * Protobuf type {@code hbase.pb.GetClusterIdResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + clusterId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + return result; } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; - } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); - } + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.clusterId_ = clusterId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SecurityCapabilitiesResponse parsePartialFrom( + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()) return this; + if (other.hasClusterId()) { + bitField0_ |= 0x00000001; + clusterId_ = other.clusterId_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SecurityCapabilitiesResponse(input, extensionRegistry); + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + private int bitField0_; - /** - * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} - */ - public enum Capability - implements com.google.protobuf.ProtocolMessageEnum { - /** - * SIMPLE_AUTHENTICATION = 0; - */ - SIMPLE_AUTHENTICATION(0, 0), - /** - * SECURE_AUTHENTICATION = 1; - */ - 
SECURE_AUTHENTICATION(1, 1), - /** - * AUTHORIZATION = 2; - */ - AUTHORIZATION(2, 2), - /** - * CELL_AUTHORIZATION = 3; - */ - CELL_AUTHORIZATION(3, 3), + // optional string cluster_id = 1; + private java.lang.Object clusterId_ = ""; /** - * CELL_VISIBILITY = 4; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - CELL_VISIBILITY(4, 4), - ; - + public boolean hasClusterId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } /** - * SIMPLE_AUTHENTICATION = 0; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - public static final int SIMPLE_AUTHENTICATION_VALUE = 0; + public java.lang.String getClusterId() { + java.lang.Object ref = clusterId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + clusterId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } /** - * SECURE_AUTHENTICATION = 1; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - public static final int SECURE_AUTHENTICATION_VALUE = 1; + public com.google.protobuf.ByteString + getClusterIdBytes() { + java.lang.Object ref = clusterId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } /** - * AUTHORIZATION = 2; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - public static final int AUTHORIZATION_VALUE = 2; + public Builder setClusterId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + clusterId_ = value; + onChanged(); + return this; + } /** - * CELL_AUTHORIZATION = 3; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - public static final int CELL_AUTHORIZATION_VALUE = 3; + public Builder clearClusterId() { + bitField0_ = (bitField0_ & ~0x00000001); + clusterId_ = getDefaultInstance().getClusterId(); + onChanged(); + return this; + } /** - * CELL_VISIBILITY = 4; + * optional string cluster_id = 1; + * + *
<pre> +       ** Not set if cluster ID could not be determined.
+       * </pre>
*/ - public static final int CELL_VISIBILITY_VALUE = 4; - + public Builder setClusterIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + clusterId_ = value; + onChanged(); + return this; + } - public final int getNumber() { return value; } + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdResponse) + } - public static Capability valueOf(int value) { - switch (value) { - case 0: return SIMPLE_AUTHENTICATION; - case 1: return SECURE_AUTHENTICATION; - case 2: return AUTHORIZATION; - case 3: return CELL_AUTHORIZATION; - case 4: return CELL_VISIBILITY; - default: return null; - } - } + static { + defaultInstance = new GetClusterIdResponse(true); + defaultInstance.initFields(); + } - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Capability findValueByNumber(int number) { - return Capability.valueOf(number); - } - }; + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdResponse) + } - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); - } + public interface GetMastersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.GetMastersRequest} + * + *
<pre> +   ** Request and response to get the current list of all registered master servers
+   * </pre>
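Because GetMastersRequest declares no fields, a default instance round-trips as an empty payload; a quick sketch (assuming MasterProtos members are imported):

    GetMastersRequest req = GetMastersRequest.newBuilder().build();
    assert req.getSerializedSize() == 0;  // only unknown fields would add bytes
    GetMastersRequest parsed = GetMastersRequest.parseFrom(req.toByteArray());
    assert parsed.equals(req);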
+ */ + public static final class GetMastersRequest extends + com.google.protobuf.GeneratedMessage + implements GetMastersRequestOrBuilder { + // Use GetMastersRequest.newBuilder() to construct. + private GetMastersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetMastersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final Capability[] VALUES = values(); + private static final GetMastersRequest defaultInstance; + public static GetMastersRequest getDefaultInstance() { + return defaultInstance; + } - public static Capability valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); + public GetMastersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetMastersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } } - return VALUES[desc.getIndex()]; + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; + } - private final int index; - private final int value; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); + } - private Capability(int index, int value) { - this.index = index; - this.value = value; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMastersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetMastersRequest(input, extensionRegistry); } + }; - // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) - } - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - public static final int 
CAPABILITIES_FIELD_NUMBER = 1; - private java.util.List capabilities_; - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public java.util.List getCapabilitiesList() { - return capabilities_; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public int getCapabilitiesCount() { - return capabilities_.size(); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } private void initFields() { - capabilities_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -61160,9 +66832,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < capabilities_.size(); i++) { - output.writeEnum(1, capabilities_.get(i).getNumber()); - } getUnknownFields().writeTo(output); } @@ -61172,15 +66841,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - { - int dataSize = 0; - for (int i = 0; i < capabilities_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); - } - size += dataSize; - size += 1 * capabilities_.size(); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -61198,14 +66858,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) obj; boolean result = true; - result = result && getCapabilitiesList() - .equals(other.getCapabilitiesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -61219,62 +66877,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCapabilitiesCount() > 0) { - hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; - hash = (53 * hash) + hashEnumList(getCapabilitiesList()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.ByteString data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -61283,7 +66937,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest prototype) { return 
newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -61295,24 +66949,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} + * Protobuf type {@code hbase.pb.GetMastersRequest} + * + *
+     ** Request and response to get the current list of all registered master servers
+     * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -61332,8 +66990,6 @@ private static Builder create() { public Builder clear() { super.clear(); - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -61343,54 +66999,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); - int 
from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.capabilities_ = capabilities_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; - if (!other.capabilities_.isEmpty()) { - if (capabilities_.isEmpty()) { - capabilities_ = other.capabilities_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCapabilitiesIsMutable(); - capabilities_.addAll(other.capabilities_); - } - onChanged(); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -61399,155 +67039,81 @@ public final boolean isInitialized() { return true; } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - private java.util.List capabilities_ = - java.util.Collections.emptyList(); - private void ensureCapabilitiesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(capabilities_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public java.util.List getCapabilitiesList() { - return java.util.Collections.unmodifiableList(capabilities_); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public int getCapabilitiesCount() { - return capabilities_.size(); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder setCapabilities( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); - } - ensureCapabilitiesIsMutable(); - capabilities_.set(index, value); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); - } - ensureCapabilitiesIsMutable(); - capabilities_.add(value); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder addAllCapabilities( - java.lang.Iterable values) { - ensureCapabilitiesIsMutable(); - super.addAll(values, capabilities_); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder clearCapabilities() { - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersRequest) } static { - defaultInstance = new SecurityCapabilitiesResponse(true); + defaultInstance = new GetMastersRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersRequest) } - public interface ClearDeadServersRequestOrBuilder + public interface GetMastersResponseEntryOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.ServerName server_name = 1; + // required .hbase.pb.ServerName server_name = 1; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - java.util.List - getServerNameList(); + boolean hasServerName(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - int getServerNameCount(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // required bool 
is_active = 2; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - java.util.List - getServerNameOrBuilderList(); + boolean hasIsActive(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( - int index); + boolean getIsActive(); } /** - * Protobuf type {@code hbase.pb.ClearDeadServersRequest} + * Protobuf type {@code hbase.pb.GetMastersResponseEntry} */ - public static final class ClearDeadServersRequest extends + public static final class GetMastersResponseEntry extends com.google.protobuf.GeneratedMessage - implements ClearDeadServersRequestOrBuilder { - // Use ClearDeadServersRequest.newBuilder() to construct. - private ClearDeadServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMastersResponseEntryOrBuilder { + // Use GetMastersResponseEntry.newBuilder() to construct. + private GetMastersResponseEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ClearDeadServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMastersResponseEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ClearDeadServersRequest defaultInstance; - public static ClearDeadServersRequest getDefaultInstance() { + private static final GetMastersResponseEntry defaultInstance; + public static GetMastersResponseEntry getDefaultInstance() { return defaultInstance; } - public ClearDeadServersRequest getDefaultInstanceForType() { + public GetMastersResponseEntry getDefaultInstanceForType() { return defaultInstance; } @@ -61557,7 +67123,7 @@ public ClearDeadServersRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ClearDeadServersRequest( + private GetMastersResponseEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -61581,11 +67147,21 @@ private ClearDeadServersRequest( break; } case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = serverName_.toBuilder(); } - serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + isActive_ = input.readBool(); break; } } @@ -61596,89 +67172,96 @@ private ClearDeadServersRequest( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = java.util.Collections.unmodifiableList(serverName_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ClearDeadServersRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMastersResponseEntry parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ClearDeadServersRequest(input, extensionRegistry); + return new GetMastersResponseEntry(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.ServerName server_name = 1; + private int bitField0_; + // required .hbase.pb.ServerName server_name = 1; public static final int SERVER_NAME_FIELD_NUMBER = 1; - private java.util.List serverName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public java.util.List getServerNameList() { - return serverName_; + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public java.util.List - getServerNameOrBuilderList() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { return serverName_; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public int getServerNameCount() { - return serverName_.size(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; } + + // required bool is_active = 2; + public static final int IS_ACTIVE_FIELD_NUMBER = 2; + private boolean isActive_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - return serverName_.get(index); + public boolean hasIsActive() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( - int index) { - return serverName_.get(index); + public boolean getIsActive() { + return isActive_; } private void initFields() { - serverName_ = java.util.Collections.emptyList(); + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + isActive_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasIsActive()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -61687,8 +67270,11 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < serverName_.size(); i++) { - output.writeMessage(1, serverName_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, isActive_); } getUnknownFields().writeTo(output); } @@ -61699,9 +67285,13 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < serverName_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, serverName_.get(i)); + .computeMessageSize(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, isActive_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -61720,14 +67310,22 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) obj; boolean result = true; - result = result && getServerNameList() - .equals(other.getServerNameList()); + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasIsActive() == other.hasIsActive()); + if (hasIsActive()) { + result = result && (getIsActive() + == other.getIsActive()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -61741,62 +67339,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getServerNameCount() > 0) { + if (hasServerName()) { hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getServerNameList().hashCode(); + hash = (53 
* hash) + getServerName().hashCode(); + } + if (hasIsActive()) { + hash = (37 * hash) + IS_ACTIVE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsActive()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); 
} - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -61805,7 +67407,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadS public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -61817,24 +67419,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ClearDeadServersRequest} + * Protobuf type {@code hbase.pb.GetMastersResponseEntry} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -61856,11 +67458,13 @@ private static Builder create() { public Builder clear() { super.clear(); if (serverNameBuilder_ == null) { - serverName_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } else { serverNameBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); + isActive_ = false; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -61870,405 +67474,306 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry(this); int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } if (serverNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = java.util.Collections.unmodifiableList(serverName_); - bitField0_ = (bitField0_ & ~0x00000001); - } result.serverName_ = serverName_; } else { result.serverName_ = serverNameBuilder_.build(); } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.isActive_ = isActive_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance()) return this; - if (serverNameBuilder_ == null) { - if (!other.serverName_.isEmpty()) { - if (serverName_.isEmpty()) { - serverName_ = other.serverName_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureServerNameIsMutable(); - serverName_.addAll(other.serverName_); - } - onChanged(); - } - } else { - if (!other.serverName_.isEmpty()) { - if 
(serverNameBuilder_.isEmpty()) { - serverNameBuilder_.dispose(); - serverNameBuilder_ = null; - serverName_ = other.serverName_; - bitField0_ = (bitField0_ & ~0x00000001); - serverNameBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getServerNameFieldBuilder() : null; - } else { - serverNameBuilder_.addAllMessages(other.serverName_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .hbase.pb.ServerName server_name = 1; - private java.util.List serverName_ = - java.util.Collections.emptyList(); - private void ensureServerNameIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(serverName_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; - - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public java.util.List getServerNameList() { - if (serverNameBuilder_ == null) { - return java.util.Collections.unmodifiableList(serverName_); - } else { - return serverNameBuilder_.getMessageList(); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()) return this; + if (other.hasServerName()) { + mergeServerName(other.getServerName()); } - } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public int getServerNameCount() { - if (serverNameBuilder_ == null) { - return serverName_.size(); - } else { - return serverNameBuilder_.getCount(); + if (other.hasIsActive()) { + setIsActive(other.getIsActive()); } + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - if (serverNameBuilder_ == null) { - return serverName_.get(index); - } else { - return serverNameBuilder_.getMessage(index); + + public final boolean isInitialized() { + if (!hasServerName()) { + + return false; + } + if (!hasIsActive()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; } + return true; } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public Builder setServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - 
if (serverNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } - ensureServerNameIsMutable(); - serverName_.set(index, value); - onChanged(); - } else { - serverNameBuilder_.setMessage(index, value); } return this; } + private int bitField0_; + + // required .hbase.pb.ServerName server_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder setServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.set(index, builderForValue.build()); - onChanged(); - } else { - serverNameBuilder_.setMessage(index, builderForValue.build()); - } - return this; + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { if (serverNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerNameIsMutable(); - serverName_.add(value); - onChanged(); + return serverName_; } else { - serverNameBuilder_.addMessage(value); + return serverNameBuilder_.getMessage(); } - return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder addServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureServerNameIsMutable(); - serverName_.add(index, value); + serverName_ = value; onChanged(); } else { - serverNameBuilder_.addMessage(index, value); + serverNameBuilder_.setMessage(value); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder addServerName( + public Builder setServerName( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { if (serverNameBuilder_ 
== null) { - ensureServerNameIsMutable(); - serverName_.add(builderForValue.build()); - onChanged(); - } else { - serverNameBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public Builder addServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.add(index, builderForValue.build()); + serverName_ = builderForValue.build(); onChanged(); } else { - serverNameBuilder_.addMessage(index, builderForValue.build()); + serverNameBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder addAllServerName( - java.lang.Iterable values) { + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - super.addAll(values, serverName_); + if (((bitField0_ & 0x00000001) == 0x00000001) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } onChanged(); } else { - serverNameBuilder_.addAllMessages(values); + serverNameBuilder_.mergeFrom(value); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ public Builder clearServerName() { if (serverNameBuilder_ == null) { - serverName_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { serverNameBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public Builder removeServerName(int index) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.remove(index); - onChanged(); - } else { - serverNameBuilder_.remove(index); - } - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( - int index) { - return getServerNameFieldBuilder().getBuilder(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( - int index) { + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { if (serverNameBuilder_ == null) { - return serverName_.get(index); } else { - return serverNameBuilder_.getMessageOrBuilder(index); + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; } + return serverNameBuilder_; } + + // required bool is_active = 2; + private boolean isActive_ ; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public java.util.List - getServerNameOrBuilderList() { - if (serverNameBuilder_ != null) { - return serverNameBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(serverName_); - } + public boolean hasIsActive() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { - return getServerNameFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + public boolean getIsActive() { + return isActive_; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( - int index) { - return getServerNameFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + public Builder setIsActive(boolean value) { + bitField0_ |= 0x00000002; + isActive_ = value; + onChanged(); + return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool is_active = 2; */ - public java.util.List - getServerNameBuilderList() { - return getServerNameFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getServerNameFieldBuilder() { - if (serverNameBuilder_ == null) { - serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - serverName_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - serverName_ = null; - } - return serverNameBuilder_; + public Builder clearIsActive() { + bitField0_ = (bitField0_ & ~0x00000002); + isActive_ = false; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponseEntry) } static { - defaultInstance = new ClearDeadServersRequest(true); + defaultInstance = new GetMastersResponseEntry(true); defaultInstance.initFields(); } - // 
@@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponseEntry) } - public interface ClearDeadServersResponseOrBuilder + public interface GetMastersResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.ServerName server_name = 1; + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - java.util.List - getServerNameList(); + java.util.List + getMasterServersList(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index); /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - int getServerNameCount(); + int getMasterServersCount(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - java.util.List - getServerNameOrBuilderList(); + java.util.List + getMasterServersOrBuilderList(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( int index); } /** - * Protobuf type {@code hbase.pb.ClearDeadServersResponse} + * Protobuf type {@code hbase.pb.GetMastersResponse} */ - public static final class ClearDeadServersResponse extends + public static final class GetMastersResponse extends com.google.protobuf.GeneratedMessage - implements ClearDeadServersResponseOrBuilder { - // Use ClearDeadServersResponse.newBuilder() to construct. - private ClearDeadServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMastersResponseOrBuilder { + // Use GetMastersResponse.newBuilder() to construct. 
+ private GetMastersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ClearDeadServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMastersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ClearDeadServersResponse defaultInstance; - public static ClearDeadServersResponse getDefaultInstance() { + private static final GetMastersResponse defaultInstance; + public static GetMastersResponse getDefaultInstance() { return defaultInstance; } - public ClearDeadServersResponse getDefaultInstanceForType() { + public GetMastersResponse getDefaultInstanceForType() { return defaultInstance; } @@ -62278,7 +67783,7 @@ public ClearDeadServersResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ClearDeadServersResponse( + private GetMastersResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -62303,10 +67808,10 @@ private ClearDeadServersResponse( } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(); + masterServers_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } - serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + masterServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.PARSER, extensionRegistry)); break; } } @@ -62318,7 +67823,7 @@ private ClearDeadServersResponse( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = java.util.Collections.unmodifiableList(serverName_); + masterServers_ = java.util.Collections.unmodifiableList(masterServers_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -62326,77 +67831,77 @@ private ClearDeadServersResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ClearDeadServersResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public 
GetMastersResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ClearDeadServersResponse(input, extensionRegistry); + return new GetMastersResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.ServerName server_name = 1; - public static final int SERVER_NAME_FIELD_NUMBER = 1; - private java.util.List serverName_; + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + public static final int MASTER_SERVERS_FIELD_NUMBER = 1; + private java.util.List masterServers_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public java.util.List getServerNameList() { - return serverName_; + public java.util.List getMasterServersList() { + return masterServers_; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public java.util.List - getServerNameOrBuilderList() { - return serverName_; + public java.util.List + getMasterServersOrBuilderList() { + return masterServers_; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public int getServerNameCount() { - return serverName_.size(); + public int getMasterServersCount() { + return masterServers_.size(); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - return serverName_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { + return masterServers_.get(index); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( int index) { - return serverName_.get(index); + return masterServers_.get(index); } private void initFields() { - serverName_ = java.util.Collections.emptyList(); + masterServers_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { + for (int i = 0; i < getMasterServersCount(); i++) { + if (!getMasterServers(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -62408,8 +67913,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < serverName_.size(); i++) { - output.writeMessage(1, serverName_.get(i)); + for (int i = 0; i < masterServers_.size(); i++) { + output.writeMessage(1, masterServers_.get(i)); } getUnknownFields().writeTo(output); } @@ -62420,9 +67925,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; 
i < serverName_.size(); i++) { + for (int i = 0; i < masterServers_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, serverName_.get(i)); + .computeMessageSize(1, masterServers_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -62441,14 +67946,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) obj; boolean result = true; - result = result && getServerNameList() - .equals(other.getServerNameList()); + result = result && getMasterServersList() + .equals(other.getMasterServersList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -62462,62 +67967,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getServerNameCount() > 0) { - hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getServerNameList().hashCode(); + if (getMasterServersCount() > 0) { + hash = (37 * hash) + MASTER_SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getMasterServersList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(java.io.InputStream input) throws java.io.IOException { 
return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -62526,7 +68031,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadS public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -62538,24 +68043,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ClearDeadServersResponse} + * Protobuf type {@code hbase.pb.GetMastersResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -62567,7 +68072,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerNameFieldBuilder(); + getMasterServersFieldBuilder(); } } private static Builder create() { @@ -62576,11 +68081,11 @@ private static Builder create() { public Builder clear() { super.clear(); - if (serverNameBuilder_ == null) { - serverName_ = java.util.Collections.emptyList(); + if (masterServersBuilder_ == null) { + masterServers_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - serverNameBuilder_.clear(); + masterServersBuilder_.clear(); } return this; } @@ -62591,71 +68096,71 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse(this); int from_bitField0_ = bitField0_; - if (serverNameBuilder_ == null) { + if (masterServersBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = 
java.util.Collections.unmodifiableList(serverName_); + masterServers_ = java.util.Collections.unmodifiableList(masterServers_); bitField0_ = (bitField0_ & ~0x00000001); } - result.serverName_ = serverName_; + result.masterServers_ = masterServers_; } else { - result.serverName_ = serverNameBuilder_.build(); + result.masterServers_ = masterServersBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()) return this; - if (serverNameBuilder_ == null) { - if (!other.serverName_.isEmpty()) { - if (serverName_.isEmpty()) { - serverName_ = other.serverName_; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()) return this; + if (masterServersBuilder_ == null) { + if (!other.masterServers_.isEmpty()) { + if (masterServers_.isEmpty()) { + masterServers_ = other.masterServers_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureServerNameIsMutable(); - serverName_.addAll(other.serverName_); + ensureMasterServersIsMutable(); + masterServers_.addAll(other.masterServers_); } onChanged(); } } else { - if (!other.serverName_.isEmpty()) { - if (serverNameBuilder_.isEmpty()) { - serverNameBuilder_.dispose(); - serverNameBuilder_ = null; - serverName_ = other.serverName_; + if (!other.masterServers_.isEmpty()) { + if (masterServersBuilder_.isEmpty()) { + masterServersBuilder_.dispose(); + masterServersBuilder_ = null; + masterServers_ = other.masterServers_; bitField0_ = (bitField0_ & ~0x00000001); - serverNameBuilder_ = + masterServersBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getServerNameFieldBuilder() : null; + getMasterServersFieldBuilder() : null; } else { - serverNameBuilder_.addAllMessages(other.serverName_); + masterServersBuilder_.addAllMessages(other.masterServers_); } } } @@ -62664,8 +68169,8 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos } public final boolean isInitialized() { - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { + for (int i = 0; i < getMasterServersCount(); i++) { + if (!getMasterServers(i).isInitialized()) { return false; } @@ -62677,11 +68182,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -62692,299 +68197,283 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.ServerName server_name = 1; - private java.util.List serverName_ = + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + private java.util.List masterServers_ = java.util.Collections.emptyList(); - private void ensureServerNameIsMutable() { + private void ensureMasterServersIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(serverName_); + masterServers_ = new java.util.ArrayList(masterServers_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> masterServersBuilder_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public java.util.List getServerNameList() { - if (serverNameBuilder_ == null) { - return java.util.Collections.unmodifiableList(serverName_); + public java.util.List getMasterServersList() { + if (masterServersBuilder_ == null) { + return java.util.Collections.unmodifiableList(masterServers_); } else { - return serverNameBuilder_.getMessageList(); + return masterServersBuilder_.getMessageList(); } } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public int getServerNameCount() { - if (serverNameBuilder_ == null) { - return serverName_.size(); + public int getMasterServersCount() { + if (masterServersBuilder_ == null) { + return masterServers_.size(); } else { - return serverNameBuilder_.getCount(); + return masterServersBuilder_.getCount(); } } /** - * repeated 
.hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - if (serverNameBuilder_ == null) { - return serverName_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { + if (masterServersBuilder_ == null) { + return masterServers_.get(index); } else { - return serverNameBuilder_.getMessage(index); + return masterServersBuilder_.getMessage(index); } } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder setServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverNameBuilder_ == null) { + public Builder setMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureServerNameIsMutable(); - serverName_.set(index, value); + ensureMasterServersIsMutable(); + masterServers_.set(index, value); onChanged(); } else { - serverNameBuilder_.setMessage(index, value); + masterServersBuilder_.setMessage(index, value); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder setServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.set(index, builderForValue.build()); + public Builder setMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.set(index, builderForValue.build()); onChanged(); } else { - serverNameBuilder_.setMessage(index, builderForValue.build()); + masterServersBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverNameBuilder_ == null) { + public Builder addMasterServers(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureServerNameIsMutable(); - serverName_.add(value); + ensureMasterServersIsMutable(); + masterServers_.add(value); onChanged(); } else { - serverNameBuilder_.addMessage(value); + masterServersBuilder_.addMessage(value); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder addServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverNameBuilder_ == null) { + public Builder addMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureServerNameIsMutable(); - serverName_.add(index, value); + ensureMasterServersIsMutable(); + 
masterServers_.add(index, value); onChanged(); } else { - serverNameBuilder_.addMessage(index, value); + masterServersBuilder_.addMessage(index, value); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder addServerName( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.add(builderForValue.build()); + public Builder addMasterServers( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.add(builderForValue.build()); onChanged(); } else { - serverNameBuilder_.addMessage(builderForValue.build()); + masterServersBuilder_.addMessage(builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder addServerName( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.add(index, builderForValue.build()); + public Builder addMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.add(index, builderForValue.build()); onChanged(); } else { - serverNameBuilder_.addMessage(index, builderForValue.build()); + masterServersBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder addAllServerName( - java.lang.Iterable values) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - super.addAll(values, serverName_); + public Builder addAllMasterServers( + java.lang.Iterable values) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + super.addAll(values, masterServers_); onChanged(); } else { - serverNameBuilder_.addAllMessages(values); + masterServersBuilder_.addAllMessages(values); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder clearServerName() { - if (serverNameBuilder_ == null) { - serverName_ = java.util.Collections.emptyList(); + public Builder clearMasterServers() { + if (masterServersBuilder_ == null) { + masterServers_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - serverNameBuilder_.clear(); + masterServersBuilder_.clear(); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public Builder removeServerName(int index) { - if (serverNameBuilder_ == null) { - ensureServerNameIsMutable(); - serverName_.remove(index); + public Builder removeMasterServers(int index) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.remove(index); onChanged(); } else { - serverNameBuilder_.remove(index); + masterServersBuilder_.remove(index); } return this; } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated 
.hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder getMasterServersBuilder( int index) { - return getServerNameFieldBuilder().getBuilder(index); + return getMasterServersFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( int index) { - if (serverNameBuilder_ == null) { - return serverName_.get(index); } else { - return serverNameBuilder_.getMessageOrBuilder(index); + if (masterServersBuilder_ == null) { + return masterServers_.get(index); } else { + return masterServersBuilder_.getMessageOrBuilder(index); } } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public java.util.List - getServerNameOrBuilderList() { - if (serverNameBuilder_ != null) { - return serverNameBuilder_.getMessageOrBuilderList(); + public java.util.List + getMasterServersOrBuilderList() { + if (masterServersBuilder_ != null) { + return masterServersBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(serverName_); + return java.util.Collections.unmodifiableList(masterServers_); } } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { - return getServerNameFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder() { + return getMasterServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder( int index) { - return getServerNameFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + return getMasterServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; */ - public java.util.List - getServerNameBuilderList() { - return getServerNameFieldBuilder().getBuilderList(); + public java.util.List + getMasterServersBuilderList() { + return getMasterServersFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getServerNameFieldBuilder() { - if (serverNameBuilder_ == null) { - serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - serverName_, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> + getMasterServersFieldBuilder() { + if (masterServersBuilder_ == null) { + masterServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder>( + masterServers_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - serverName_ = null; + masterServers_ = null; } - return serverNameBuilder_; + return masterServersBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponse) } static { - defaultInstance = new ClearDeadServersResponse(true); + defaultInstance = new GetMastersResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponse) } - public interface SetSnapshotCleanupRequestOrBuilder + public interface GetMetaRegionLocationsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required bool enabled = 1; - /** - * required bool enabled = 1; - */ - boolean hasEnabled(); - /** - * required bool enabled = 1; - */ - boolean getEnabled(); - - // optional bool synchronous = 2; - /** - * optional bool synchronous = 2; - */ - boolean hasSynchronous(); - /** - * optional bool synchronous = 2; - */ - boolean getSynchronous(); } /** - * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * + *
+   * <pre>
+   ** Request and response to get the current list of meta region locations
+   * </pre>
*/ - public static final class SetSnapshotCleanupRequest extends + public static final class GetMetaRegionLocationsRequest extends com.google.protobuf.GeneratedMessage - implements SetSnapshotCleanupRequestOrBuilder { - // Use SetSnapshotCleanupRequest.newBuilder() to construct. - private SetSnapshotCleanupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMetaRegionLocationsRequestOrBuilder { + // Use GetMetaRegionLocationsRequest.newBuilder() to construct. + private GetMetaRegionLocationsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetSnapshotCleanupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMetaRegionLocationsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetSnapshotCleanupRequest defaultInstance; - public static SetSnapshotCleanupRequest getDefaultInstance() { + private static final GetMetaRegionLocationsRequest defaultInstance; + public static GetMetaRegionLocationsRequest getDefaultInstance() { return defaultInstance; } - public SetSnapshotCleanupRequest getDefaultInstanceForType() { + public GetMetaRegionLocationsRequest getDefaultInstanceForType() { return defaultInstance; } @@ -62994,12 +68483,11 @@ public SetSnapshotCleanupRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetSnapshotCleanupRequest( + private GetMetaRegionLocationsRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -63017,16 +68505,6 @@ private SetSnapshotCleanupRequest( } break; } - case 8: { - bitField0_ |= 0x00000001; - enabled_ = input.readBool(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - synchronous_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -63041,77 +68519,38 @@ private SetSnapshotCleanupRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetSnapshotCleanupRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SetSnapshotCleanupRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public 
com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bool enabled = 1; - public static final int ENABLED_FIELD_NUMBER = 1; - private boolean enabled_; - /** - * required bool enabled = 1; - */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bool enabled = 1; - */ - public boolean getEnabled() { - return enabled_; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); } - // optional bool synchronous = 2; - public static final int SYNCHRONOUS_FIELD_NUMBER = 2; - private boolean synchronous_; - /** - * optional bool synchronous = 2; - */ - public boolean hasSynchronous() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool synchronous = 2; - */ - public boolean getSynchronous() { - return synchronous_; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMetaRegionLocationsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetMetaRegionLocationsRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } private void initFields() { - enabled_ = false; - synchronous_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasEnabled()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -63119,12 +68558,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, enabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, synchronous_); - } getUnknownFields().writeTo(output); } @@ -63134,14 +68567,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, enabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, synchronous_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -63159,22 +68584,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) obj; boolean result = true; - result = result && (hasEnabled() == other.hasEnabled()); - if (hasEnabled()) { - result = result && (getEnabled() - == other.getEnabled()); - } - result = result && (hasSynchronous() == other.hasSynchronous()); - if (hasSynchronous()) { - result = result && (getSynchronous() - == other.getSynchronous()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -63188,66 +68603,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasEnabled()) { - hash = (37 * hash) + ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getEnabled()); - } - if (hasSynchronous()) { - hash = (37 * hash) + SYNCHRONOUS_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getSynchronous()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -63256,7 +68663,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapsho public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -63268,24 +68675,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * + *
+     * <pre>
+     ** Request and response to get the current list of meta region locations
+     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -63305,10 +68716,6 @@ private static Builder create() { public Builder clear() { super.clear(); - enabled_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - synchronous_ = false; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -63318,64 +68725,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest buildPartial() { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.enabled_ = enabled_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.synchronous_ = synchronous_; - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance()) return this; - if (other.hasEnabled()) { - setEnabled(other.getEnabled()); - } - if (other.hasSynchronous()) { - setSynchronous(other.getSynchronous()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasEnabled()) { - - return false; - } return true; } @@ -63383,11 +68769,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -63396,117 +68782,85 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - // required bool enabled = 1; - private boolean enabled_ ; - /** - * required bool enabled = 1; - */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bool enabled = 1; - */ - public boolean getEnabled() { - return enabled_; - } - /** - * required bool enabled = 1; - */ - public Builder setEnabled(boolean 
value) { - bitField0_ |= 0x00000001; - enabled_ = value; - onChanged(); - return this; - } - /** - * required bool enabled = 1; - */ - public Builder clearEnabled() { - bitField0_ = (bitField0_ & ~0x00000001); - enabled_ = false; - onChanged(); - return this; - } - - // optional bool synchronous = 2; - private boolean synchronous_ ; - /** - * optional bool synchronous = 2; - */ - public boolean hasSynchronous() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool synchronous = 2; - */ - public boolean getSynchronous() { - return synchronous_; - } - /** - * optional bool synchronous = 2; - */ - public Builder setSynchronous(boolean value) { - bitField0_ |= 0x00000002; - synchronous_ = value; - onChanged(); - return this; - } - /** - * optional bool synchronous = 2; - */ - public Builder clearSynchronous() { - bitField0_ = (bitField0_ & ~0x00000002); - synchronous_ = false; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsRequest) } static { - defaultInstance = new SetSnapshotCleanupRequest(true); + defaultInstance = new GetMetaRegionLocationsRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsRequest) } - public interface SetSnapshotCleanupResponseOrBuilder + public interface GetMetaRegionLocationsResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool prev_snapshot_cleanup = 1; + // repeated .hbase.pb.RegionLocation meta_locations = 1; /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined.
+     * </pre>
*/ - boolean hasPrevSnapshotCleanup(); + java.util.List + getMetaLocationsList(); /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined.
+     * </pre>
*/ - boolean getPrevSnapshotCleanup(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index); + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined.
+     * </pre>
+ */ + int getMetaLocationsCount(); + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined.
+     * </pre>
+ */ + java.util.List + getMetaLocationsOrBuilderList(); + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined.
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} */ - public static final class SetSnapshotCleanupResponse extends + public static final class GetMetaRegionLocationsResponse extends com.google.protobuf.GeneratedMessage - implements SetSnapshotCleanupResponseOrBuilder { - // Use SetSnapshotCleanupResponse.newBuilder() to construct. - private SetSnapshotCleanupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMetaRegionLocationsResponseOrBuilder { + // Use GetMetaRegionLocationsResponse.newBuilder() to construct. + private GetMetaRegionLocationsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetSnapshotCleanupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMetaRegionLocationsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetSnapshotCleanupResponse defaultInstance; - public static SetSnapshotCleanupResponse getDefaultInstance() { + private static final GetMetaRegionLocationsResponse defaultInstance; + public static GetMetaRegionLocationsResponse getDefaultInstance() { return defaultInstance; } - public SetSnapshotCleanupResponse getDefaultInstanceForType() { + public GetMetaRegionLocationsResponse getDefaultInstanceForType() { return defaultInstance; } @@ -63516,7 +68870,7 @@ public SetSnapshotCleanupResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetSnapshotCleanupResponse( + private GetMetaRegionLocationsResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -63539,9 +68893,12 @@ private SetSnapshotCleanupResponse( } break; } - case 8: { - bitField0_ |= 0x00000001; - prevSnapshotCleanup_ = input.readBool(); + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + metaLocations_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + metaLocations_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.PARSER, extensionRegistry)); break; } } @@ -63552,65 +68909,109 @@ private SetSnapshotCleanupResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetSnapshotCleanupResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMetaRegionLocationsResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SetSnapshotCleanupResponse(input, extensionRegistry); + return new GetMetaRegionLocationsResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - - private int bitField0_; - // required bool prev_snapshot_cleanup = 1; - public static final int PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER = 1; - private boolean prevSnapshotCleanup_; + + // repeated .hbase.pb.RegionLocation meta_locations = 1; + public static final int META_LOCATIONS_FIELD_NUMBER = 1; + private java.util.List metaLocations_; + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+ */ + public java.util.List getMetaLocationsList() { + return metaLocations_; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+ */ + public java.util.List + getMetaLocationsOrBuilderList() { + return metaLocations_; + } /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public boolean hasPrevSnapshotCleanup() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public int getMetaLocationsCount() { + return metaLocations_.size(); } /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public boolean getPrevSnapshotCleanup() { - return prevSnapshotCleanup_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { + return metaLocations_.get(index); + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( + int index) { + return metaLocations_.get(index); } private void initFields() { - prevSnapshotCleanup_ = false; + metaLocations_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasPrevSnapshotCleanup()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getMetaLocationsCount(); i++) { + if (!getMetaLocations(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -63619,8 +69020,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, prevSnapshotCleanup_); + for (int i = 0; i < metaLocations_.size(); i++) { + output.writeMessage(1, metaLocations_.get(i)); } getUnknownFields().writeTo(output); } @@ -63631,9 +69032,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + for (int i = 0; i < metaLocations_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, prevSnapshotCleanup_); + .computeMessageSize(1, metaLocations_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -63652,17 +69053,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) obj; boolean result = true; - result = result && (hasPrevSnapshotCleanup() == other.hasPrevSnapshotCleanup()); - if (hasPrevSnapshotCleanup()) { - result = result && (getPrevSnapshotCleanup() - == other.getPrevSnapshotCleanup()); - } + result = result && getMetaLocationsList() + .equals(other.getMetaLocationsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -63676,62 +69074,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPrevSnapshotCleanup()) { - hash = (37 * hash) + PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getPrevSnapshotCleanup()); + if (getMetaLocationsCount() > 0) { + hash = (37 * hash) + META_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getMetaLocationsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { 
return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -63740,7 +69138,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapsho public static Builder 
newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -63752,24 +69150,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -63781,6 +69179,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getMetaLocationsFieldBuilder(); } } private static Builder create() { @@ -63789,8 +69188,12 @@ private static Builder create() { public Builder clear() { super.clear(); - prevSnapshotCleanup_ = false; - bitField0_ = (bitField0_ & ~0x00000001); + if (metaLocationsBuilder_ == null) { + metaLocations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + metaLocationsBuilder_.clear(); + } return this; } @@ -63800,145 +69203,456 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse getDefaultInstanceForType() { - 
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (metaLocationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.metaLocations_ = metaLocations_; + } else { + result.metaLocations_ = metaLocationsBuilder_.build(); } - result.prevSnapshotCleanup_ = prevSnapshotCleanup_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()) return this; - if (other.hasPrevSnapshotCleanup()) { - setPrevSnapshotCleanup(other.getPrevSnapshotCleanup()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()) return this; + if (metaLocationsBuilder_ == null) { + if (!other.metaLocations_.isEmpty()) { + if (metaLocations_.isEmpty()) { + metaLocations_ = other.metaLocations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMetaLocationsIsMutable(); + metaLocations_.addAll(other.metaLocations_); + } + 
onChanged(); + } + } else { + if (!other.metaLocations_.isEmpty()) { + if (metaLocationsBuilder_.isEmpty()) { + metaLocationsBuilder_.dispose(); + metaLocationsBuilder_ = null; + metaLocations_ = other.metaLocations_; + bitField0_ = (bitField0_ & ~0x00000001); + metaLocationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getMetaLocationsFieldBuilder() : null; + } else { + metaLocationsBuilder_.addAllMessages(other.metaLocations_); + } + } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasPrevSnapshotCleanup()) { - - return false; + for (int i = 0; i < getMetaLocationsCount(); i++) { + if (!getMetaLocations(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.RegionLocation meta_locations = 1; + private java.util.List metaLocations_ = + java.util.Collections.emptyList(); + private void ensureMetaLocationsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + metaLocations_ = new java.util.ArrayList(metaLocations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> metaLocationsBuilder_; + + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public java.util.List getMetaLocationsList() { + if (metaLocationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metaLocations_); + } else { + return metaLocationsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public int getMetaLocationsCount() { + if (metaLocationsBuilder_ == null) { + return metaLocations_.size(); + } else { + return metaLocationsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { + if (metaLocationsBuilder_ == null) { + return metaLocations_.get(index); + } else { + return metaLocationsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder setMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetaLocationsIsMutable(); + metaLocations_.set(index, value); + onChanged(); + } else { + metaLocationsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder setMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.set(index, builderForValue.build()); + onChanged(); + } else { + metaLocationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder addMetaLocations(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetaLocationsIsMutable(); + metaLocations_.add(value); + onChanged(); + } else { + metaLocationsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder addMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetaLocationsIsMutable(); + metaLocations_.add(index, value); + onChanged(); + } else { + metaLocationsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder addMetaLocations( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.add(builderForValue.build()); + onChanged(); + } else { + metaLocationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder addMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.add(index, builderForValue.build()); + onChanged(); + } else { + metaLocationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder addAllMetaLocations( + java.lang.Iterable values) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + super.addAll(values, metaLocations_); + onChanged(); + } else { + metaLocationsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder clearMetaLocations() { + if (metaLocationsBuilder_ == null) { + metaLocations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + metaLocationsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public Builder removeMetaLocations(int index) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.remove(index); + onChanged(); + } else { + metaLocationsBuilder_.remove(index); } - return true; + return this; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder getMetaLocationsBuilder( + int index) { + return getMetaLocationsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( + int index) { + if (metaLocationsBuilder_ == null) { + return metaLocations_.get(index); } else { + return metaLocationsBuilder_.getMessageOrBuilder(index); } - return this; } - private int bitField0_; - - // required bool prev_snapshot_cleanup = 1; - private boolean prevSnapshotCleanup_ ; /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public boolean hasPrevSnapshotCleanup() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List + getMetaLocationsOrBuilderList() { + if (metaLocationsBuilder_ != null) { + return metaLocationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metaLocations_); + } } /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public boolean getPrevSnapshotCleanup() { - return prevSnapshotCleanup_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder() { + return getMetaLocationsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); } /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder setPrevSnapshotCleanup(boolean value) { - bitField0_ |= 0x00000001; - prevSnapshotCleanup_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder( + int index) { + return getMetaLocationsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); } /** - * required bool prev_snapshot_cleanup = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder clearPrevSnapshotCleanup() { - bitField0_ = (bitField0_ & ~0x00000001); - prevSnapshotCleanup_ = false; - onChanged(); - return this; + public java.util.List + getMetaLocationsBuilderList() { + return getMetaLocationsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> + getMetaLocationsFieldBuilder() { + if (metaLocationsBuilder_ == null) { + metaLocationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder>( + metaLocations_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + metaLocations_ = null; + } + return metaLocationsBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsResponse) } static { - defaultInstance = new SetSnapshotCleanupResponse(true); + defaultInstance = new GetMetaRegionLocationsResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsResponse) } - public interface IsSnapshotCleanupEnabledRequestOrBuilder + public interface GetNumLiveRSRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} + * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} + * + *
+   ** Request and response to get the number of live region servers 
+   * </pre>
*/ - public static final class IsSnapshotCleanupEnabledRequest extends + public static final class GetNumLiveRSRequest extends com.google.protobuf.GeneratedMessage - implements IsSnapshotCleanupEnabledRequestOrBuilder { - // Use IsSnapshotCleanupEnabledRequest.newBuilder() to construct. - private IsSnapshotCleanupEnabledRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetNumLiveRSRequestOrBuilder { + // Use GetNumLiveRSRequest.newBuilder() to construct. + private GetNumLiveRSRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsSnapshotCleanupEnabledRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetNumLiveRSRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsSnapshotCleanupEnabledRequest defaultInstance; - public static IsSnapshotCleanupEnabledRequest getDefaultInstance() { + private static final GetNumLiveRSRequest defaultInstance; + public static GetNumLiveRSRequest getDefaultInstance() { return defaultInstance; } - public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { + public GetNumLiveRSRequest getDefaultInstanceForType() { return defaultInstance; } @@ -63948,7 +69662,7 @@ public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsSnapshotCleanupEnabledRequest( + private GetNumLiveRSRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -63984,28 +69698,28 @@ private IsSnapshotCleanupEnabledRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsSnapshotCleanupEnabledRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetNumLiveRSRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsSnapshotCleanupEnabledRequest(input, extensionRegistry); + return new GetNumLiveRSRequest(input, extensionRegistry); } }; @java.lang.Override - 
public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -64049,10 +69763,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) obj; boolean result = true; result = result && @@ -64073,53 +69787,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - 
public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -64128,7 +69842,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshot public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -64140,24 +69854,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} + * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} + * + *
+     ** Request and response to get the number of live region servers 
+     * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -64186,38 +69904,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -64230,11 +69948,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -64244,49 +69962,49 @@ public Builder mergeFrom( return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSRequest) } static { - defaultInstance = new IsSnapshotCleanupEnabledRequest(true); + defaultInstance = new GetNumLiveRSRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSRequest) } - public interface IsSnapshotCleanupEnabledResponseOrBuilder + public interface GetNumLiveRSResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool enabled = 1; + // required int32 num_region_servers = 1; /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - boolean hasEnabled(); + boolean hasNumRegionServers(); /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - boolean getEnabled(); + int getNumRegionServers(); } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} + * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} */ - public static final class IsSnapshotCleanupEnabledResponse extends + public static final class GetNumLiveRSResponse extends com.google.protobuf.GeneratedMessage - implements IsSnapshotCleanupEnabledResponseOrBuilder 
{ - // Use IsSnapshotCleanupEnabledResponse.newBuilder() to construct. - private IsSnapshotCleanupEnabledResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetNumLiveRSResponseOrBuilder { + // Use GetNumLiveRSResponse.newBuilder() to construct. + private GetNumLiveRSResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsSnapshotCleanupEnabledResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetNumLiveRSResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsSnapshotCleanupEnabledResponse defaultInstance; - public static IsSnapshotCleanupEnabledResponse getDefaultInstance() { + private static final GetNumLiveRSResponse defaultInstance; + public static GetNumLiveRSResponse getDefaultInstance() { return defaultInstance; } - public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { + public GetNumLiveRSResponse getDefaultInstanceForType() { return defaultInstance; } @@ -64296,7 +70014,7 @@ public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsSnapshotCleanupEnabledResponse( + private GetNumLiveRSResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -64321,7 +70039,7 @@ private IsSnapshotCleanupEnabledResponse( } case 8: { bitField0_ |= 0x00000001; - enabled_ = input.readBool(); + numRegionServers_ = input.readInt32(); break; } } @@ -64338,57 +70056,57 @@ private IsSnapshotCleanupEnabledResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsSnapshotCleanupEnabledResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetNumLiveRSResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsSnapshotCleanupEnabledResponse(input, extensionRegistry); + return new GetNumLiveRSResponse(input, extensionRegistry); } }; @java.lang.Override - 
public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bool enabled = 1; - public static final int ENABLED_FIELD_NUMBER = 1; - private boolean enabled_; + // required int32 num_region_servers = 1; + public static final int NUM_REGION_SERVERS_FIELD_NUMBER = 1; + private int numRegionServers_; /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public boolean hasEnabled() { + public boolean hasNumRegionServers() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public boolean getEnabled() { - return enabled_; + public int getNumRegionServers() { + return numRegionServers_; } private void initFields() { - enabled_ = false; + numRegionServers_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasEnabled()) { + if (!hasNumRegionServers()) { memoizedIsInitialized = 0; return false; } @@ -64400,7 +70118,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, enabled_); + output.writeInt32(1, numRegionServers_); } getUnknownFields().writeTo(output); } @@ -64413,7 +70131,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, enabled_); + .computeInt32Size(1, numRegionServers_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -64432,16 +70150,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) obj; boolean result = true; - result = result && (hasEnabled() == other.hasEnabled()); - if (hasEnabled()) { - result = result && (getEnabled() - == other.getEnabled()); + result = result && (hasNumRegionServers() == other.hasNumRegionServers()); + if (hasNumRegionServers()) { + result = result && (getNumRegionServers() + == other.getNumRegionServers()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -64456,62 +70174,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasEnabled()) { - hash = (37 * hash) + ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getEnabled()); + if (hasNumRegionServers()) { + hash = (37 * hash) + NUM_REGION_SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getNumRegionServers(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { @@ -64520,7 +70238,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshot public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -64532,24 +70250,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} + * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -64569,7 +70287,7 @@ private static Builder create() { public Builder clear() { super.clear(); - enabled_ = false; + numRegionServers_ = 0; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -64580,54 +70298,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.enabled_ = enabled_; + result.numRegionServers_ = numRegionServers_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()) return this; - if (other.hasEnabled()) { - setEnabled(other.getEnabled()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()) return this; + if (other.hasNumRegionServers()) { + setNumRegionServers(other.getNumRegionServers()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasEnabled()) { + if (!hasNumRegionServers()) { return false; } @@ -64638,11 +70356,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) 
e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -64653,48 +70371,48 @@ public Builder mergeFrom( } private int bitField0_; - // required bool enabled = 1; - private boolean enabled_ ; + // required int32 num_region_servers = 1; + private int numRegionServers_ ; /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public boolean hasEnabled() { + public boolean hasNumRegionServers() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public boolean getEnabled() { - return enabled_; + public int getNumRegionServers() { + return numRegionServers_; } /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public Builder setEnabled(boolean value) { + public Builder setNumRegionServers(int value) { bitField0_ |= 0x00000001; - enabled_ = value; + numRegionServers_ = value; onChanged(); return this; } /** - * required bool enabled = 1; + * required int32 num_region_servers = 1; */ - public Builder clearEnabled() { + public Builder clearNumRegionServers() { bitField0_ = (bitField0_ & ~0x00000001); - enabled_ = false; + numRegionServers_ = 0; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSResponse) } static { - defaultInstance = new IsSnapshotCleanupEnabledResponse(true); + defaultInstance = new GetNumLiveRSResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSResponse) } /** @@ -65507,6 +71225,18 @@ public abstract void isSnapshotCleanupEnabled( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); + * + *
+       * <pre>
+       ** returns table state
+       * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -66024,6 +71754,14 @@ public void isSnapshotCleanupEnabled( impl.isSnapshotCleanupEnabled(controller, request, done); } + @java.lang.Override + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableState(controller, request, done); + } + }; } @@ -66174,6 +71912,8 @@ public final com.google.protobuf.Message callBlockingMethod( return impl.switchSnapshotCleanup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)request); case 63: return impl.isSnapshotCleanupEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)request); + case 64: + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -66316,6 +72056,8 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance(); case 63: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); + case 64: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -66458,6 +72200,8 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(); case 63: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); + case 64: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -67268,6 +73012,18 @@ public abstract void isSnapshotCleanupEnabled( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); + * + *
+     * <pre>
+     ** returns table state
+     * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -67610,6 +73366,11 @@ public final void callMethod( com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 64: + this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -67752,6 +73513,8 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance(); case 63: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); + case 64: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -67894,6 +73657,8 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(); case 63: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); + case 64: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -68800,1186 +74565,1710 @@ public void abortProcedure( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance())); } - public void listProcedures( + public void listProcedures( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(59), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); + } + + public void clearDeadServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(60), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance())); + } + + public void listNamespaces( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(61), + controller, + request, + 
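The hunks above complete the wiring for the new RPC: getTableState is declared on both generated service shapes and routed as method index 64 in callMethod/callBlockingMethod, appended after isSnapshotCleanupEnabled (63) so the existing slots keep their positions. A minimal client-side sketch of invoking it through the generated blocking stub; the request layout (a single hbase.pb.TableName field) is not visible in this diff and is assumed here:

    // Hypothetical usage sketch, not part of the patch. Assumes an open
    // com.google.protobuf.BlockingRpcChannel to the active master and that
    // GetTableStateRequest carries one hbase.pb.TableName field.
    MasterProtos.MasterService.BlockingInterface master =
        MasterProtos.MasterService.newBlockingStub(channel);
    HBaseProtos.TableName tableName = HBaseProtos.TableName.newBuilder()
        .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
        .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1"))
        .build();
    MasterProtos.GetTableStateResponse resp = master.getTableState(
        null, // RpcController; HBase's IPC layer supplies its own in practice
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(tableName)
            .build());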
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance())); + } + + public void switchSnapshotCleanup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(62), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance())); + } + + public void isSnapshotCleanupEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(63), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance())); + } + + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(64), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse getTableDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse getTableNames( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request) + throws com.google.protobuf.ServiceException; + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getClusterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse addColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse deleteColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse modifyColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse moveRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse offlineRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse deleteTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse truncateTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse enableTable( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse disableTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse modifyTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse createTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse setNormalizerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse isNormalizerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse runCleanerChore( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(59), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); - } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreRequest request) + throws com.google.protobuf.ServiceException; - public void clearDeadServers( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse setCleanerChoreRunning( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(60), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance())); - } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest request) + throws com.google.protobuf.ServiceException; - public void listNamespaces( + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(61), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance())); - } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest request) + throws com.google.protobuf.ServiceException; - public void switchSnapshotCleanup( + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse execMasterService( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(62), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance())); - } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) + throws com.google.protobuf.ServiceException; - public void isSnapshotCleanupEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse snapshot( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(63), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance())); - } - } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) + throws com.google.protobuf.ServiceException; - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) + throws com.google.protobuf.ServiceException; - public interface BlockingInterface { - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot( com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse getTableDescriptors( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse getTableNames( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getClusterStatus( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse addColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse deleteColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse modifyColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws 
com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse moveRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse offlineRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse deleteTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse truncateTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException; - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse enableTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse disableTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse modifyTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse createTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse getTableDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse getTableNames( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getClusterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse addColumn( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse setNormalizerRunning( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse isNormalizerEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse deleteColumn( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request) + throws com.google.protobuf.ServiceException { + return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse modifyColumn( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse runCleanerChore( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse moveRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse setCleanerChoreRunning( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(9), 
+ controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse execMasterService( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse snapshot( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(11), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse offlineRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(12), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance()); + } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse deleteTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(13), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse truncateTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse enableTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(15), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) - 
throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse disableTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(16), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse modifyTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse createTable( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) - throws com.google.protobuf.ServiceException; - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(19), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(20), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(21), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces( + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request) - throws com.google.protobuf.ServiceException; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(22), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(23), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); } - private final com.google.protobuf.BlockingRpcChannel channel; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(24), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse getTableDescriptors( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(25), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse getTableNames( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(26), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getClusterStatus( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(27), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse setNormalizerRunning( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) 
channel.callBlockingMethod( + getDescriptor().getMethods().get(28), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse addColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse isNormalizerEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(29), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse deleteColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(30), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse modifyColumn( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(31), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse moveRegion( + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(32), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse runCleanerChore( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(33), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse setCleanerChoreRunning( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(34), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest request) throws 
com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(35), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse offlineRegion( + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse execMasterService( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request) + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(36), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse deleteTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse snapshot( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(37), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse truncateTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(38), controller, request, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse enableTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(39), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse disableTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(40), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse modifyTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(41), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse createTable( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(42), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(43), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(20), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(44), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(21), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) 
channel.callBlockingMethod( + getDescriptor().getMethods().get(45), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(22), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(46), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(47), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(48), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(49), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(50), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(51), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse setNormalizerRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) 
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(52), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse isNormalizerEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(53), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(54), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(55), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(56), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse runCleanerChore( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(57), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse setCleanerChoreRunning( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(58), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse.getDefaultInstance()); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(59), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse execMasterService( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(60), controller, request, - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse snapshot( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(61), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup( com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(62), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(63), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(64), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); } + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) + } + + /** + * Protobuf service {@code hbase.pb.ClientMetaService} + * + *
+   **
+   * Implements all the RPCs needed by clients to look up cluster meta information needed for connection establishment.
+   * 
+   */
+  public static abstract class ClientMetaService
+      implements com.google.protobuf.Service {
+    protected ClientMetaService() {}
+
+    public interface Interface {
+      /**
+       * rpc GetClusterId(.hbase.pb.GetClusterIdRequest) returns (.hbase.pb.GetClusterIdResponse);
+       *
+       *
+       **
+       * Get Cluster ID for this cluster.
+       * 
+       */
+      public abstract void getClusterId(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done);
+
+      /**
+       * rpc GetMasters(.hbase.pb.GetMastersRequest) returns (.hbase.pb.GetMastersResponse);
+       *
+       *
+       **
+       * Get registered list of master servers in this cluster. List includes both active and backup
+       * masters.
+       * 
+       */
+      public abstract void getMasters(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse> done);
+
+      /**
+       * rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse);
+       *
+       *
+       **
+       * Get the current region locations of the meta replicas.
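+       *
+       * Editor's note, illustrative rather than protoc output: the response carries one
+       * hbase.pb.RegionLocation per meta replica, readable via
+       * response.getMetaLocationsList().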
+       * 
+ */ + public abstract void getMetaRegionLocations( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetNumLiveRS(.hbase.pb.GetNumLiveRSRequest) returns (.hbase.pb.GetNumLiveRSResponse); + * + *
+       **
+       * Get the number of live region servers.
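+       *
+       * Editor's note, illustrative rather than protoc output: the count is read back
+       * with response.getNumRegionServers() on the GetNumLiveRSResponse.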
+       * 
+ */ + public abstract void getNumLiveRS( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new ClientMetaService() { + @java.lang.Override + public void getClusterId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request, + com.google.protobuf.RpcCallback done) { + impl.getClusterId(controller, request, done); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); - } + @java.lang.Override + public void getMasters( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request, + com.google.protobuf.RpcCallback done) { + impl.getMasters(controller, request, done); + } + @java.lang.Override + public void getMetaRegionLocations( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request, + com.google.protobuf.RpcCallback done) { + impl.getMetaRegionLocations(controller, request, done); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()); - } + @java.lang.Override + public void getNumLiveRS( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, + com.google.protobuf.RpcCallback done) { + impl.getNumLiveRS(controller, request, done); + } + }; + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); - } + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return 
getDescriptor(); + } + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.getClusterId(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)request); + case 1: + return impl.getMasters(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)request); + case 2: + return impl.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request); + case 3: + return impl.getNumLiveRS(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); - } + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); - } + }; + } + /** + * rpc GetClusterId(.hbase.pb.GetClusterIdRequest) returns (.hbase.pb.GetClusterIdResponse); + * + *
+     **
+     * Get the cluster ID of this cluster.
+     * 
+ */ + public abstract void getClusterId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request, + com.google.protobuf.RpcCallback done); - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); - } + /** + * rpc GetMasters(.hbase.pb.GetMastersRequest) returns (.hbase.pb.GetMastersResponse); + * + *
+     **
+     * Get the registered list of master servers in this cluster. The list includes both
+     * active and backup masters.
+     * 
+ */ + public abstract void getMasters( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request, + com.google.protobuf.RpcCallback done); + /** + * rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse); + * + *
+     **
+     * Get the current region locations of the meta replicas.
+     * 
+ */ + public abstract void getMetaRegionLocations( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request, + com.google.protobuf.RpcCallback done); - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); - } + /** + * rpc GetNumLiveRS(.hbase.pb.GetNumLiveRSRequest) returns (.hbase.pb.GetNumLiveRSResponse); + * + *
+     **
+     * Get the number of live region servers.
+     * 
+ */ + public abstract void getNumLiveRS( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, + com.google.protobuf.RpcCallback done); + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getServices().get(1); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.getClusterId(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getMasters(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.getNumLiveRS(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); } + } + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); } + } + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; } + private final com.google.protobuf.RpcChannel channel; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); + public com.google.protobuf.RpcChannel getChannel() { + return channel; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( + public void getClusterId( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) 
channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance())); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota( + public void getMasters( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance())); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( + public void getMetaRegionLocations( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance())); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + public void getNumLiveRS( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) - throws com.google.protobuf.ServiceException { - return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance())); } + } + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getClusterId( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(56), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); - } - + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request) + throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getMasters( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(57), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); - } - + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request) + throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getMetaRegionLocations( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(58), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); - } - + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request) + throws com.google.protobuf.ServiceException; - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getNumLiveRS( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(59), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; } + private final com.google.protobuf.BlockingRpcChannel channel; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getClusterId( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(60), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getMasters( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(61), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getMetaRegionLocations( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request) throws 
com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(62), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getNumLiveRS( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request) + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(63), + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), controller, request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()); } } - // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) + // @@protoc_insertion_point(class_scope:hbase.pb.ClientMetaService) } private static com.google.protobuf.Descriptors.Descriptor @@ -70462,6 +76751,16 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanup private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetTableStateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetTableStateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_GetClusterStatusRequest_descriptor; private static @@ -70597,6 +76896,51 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanup private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetClusterIdRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetClusterIdResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetMastersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetMastersResponseEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetMastersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -70758,211 +77102,236 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanup "sRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_t", "ables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n" + "\025GetTableNamesResponse\022(\n\013table_names\030\001 " + - "\003(\0132\023.hbase.pb.TableName\"\031\n\027GetClusterSt" + - "atusRequest\"K\n\030GetClusterStatusResponse\022" + - "/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clust" + - "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" + - "MasterRunningResponse\022\031\n\021is_master_runni" + - "ng\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproc" + - "edure\030\001 \002(\0132\036.hbase.pb.ProcedureDescript" + - "ion\"F\n\025ExecProcedureResponse\022\030\n\020expected", - "_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026I" + - "sProcedureDoneRequest\0221\n\tprocedure\030\001 \001(\013" + - "2\036.hbase.pb.ProcedureDescription\"`\n\027IsPr" + - "ocedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false" + - "\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureD" + - "escription\",\n\031GetProcedureResultRequest\022" + - "\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultR" + - "esponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetPro" + - "cedureResultResponse.State\022\022\n\nstart_time" + - "\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 ", - 
"\001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Foreig" + - "nExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020" + - "\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortPro" + - "cedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInt" + - "erruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProc" + - "edureResponse\022\034\n\024is_procedure_aborted\030\001 " + - "\002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListProc" + - "eduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbas" + - "e.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tus" + - "er_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnam", - "espace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase" + - ".pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp" + - "ass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hb" + - "ase.pb.ThrottleRequest\"\022\n\020SetQuotaRespon" + - "se\"J\n\037MajorCompactionTimestampRequest\022\'\n" + - "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"U" + - "\n(MajorCompactionTimestampForRegionReque" + - "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" + - "ifier\"@\n MajorCompactionTimestampRespons" + - "e\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Secur", - "ityCapabilitiesRequest\"\354\001\n\034SecurityCapab" + - "ilitiesResponse\022G\n\014capabilities\030\001 \003(\01621." + - "hbase.pb.SecurityCapabilitiesResponse.Ca" + - "pability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHEN" + - "TICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n" + - "\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003" + - "\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027ClearDeadServer" + - "sRequest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb" + - ".ServerName\"E\n\030ClearDeadServersResponse\022" + - ")\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNa", - "me\"A\n\031SetSnapshotCleanupRequest\022\017\n\007enabl" + - "ed\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\";\n\032SetSnap" + - "shotCleanupResponse\022\035\n\025prev_snapshot_cle" + - "anup\030\001 \002(\010\"!\n\037IsSnapshotCleanupEnabledRe" + - "quest\"3\n IsSnapshotCleanupEnabledRespons" + - "e\022\017\n\007enabled\030\001 \002(\010*(\n\020MasterSwitchType\022\t" + - "\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\241.\n\rMasterService\022e" + - "\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSch" + - "emaAlterStatusRequest\032&.hbase.pb.GetSche" + - "maAlterStatusResponse\022b\n\023GetTableDescrip", - "tors\022$.hbase.pb.GetTableDescriptorsReque" + - "st\032%.hbase.pb.GetTableDescriptorsRespons" + - "e\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNa" + - "mesRequest\032\037.hbase.pb.GetTableNamesRespo" + - "nse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetCl" + - "usterStatusRequest\032\".hbase.pb.GetCluster" + - "StatusResponse\022V\n\017IsMasterRunning\022 .hbas" + - "e.pb.IsMasterRunningRequest\032!.hbase.pb.I" + - "sMasterRunningResponse\022D\n\tAddColumn\022\032.hb" + - "ase.pb.AddColumnRequest\032\033.hbase.pb.AddCo", - "lumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb." 
+ - "DeleteColumnRequest\032\036.hbase.pb.DeleteCol" + - "umnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.M" + - "odifyColumnRequest\032\036.hbase.pb.ModifyColu" + - "mnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Move" + - "RegionRequest\032\034.hbase.pb.MoveRegionRespo" + - "nse\022k\n\026DispatchMergingRegions\022\'.hbase.pb" + - ".DispatchMergingRegionsRequest\032(.hbase.p" + - "b.DispatchMergingRegionsResponse\022M\n\014Assi" + - "gnRegion\022\035.hbase.pb.AssignRegionRequest\032", - "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" + - "ignRegion\022\037.hbase.pb.UnassignRegionReque" + - "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" + - "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" + - "quest\032\037.hbase.pb.OfflineRegionResponse\022J" + - "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" + - "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" + - "uncateTable\022\036.hbase.pb.TruncateTableRequ" + - "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" + - "EnableTable\022\034.hbase.pb.EnableTableReques", - "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" + - "bleTable\022\035.hbase.pb.DisableTableRequest\032" + - "\036.hbase.pb.DisableTableResponse\022J\n\013Modif" + - "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" + - "base.pb.ModifyTableResponse\022J\n\013CreateTab" + - "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" + - ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" + - "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" + - "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" + - "MasterRequest\032\034.hbase.pb.StopMasterRespo", - "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" + - ".pb.IsInMaintenanceModeRequest\032%.hbase.p" + - "b.IsInMaintenanceModeResponse\022>\n\007Balance" + - "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" + - "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" + - "ase.pb.SetBalancerRunningRequest\032$.hbase" + - ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" + - "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" + - "Request\032#.hbase.pb.IsBalancerEnabledResp" + - "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p", - "b.SetSplitOrMergeEnabledRequest\032(.hbase." 
+ - "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS" + - "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" + - "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" + - "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" + - ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" + - "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" + - "e.pb.SetNormalizerRunningRequest\032&.hbase" + - ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" + - "rmalizerEnabled\022$.hbase.pb.IsNormalizerE", - "nabledRequest\032%.hbase.pb.IsNormalizerEna" + - "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p" + - "b.RunCatalogScanRequest\032 .hbase.pb.RunCa" + - "talogScanResponse\022e\n\024EnableCatalogJanito" + - "r\022%.hbase.pb.EnableCatalogJanitorRequest" + - "\032&.hbase.pb.EnableCatalogJanitorResponse" + - "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" + - "sCatalogJanitorEnabledRequest\032).hbase.pb" + - ".IsCatalogJanitorEnabledResponse\022V\n\017RunC" + - "leanerChore\022 .hbase.pb.RunCleanerChoreRe", - "quest\032!.hbase.pb.RunCleanerChoreResponse" + - "\022k\n\026SetCleanerChoreRunning\022\'.hbase.pb.Se" + - "tCleanerChoreRunningRequest\032(.hbase.pb.S" + - "etCleanerChoreRunningResponse\022h\n\025IsClean" + - "erChoreEnabled\022&.hbase.pb.IsCleanerChore" + - "EnabledRequest\032\'.hbase.pb.IsCleanerChore" + - "EnabledResponse\022^\n\021ExecMasterService\022#.h" + - "base.pb.CoprocessorServiceRequest\032$.hbas" + - "e.pb.CoprocessorServiceResponse\022A\n\010Snaps" + - "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p", - "b.SnapshotResponse\022h\n\025GetCompletedSnapsh" + - "ots\022&.hbase.pb.GetCompletedSnapshotsRequ" + - "est\032\'.hbase.pb.GetCompletedSnapshotsResp" + - "onse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delete" + - "SnapshotRequest\032 .hbase.pb.DeleteSnapsho" + - "tResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.I" + - "sSnapshotDoneRequest\032 .hbase.pb.IsSnapsh" + - "otDoneResponse\022V\n\017RestoreSnapshot\022 .hbas" + - "e.pb.RestoreSnapshotRequest\032!.hbase.pb.R" + - "estoreSnapshotResponse\022h\n\025IsRestoreSnaps", - "hotDone\022&.hbase.pb.IsRestoreSnapshotDone" + - "Request\032\'.hbase.pb.IsRestoreSnapshotDone" + - "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe" + - "cProcedureRequest\032\037.hbase.pb.ExecProcedu" + - "reResponse\022W\n\024ExecProcedureWithRet\022\036.hba" + - "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" + - "ecProcedureResponse\022V\n\017IsProcedureDone\022 " + - ".hbase.pb.IsProcedureDoneRequest\032!.hbase" + - ".pb.IsProcedureDoneResponse\022V\n\017ModifyNam" + - "espace\022 .hbase.pb.ModifyNamespaceRequest", - "\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017C" + - "reateNamespace\022 .hbase.pb.CreateNamespac" + - "eRequest\032!.hbase.pb.CreateNamespaceRespo" + - "nse\022V\n\017DeleteNamespace\022 .hbase.pb.Delete" + - "NamespaceRequest\032!.hbase.pb.DeleteNamesp" + - "aceResponse\022k\n\026GetNamespaceDescriptor\022\'." + - "hbase.pb.GetNamespaceDescriptorRequest\032(" + - ".hbase.pb.GetNamespaceDescriptorResponse" + - "\022q\n\030ListNamespaceDescriptors\022).hbase.pb." + - "ListNamespaceDescriptorsRequest\032*.hbase.", - "pb.ListNamespaceDescriptorsResponse\022\206\001\n\037" + - "ListTableDescriptorsByNamespace\0220.hbase." 
+ - "pb.ListTableDescriptorsByNamespaceReques" + - "t\0321.hbase.pb.ListTableDescriptorsByNames" + - "paceResponse\022t\n\031ListTableNamesByNamespac" + - "e\022*.hbase.pb.ListTableNamesByNamespaceRe" + - "quest\032+.hbase.pb.ListTableNamesByNamespa" + - "ceResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" + - "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" + - "\037getLastMajorCompactionTimestamp\022).hbase", - ".pb.MajorCompactionTimestampRequest\032*.hb" + - "ase.pb.MajorCompactionTimestampResponse\022" + - "\212\001\n(getLastMajorCompactionTimestampForRe" + - "gion\0222.hbase.pb.MajorCompactionTimestamp" + - "ForRegionRequest\032*.hbase.pb.MajorCompact" + - "ionTimestampResponse\022_\n\022getProcedureResu" + - "lt\022#.hbase.pb.GetProcedureResultRequest\032" + - "$.hbase.pb.GetProcedureResultResponse\022h\n" + - "\027getSecurityCapabilities\022%.hbase.pb.Secu" + - "rityCapabilitiesRequest\032&.hbase.pb.Secur", - "ityCapabilitiesResponse\022S\n\016AbortProcedur" + - "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba" + - "se.pb.AbortProcedureResponse\022S\n\016ListProc" + - "edures\022\037.hbase.pb.ListProceduresRequest\032" + - " .hbase.pb.ListProceduresResponse\022Y\n\020Cle" + - "arDeadServers\022!.hbase.pb.ClearDeadServer" + - "sRequest\032\".hbase.pb.ClearDeadServersResp" + - "onse\022S\n\016ListNamespaces\022\037.hbase.pb.ListNa" + - "mespacesRequest\032 .hbase.pb.ListNamespace" + - "sResponse\022b\n\025SwitchSnapshotCleanup\022#.hba", - "se.pb.SetSnapshotCleanupRequest\032$.hbase." + - "pb.SetSnapshotCleanupResponse\022q\n\030IsSnaps" + - "hotCleanupEnabled\022).hbase.pb.IsSnapshotC" + - "leanupEnabledRequest\032*.hbase.pb.IsSnapsh" + - "otCleanupEnabledResponseBB\n*org.apache.h" + - "adoop.hbase.protobuf.generatedB\014MasterPr" + - "otosH\001\210\001\001\240\001\001" + "\003(\0132\023.hbase.pb.TableName\"?\n\024GetTableStat" + + "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." + + "TableName\"B\n\025GetTableStateResponse\022)\n\013ta" + + "ble_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n" + + "\027GetClusterStatusRequest\"K\n\030GetClusterSt" + + "atusResponse\022/\n\016cluster_status\030\001 \002(\0132\027.h" + + "base.pb.ClusterStatus\"\030\n\026IsMasterRunning" + + "Request\"4\n\027IsMasterRunningResponse\022\031\n\021is", + "_master_running\030\001 \002(\010\"I\n\024ExecProcedureRe" + + "quest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Proc" + + "edureDescription\"F\n\025ExecProcedureRespons" + + "e\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_da" + + "ta\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tpr" + + "ocedure\030\001 \001(\0132\036.hbase.pb.ProcedureDescri" + + "ption\"`\n\027IsProcedureDoneResponse\022\023\n\004done" + + "\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase." 
+ + "pb.ProcedureDescription\",\n\031GetProcedureR" + + "esultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetPro", + "cedureResultResponse\0229\n\005state\030\001 \002(\0162*.hb" + + "ase.pb.GetProcedureResultResponse.State\022" + + "\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004" + + "\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hb" + + "ase.pb.ForeignExceptionMessage\"1\n\005State\022" + + "\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020" + + "\002\"M\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 " + + "\002(\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true" + + "\"6\n\026AbortProcedureResponse\022\034\n\024is_procedu" + + "re_aborted\030\001 \002(\010\"\027\n\025ListProceduresReques", + "t\"@\n\026ListProceduresResponse\022&\n\tprocedure" + + "\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuota" + + "Request\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group" + + "\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030" + + "\004 \001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all" + + "\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010thrott" + + "le\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020S" + + "etQuotaResponse\"J\n\037MajorCompactionTimest" + + "ampRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.p" + + "b.TableName\"U\n(MajorCompactionTimestampF", + "orRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase." + + "pb.RegionSpecifier\"@\n MajorCompactionTim" + + "estampResponse\022\034\n\024compaction_timestamp\030\001" + + " \002(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034" + + "SecurityCapabilitiesResponse\022G\n\014capabili" + + "ties\030\001 \003(\01621.hbase.pb.SecurityCapabiliti" + + "esResponse.Capability\"\202\001\n\nCapability\022\031\n\025" + + "SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHEN" + + "TICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AU" + + "THORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027Cl", + "earDeadServersRequest\022)\n\013server_name\030\001 \003" + + "(\0132\024.hbase.pb.ServerName\"E\n\030ClearDeadSer" + + "versResponse\022)\n\013server_name\030\001 \003(\0132\024.hbas" + + "e.pb.ServerName\"A\n\031SetSnapshotCleanupReq" + + "uest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001" + + "(\010\";\n\032SetSnapshotCleanupResponse\022\035\n\025prev" + + "_snapshot_cleanup\030\001 \002(\010\"!\n\037IsSnapshotCle" + + "anupEnabledRequest\"3\n IsSnapshotCleanupE" + + "nabledResponse\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetCl" + + "usterIdRequest\"*\n\024GetClusterIdResponse\022\022", + "\n\ncluster_id\030\001 \001(\t\"\023\n\021GetMastersRequest\"" + + "W\n\027GetMastersResponseEntry\022)\n\013server_nam" + + "e\030\001 \002(\0132\024.hbase.pb.ServerName\022\021\n\tis_acti" + + "ve\030\002 \002(\010\"O\n\022GetMastersResponse\0229\n\016master" + + "_servers\030\001 \003(\0132!.hbase.pb.GetMastersResp" + + "onseEntry\"\037\n\035GetMetaRegionLocationsReque" + + "st\"R\n\036GetMetaRegionLocationsResponse\0220\n\016" + + "meta_locations\030\001 \003(\0132\030.hbase.pb.RegionLo" + + "cation\"\025\n\023GetNumLiveRSRequest\"2\n\024GetNumL" + + "iveRSResponse\022\032\n\022num_region_servers\030\001 \002(", + "\005*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERG" + + 
"E\020\0012\363.\n\rMasterService\022e\n\024GetSchemaAlterS" + + "tatus\022%.hbase.pb.GetSchemaAlterStatusReq" + + "uest\032&.hbase.pb.GetSchemaAlterStatusResp" + + "onse\022b\n\023GetTableDescriptors\022$.hbase.pb.G" + + "etTableDescriptorsRequest\032%.hbase.pb.Get" + + "TableDescriptorsResponse\022P\n\rGetTableName" + + "s\022\036.hbase.pb.GetTableNamesRequest\032\037.hbas" + + "e.pb.GetTableNamesResponse\022Y\n\020GetCluster" + + "Status\022!.hbase.pb.GetClusterStatusReques", + "t\032\".hbase.pb.GetClusterStatusResponse\022V\n" + + "\017IsMasterRunning\022 .hbase.pb.IsMasterRunn" + + "ingRequest\032!.hbase.pb.IsMasterRunningRes" + + "ponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumnR" + + "equest\032\033.hbase.pb.AddColumnResponse\022M\n\014D" + + "eleteColumn\022\035.hbase.pb.DeleteColumnReque" + + "st\032\036.hbase.pb.DeleteColumnResponse\022M\n\014Mo" + + "difyColumn\022\035.hbase.pb.ModifyColumnReques" + + "t\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMov" + + "eRegion\022\033.hbase.pb.MoveRegionRequest\032\034.h", + "base.pb.MoveRegionResponse\022k\n\026DispatchMe" + + "rgingRegions\022\'.hbase.pb.DispatchMergingR" + + "egionsRequest\032(.hbase.pb.DispatchMerging" + + "RegionsResponse\022M\n\014AssignRegion\022\035.hbase." + + "pb.AssignRegionRequest\032\036.hbase.pb.Assign" + + "RegionResponse\022S\n\016UnassignRegion\022\037.hbase" + + ".pb.UnassignRegionRequest\032 .hbase.pb.Una" + + "ssignRegionResponse\022P\n\rOfflineRegion\022\036.h" + + "base.pb.OfflineRegionRequest\032\037.hbase.pb." + + "OfflineRegionResponse\022J\n\013DeleteTable\022\034.h", + "base.pb.DeleteTableRequest\032\035.hbase.pb.De" + + "leteTableResponse\022P\n\rtruncateTable\022\036.hba" + + "se.pb.TruncateTableRequest\032\037.hbase.pb.Tr" + + "uncateTableResponse\022J\n\013EnableTable\022\034.hba" + + "se.pb.EnableTableRequest\032\035.hbase.pb.Enab" + + "leTableResponse\022M\n\014DisableTable\022\035.hbase." 
+ + "pb.DisableTableRequest\032\036.hbase.pb.Disabl" + + "eTableResponse\022J\n\013ModifyTable\022\034.hbase.pb" + + ".ModifyTableRequest\032\035.hbase.pb.ModifyTab" + + "leResponse\022J\n\013CreateTable\022\034.hbase.pb.Cre", + "ateTableRequest\032\035.hbase.pb.CreateTableRe" + + "sponse\022A\n\010Shutdown\022\031.hbase.pb.ShutdownRe" + + "quest\032\032.hbase.pb.ShutdownResponse\022G\n\nSto" + + "pMaster\022\033.hbase.pb.StopMasterRequest\032\034.h" + + "base.pb.StopMasterResponse\022h\n\031IsMasterIn" + + "MaintenanceMode\022$.hbase.pb.IsInMaintenan" + + "ceModeRequest\032%.hbase.pb.IsInMaintenance" + + "ModeResponse\022>\n\007Balance\022\030.hbase.pb.Balan" + + "ceRequest\032\031.hbase.pb.BalanceResponse\022_\n\022" + + "SetBalancerRunning\022#.hbase.pb.SetBalance", + "rRunningRequest\032$.hbase.pb.SetBalancerRu" + + "nningResponse\022\\\n\021IsBalancerEnabled\022\".hba" + + "se.pb.IsBalancerEnabledRequest\032#.hbase.p" + + "b.IsBalancerEnabledResponse\022k\n\026SetSplitO" + + "rMergeEnabled\022\'.hbase.pb.SetSplitOrMerge" + + "EnabledRequest\032(.hbase.pb.SetSplitOrMerg" + + "eEnabledResponse\022h\n\025IsSplitOrMergeEnable" + + "d\022&.hbase.pb.IsSplitOrMergeEnabledReques" + + "t\032\'.hbase.pb.IsSplitOrMergeEnabledRespon" + + "se\022D\n\tNormalize\022\032.hbase.pb.NormalizeRequ", + "est\032\033.hbase.pb.NormalizeResponse\022e\n\024SetN" + + "ormalizerRunning\022%.hbase.pb.SetNormalize" + + "rRunningRequest\032&.hbase.pb.SetNormalizer" + + "RunningResponse\022b\n\023IsNormalizerEnabled\022$" + + ".hbase.pb.IsNormalizerEnabledRequest\032%.h" + + "base.pb.IsNormalizerEnabledResponse\022S\n\016R" + + "unCatalogScan\022\037.hbase.pb.RunCatalogScanR" + + "equest\032 .hbase.pb.RunCatalogScanResponse" + + "\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Enab" + + "leCatalogJanitorRequest\032&.hbase.pb.Enabl", + "eCatalogJanitorResponse\022n\n\027IsCatalogJani" + + "torEnabled\022(.hbase.pb.IsCatalogJanitorEn" + + "abledRequest\032).hbase.pb.IsCatalogJanitor" + + "EnabledResponse\022V\n\017RunCleanerChore\022 .hba" + + "se.pb.RunCleanerChoreRequest\032!.hbase.pb." + + "RunCleanerChoreResponse\022k\n\026SetCleanerCho" + + "reRunning\022\'.hbase.pb.SetCleanerChoreRunn" + + "ingRequest\032(.hbase.pb.SetCleanerChoreRun" + + "ningResponse\022h\n\025IsCleanerChoreEnabled\022&." 
+ + "hbase.pb.IsCleanerChoreEnabledRequest\032\'.", + "hbase.pb.IsCleanerChoreEnabledResponse\022^" + + "\n\021ExecMasterService\022#.hbase.pb.Coprocess" + + "orServiceRequest\032$.hbase.pb.CoprocessorS" + + "erviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sn" + + "apshotRequest\032\032.hbase.pb.SnapshotRespons" + + "e\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Ge" + + "tCompletedSnapshotsRequest\032\'.hbase.pb.Ge" + + "tCompletedSnapshotsResponse\022S\n\016DeleteSna" + + "pshot\022\037.hbase.pb.DeleteSnapshotRequest\032 " + + ".hbase.pb.DeleteSnapshotResponse\022S\n\016IsSn", + "apshotDone\022\037.hbase.pb.IsSnapshotDoneRequ" + + "est\032 .hbase.pb.IsSnapshotDoneResponse\022V\n" + + "\017RestoreSnapshot\022 .hbase.pb.RestoreSnaps" + + "hotRequest\032!.hbase.pb.RestoreSnapshotRes" + + "ponse\022h\n\025IsRestoreSnapshotDone\022&.hbase.p" + + "b.IsRestoreSnapshotDoneRequest\032\'.hbase.p" + + "b.IsRestoreSnapshotDoneResponse\022P\n\rExecP" + + "rocedure\022\036.hbase.pb.ExecProcedureRequest" + + "\032\037.hbase.pb.ExecProcedureResponse\022W\n\024Exe" + + "cProcedureWithRet\022\036.hbase.pb.ExecProcedu", + "reRequest\032\037.hbase.pb.ExecProcedureRespon" + + "se\022V\n\017IsProcedureDone\022 .hbase.pb.IsProce" + + "dureDoneRequest\032!.hbase.pb.IsProcedureDo" + + "neResponse\022V\n\017ModifyNamespace\022 .hbase.pb" + + ".ModifyNamespaceRequest\032!.hbase.pb.Modif" + + "yNamespaceResponse\022V\n\017CreateNamespace\022 ." + + "hbase.pb.CreateNamespaceRequest\032!.hbase." + + "pb.CreateNamespaceResponse\022V\n\017DeleteName" + + "space\022 .hbase.pb.DeleteNamespaceRequest\032" + + "!.hbase.pb.DeleteNamespaceResponse\022k\n\026Ge", + "tNamespaceDescriptor\022\'.hbase.pb.GetNames" + + "paceDescriptorRequest\032(.hbase.pb.GetName" + + "spaceDescriptorResponse\022q\n\030ListNamespace" + + "Descriptors\022).hbase.pb.ListNamespaceDesc" + + "riptorsRequest\032*.hbase.pb.ListNamespaceD" + + "escriptorsResponse\022\206\001\n\037ListTableDescript" + + "orsByNamespace\0220.hbase.pb.ListTableDescr" + + "iptorsByNamespaceRequest\0321.hbase.pb.List" + + "TableDescriptorsByNamespaceResponse\022t\n\031L" + + "istTableNamesByNamespace\022*.hbase.pb.List", + "TableNamesByNamespaceRequest\032+.hbase.pb." + + "ListTableNamesByNamespaceResponse\022A\n\010Set" + + "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase" + + ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" + + "actionTimestamp\022).hbase.pb.MajorCompacti" + + "onTimestampRequest\032*.hbase.pb.MajorCompa" + + "ctionTimestampResponse\022\212\001\n(getLastMajorC" + + "ompactionTimestampForRegion\0222.hbase.pb.M" + + "ajorCompactionTimestampForRegionRequest\032" + + "*.hbase.pb.MajorCompactionTimestampRespo", + "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get" + + "ProcedureResultRequest\032$.hbase.pb.GetPro" + + "cedureResultResponse\022h\n\027getSecurityCapab" + + "ilities\022%.hbase.pb.SecurityCapabilitiesR" + + "equest\032&.hbase.pb.SecurityCapabilitiesRe" + + "sponse\022S\n\016AbortProcedure\022\037.hbase.pb.Abor" + + "tProcedureRequest\032 .hbase.pb.AbortProced" + + "ureResponse\022S\n\016ListProcedures\022\037.hbase.pb" + + ".ListProceduresRequest\032 .hbase.pb.ListPr" + + "oceduresResponse\022Y\n\020ClearDeadServers\022!.h", + "base.pb.ClearDeadServersRequest\032\".hbase." 
+ + "pb.ClearDeadServersResponse\022S\n\016ListNames" + + "paces\022\037.hbase.pb.ListNamespacesRequest\032 " + + ".hbase.pb.ListNamespacesResponse\022b\n\025Swit" + + "chSnapshotCleanup\022#.hbase.pb.SetSnapshot" + + "CleanupRequest\032$.hbase.pb.SetSnapshotCle" + + "anupResponse\022q\n\030IsSnapshotCleanupEnabled" + + "\022).hbase.pb.IsSnapshotCleanupEnabledRequ" + + "est\032*.hbase.pb.IsSnapshotCleanupEnabledR" + + "esponse\022P\n\rGetTableState\022\036.hbase.pb.GetT", + "ableStateRequest\032\037.hbase.pb.GetTableStat" + + "eResponse2\347\002\n\021ClientMetaService\022M\n\014GetCl" + + "usterId\022\035.hbase.pb.GetClusterIdRequest\032\036" + + ".hbase.pb.GetClusterIdResponse\022G\n\nGetMas" + + "ters\022\033.hbase.pb.GetMastersRequest\032\034.hbas" + + "e.pb.GetMastersResponse\022k\n\026GetMetaRegion" + + "Locations\022\'.hbase.pb.GetMetaRegionLocati" + + "onsRequest\032(.hbase.pb.GetMetaRegionLocat" + + "ionsResponse\022M\n\014GetNumLiveRS\022\035.hbase.pb." + + "GetNumLiveRSRequest\032\036.hbase.pb.GetNumLiv", + "eRSResponseBB\n*org.apache.hadoop.hbase.p" + + "rotobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -71545,168 +77914,234 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); - internal_static_hbase_pb_GetClusterStatusRequest_descriptor = + internal_static_hbase_pb_GetTableStateRequest_descriptor = getDescriptor().getMessageTypes().get(96); + internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetTableStateRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetTableStateResponse_descriptor = + getDescriptor().getMessageTypes().get(97); + internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetTableStateResponse_descriptor, + new java.lang.String[] { "TableState", }); + internal_static_hbase_pb_GetClusterStatusRequest_descriptor = + getDescriptor().getMessageTypes().get(98); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(97); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + 
getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(109); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - 
getDescriptor().getMessageTypes().get(108); + getDescriptor().getMessageTypes().get(110); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(109); + getDescriptor().getMessageTypes().get(111); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(110); + getDescriptor().getMessageTypes().get(112); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(111); + getDescriptor().getMessageTypes().get(113); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(112); + getDescriptor().getMessageTypes().get(114); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(113); + getDescriptor().getMessageTypes().get(115); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(114); + getDescriptor().getMessageTypes().get(116); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(115); + getDescriptor().getMessageTypes().get(117); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(116); + getDescriptor().getMessageTypes().get(118); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", 
}); internal_static_hbase_pb_ClearDeadServersRequest_descriptor = - getDescriptor().getMessageTypes().get(117); + getDescriptor().getMessageTypes().get(119); internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ClearDeadServersRequest_descriptor, new java.lang.String[] { "ServerName", }); internal_static_hbase_pb_ClearDeadServersResponse_descriptor = - getDescriptor().getMessageTypes().get(118); + getDescriptor().getMessageTypes().get(120); internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ClearDeadServersResponse_descriptor, new java.lang.String[] { "ServerName", }); internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor = - getDescriptor().getMessageTypes().get(119); + getDescriptor().getMessageTypes().get(121); internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor, new java.lang.String[] { "Enabled", "Synchronous", }); internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor = - getDescriptor().getMessageTypes().get(120); + getDescriptor().getMessageTypes().get(122); internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor, new java.lang.String[] { "PrevSnapshotCleanup", }); internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(121); + getDescriptor().getMessageTypes().get(123); internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(122); + getDescriptor().getMessageTypes().get(124); internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); + internal_static_hbase_pb_GetClusterIdRequest_descriptor = + getDescriptor().getMessageTypes().get(125); + internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetClusterIdRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_GetClusterIdResponse_descriptor = + getDescriptor().getMessageTypes().get(126); + internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetClusterIdResponse_descriptor, + new java.lang.String[] { "ClusterId", }); + internal_static_hbase_pb_GetMastersRequest_descriptor = + getDescriptor().getMessageTypes().get(127); + internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetMastersRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_GetMastersResponseEntry_descriptor = + getDescriptor().getMessageTypes().get(128); + 
internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetMastersResponseEntry_descriptor, + new java.lang.String[] { "ServerName", "IsActive", }); + internal_static_hbase_pb_GetMastersResponse_descriptor = + getDescriptor().getMessageTypes().get(129); + internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetMastersResponse_descriptor, + new java.lang.String[] { "MasterServers", }); + internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor = + getDescriptor().getMessageTypes().get(130); + internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor = + getDescriptor().getMessageTypes().get(131); + internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor, + new java.lang.String[] { "MetaLocations", }); + internal_static_hbase_pb_GetNumLiveRSRequest_descriptor = + getDescriptor().getMessageTypes().get(132); + internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetNumLiveRSRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_GetNumLiveRSResponse_descriptor = + getDescriptor().getMessageTypes().get(133); + internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetNumLiveRSResponse_descriptor, + new java.lang.String[] { "NumRegionServers", }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index fc181a8bc18a..e872f4c2a7bd 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -4419,12 +4419,12 @@ public Builder clearMode() { // @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask) } - public interface TableOrBuilder + public interface DeprecatedTableStateOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.Table.State state = 1 [default = ENABLED]; + // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; /** - * required .hbase.pb.Table.State state = 1 [default = ENABLED]; + * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; * *
      * This is the table's state.  If no znode for a table,
@@ -4434,7 +4434,7 @@ public interface TableOrBuilder
      */
     boolean hasState();
     /**
-     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
+     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4442,32 +4442,33 @@ public interface TableOrBuilder
      * for more.
      * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState(); } /** - * Protobuf type {@code hbase.pb.Table} + * Protobuf type {@code hbase.pb.DeprecatedTableState} * *
    **
    * The znode that holds state of table.
+   * Deprecated, table state is stored in the table descriptor on HDFS.
    * 
*/ - public static final class Table extends + public static final class DeprecatedTableState extends com.google.protobuf.GeneratedMessage - implements TableOrBuilder { - // Use Table.newBuilder() to construct. - private Table(com.google.protobuf.GeneratedMessage.Builder builder) { + implements DeprecatedTableStateOrBuilder { + // Use DeprecatedTableState.newBuilder() to construct. + private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final Table defaultInstance; - public static Table getDefaultInstance() { + private static final DeprecatedTableState defaultInstance; + public static DeprecatedTableState getDefaultInstance() { return defaultInstance; } - public Table getDefaultInstanceForType() { + public DeprecatedTableState getDefaultInstanceForType() { return defaultInstance; } @@ -4477,7 +4478,7 @@ public Table getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private Table( + private DeprecatedTableState( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4502,7 +4503,7 @@ private Table( } case 8: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { @@ -4525,33 +4526,33 @@ private Table( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser
() { - public Table parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeprecatedTableState parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new Table(input, extensionRegistry); + return new DeprecatedTableState(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser
getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } /** - * Protobuf enum {@code hbase.pb.Table.State} + * Protobuf enum {@code hbase.pb.DeprecatedTableState.State} * *
      * Table's current state
@@ -4629,7 +4630,7 @@ public State findValueByNumber(int number) {
       }
       public static final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0);
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
       }
 
       private static final State[] VALUES = values();
@@ -4651,15 +4652,15 @@ private State(int index, int value) {
         this.value = value;
       }
 
-      // @@protoc_insertion_point(enum_scope:hbase.pb.Table.State)
+      // @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
     }
 
     private int bitField0_;
-    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
+    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
     /**
-     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
+     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4671,7 +4672,7 @@ public boolean hasState() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
+     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4679,12 +4680,12 @@ public boolean hasState() {
      * for more.
      * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { return state_; } private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4735,10 +4736,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj; boolean result = true; result = result && (hasState() == other.hasState()); @@ -4768,53 +4769,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table 
parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4823,7 +4824,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table p public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -4835,29 +4836,30 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.Table} + * Protobuf type {@code hbase.pb.DeprecatedTableState} * *
      **
      * The znode that holds state of table.
+     * Deprecated, table state is stored in the table descriptor on HDFS.
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -4877,7 +4879,7 @@ private static Builder create() { public Builder clear() { super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -4888,23 +4890,23 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); + public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -4917,16 +4919,16 @@ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPar } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this; if (other.hasState()) { setState(other.getState()); } @@ -4946,11 +4948,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -4961,10 +4963,10 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.Table.State state = 1 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; /** - * required .hbase.pb.Table.State state = 1 [default = ENABLED]; + * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -4976,7 +4978,7 @@ public boolean hasState() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * required .hbase.pb.Table.State state = 1 [default = ENABLED];
+       * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
        *
        * 
        * This is the table's state.  If no znode for a table,
@@ -4984,11 +4986,11 @@ public boolean hasState() {
        * for more.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { return state_; } /** - * required .hbase.pb.Table.State state = 1 [default = ENABLED]; + * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -4996,7 +4998,7 @@ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State ge
        * for more.
        * 
*/ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) { if (value == null) { throw new NullPointerException(); } @@ -5006,7 +5008,7 @@ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProt return this; } /** - * required .hbase.pb.Table.State state = 1 [default = ENABLED]; + * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -5016,20 +5018,20 @@ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProt
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.Table)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
     }
 
     static {
-      defaultInstance = new Table(true);
+      defaultInstance = new DeprecatedTableState(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.Table)
+    // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
   }
 
   public interface TableCFOrBuilder
@@ -10934,10 +10936,10 @@ public Builder clearEnabled() {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Table_descriptor;
+    internal_static_hbase_pb_DeprecatedTableState_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_Table_fieldAccessorTable;
+      internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableCF_descriptor;
   private static
@@ -11001,28 +11003,29 @@ public Builder clearEnabled() {
       "\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020" +
       "\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007" +
       "UNKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPL" +
-      "AY\020\002\"w\n\005Table\022-\n\005state\030\001 \002(\0162\025.hbase.pb.",
-      "Table.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED" +
-      "\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABL" +
-      "ING\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.h" +
-      "base.pb.TableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017" +
-      "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" +
-      "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
-      "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
-      "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n" +
-      "\ttable_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tb" +
-      "andwidth\030\006 \001(\003\"g\n\020ReplicationState\022/\n\005st",
-      "ate\030\001 \002(\0162 .hbase.pb.ReplicationState.St" +
-      "ate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"" +
-      "+\n\027ReplicationHLogPosition\022\020\n\010position\030\001" +
-      " \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 " +
-      "\002(\t\"\252\001\n\tTableLock\022\'\n\ntable_name\030\001 \001(\0132\023." +
-      "hbase.pb.TableName\022(\n\nlock_owner\030\002 \001(\0132\024" +
-      ".hbase.pb.ServerName\022\021\n\tthread_id\030\003 \001(\003\022" +
-      "\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013c" +
-      "reate_time\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabl" +
-      "ed\030\001 \001(\010BE\n*org.apache.hadoop.hbase.prot",
-      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "AY\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001",
+      " \002(\0162$.hbase.pb.DeprecatedTableState.Sta" +
+      "te:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DIS" +
+      "ABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"D\n\007" +
+      "TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.T" +
+      "ableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017Replicati" +
+      "onPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicatio" +
+      "nEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(\0132\030.hbas" +
+      "e.pb.BytesBytesPair\022/\n\rconfiguration\030\004 \003" +
+      "(\0132\030.hbase.pb.NameStringPair\022$\n\ttable_cf" +
+      "s\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tbandwidth\030",
+      "\006 \001(\003\"g\n\020ReplicationState\022/\n\005state\030\001 \002(\016" +
+      "2 .hbase.pb.ReplicationState.State\"\"\n\005St" +
+      "ate\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Replic" +
+      "ationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017R" +
+      "eplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tT" +
+      "ableLock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb." +
+      "TableName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb" +
+      ".ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_sha" +
+      "red\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_tim" +
+      "e\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010B",
+      "E\n*org.apache.hadoop.hbase.protobuf.gene" +
+      "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11059,11 +11062,11 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SplitLogTask_descriptor,
               new java.lang.String[] { "State", "ServerName", "Mode", });
-          internal_static_hbase_pb_Table_descriptor =
+          internal_static_hbase_pb_DeprecatedTableState_descriptor =
             getDescriptor().getMessageTypes().get(5);
-          internal_static_hbase_pb_Table_fieldAccessorTable = new
+          internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_Table_descriptor,
+              internal_static_hbase_pb_DeprecatedTableState_descriptor,
               new java.lang.String[] { "State", });
           internal_static_hbase_pb_TableCF_descriptor =
             getDescriptor().getMessageTypes().get(6);
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 44b722de8536..29bec7280327 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -39,6 +39,27 @@ message TableSchema {
   repeated NameStringPair configuration = 4;
 }
 
+/** Denotes the state of a table */
+message TableState {
+  // Table's current state
+  enum State {
+    ENABLED = 0;
+    DISABLED = 1;
+    DISABLING = 2;
+    ENABLING = 3;
+  }
+  // This is the table's state.
+  required State state = 1;
+  required TableName table = 2;
+  optional uint64 timestamp = 3;
+}
+
+/** On-HDFS representation of a table: its schema plus its state. */
+message TableDescriptor {
+  required TableSchema schema = 1;
+  optional TableState.State state = 2 [ default = ENABLED ];
+}
+
 /**
  * Column Family Schema
  * Inspired by the rest ColumSchemaMessage
@@ -234,3 +255,9 @@ message SnapshotDescription {
   optional UsersAndPermissions users_and_permissions = 7;
   optional int64 ttl = 8 [default = 0];
 }
+
+message RegionLocation {
+  required RegionInfo region_info = 1;
+  optional ServerName server_name = 2;
+  required int64 seq_num = 3;
+}
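
For orientation, a minimal sketch of how the new HBase.proto messages compose once the Java classes are regenerated. The class name and table coordinates below are illustrative; only the HBaseProtos builders added by this patch are assumed.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class TableStatePbSketch {
  public static void main(String[] args) {
    // Wire form of a table name (namespace + qualifier), as defined in HBase.proto.
    HBaseProtos.TableName tableName = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();

    // TableState carries the state, the table it describes, and an optional timestamp.
    HBaseProtos.TableState state = HBaseProtos.TableState.newBuilder()
        .setState(HBaseProtos.TableState.State.DISABLED)
        .setTable(tableName)
        .setTimestamp(System.currentTimeMillis())
        .build();
    System.out.println(state);
  }
}
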
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index f43c4e0f285f..be4c66ee6ad2 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -456,6 +456,14 @@ message GetTableNamesResponse {
   repeated TableName table_names = 1;
 }
 
+message GetTableStateRequest {
+  required TableName table_name = 1;
+}
+
+message GetTableStateResponse {
+  required TableState table_state = 1;
+}
+
 message GetClusterStatusRequest {
 }
 
@@ -901,5 +909,67 @@ service MasterService {
   rpc IsSnapshotCleanupEnabled (IsSnapshotCleanupEnabledRequest)
     returns (IsSnapshotCleanupEnabledResponse);
 
+  /** Returns the state of the given table */
+  rpc GetTableState(GetTableStateRequest)
+    returns(GetTableStateResponse);
+}
+
+/** Request and response to get the cluster ID for this cluster */
+message GetClusterIdRequest {
+}
+message GetClusterIdResponse {
+  /** Not set if cluster ID could not be determined. */
+  optional string cluster_id = 1;
+}
+
+/** Request and response to get the current list of all registered master servers */
+message GetMastersRequest {
+}
+message GetMastersResponseEntry {
+    required ServerName server_name = 1;
+    required bool is_active = 2;
+}
+message GetMastersResponse {
+    repeated GetMastersResponseEntry master_servers = 1;
+}
+
+/** Request and response to get the current list of meta region locations */
+message GetMetaRegionLocationsRequest {
+}
+message GetMetaRegionLocationsResponse {
+  /** Not set if meta region locations could not be determined. */
+  repeated RegionLocation meta_locations = 1;
+}
+
+/** Request and response to get the number of live region servers */
+message GetNumLiveRSRequest {
+}
+message GetNumLiveRSResponse {
+  required int32 num_region_servers = 1;
+}
+
+/**
+ * Implements the RPCs clients need to look up the cluster metadata required for connection establishment.
+ */
+service ClientMetaService {
+  /**
+   * Get Cluster ID for this cluster.
+   */
+  rpc GetClusterId(GetClusterIdRequest) returns(GetClusterIdResponse);
+
+  /**
+   * Get the list of master servers registered with this cluster. The list includes both
+   * active and backup masters.
+   */
+  rpc GetMasters(GetMastersRequest) returns(GetMastersResponse);
+
+  /**
+   * Get current meta replicas' region locations.
+   */
+  rpc GetMetaRegionLocations(GetMetaRegionLocationsRequest) returns(GetMetaRegionLocationsResponse);
 
+  /**
+   * Get number of live region servers.
+   */
+  rpc GetNumLiveRS(GetNumLiveRSRequest) returns(GetNumLiveRSResponse);
 }
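
A hedged sketch of a client hitting the new ClientMetaService: it assumes generic protobuf services are generated (as for MasterService) and that the caller already holds a BlockingRpcChannel to a master; passing a null RpcController is a simplification, and the class name is illustrative.

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

public class ClusterIdLookupSketch {
  /** Fetches the cluster ID over ClientMetaService; null when it could not be determined. */
  static String fetchClusterId(BlockingRpcChannel channel) throws ServiceException {
    MasterProtos.ClientMetaService.BlockingInterface stub =
        MasterProtos.ClientMetaService.newBlockingStub(channel);
    MasterProtos.GetClusterIdResponse resp =
        stub.getClusterId(null, MasterProtos.GetClusterIdRequest.getDefaultInstance());
    // cluster_id is optional on the wire: absent when the master could not determine it.
    return resp.hasClusterId() ? resp.getClusterId() : null;
  }
}
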
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 1638bf707147..ad740f3ccba4 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -105,8 +105,9 @@ message SplitLogTask {
 
 /**
  * The znode that holds state of table.
+ * Deprecated, table state is stored in the table descriptor on HDFS.
  */
-message Table {
+message DeprecatedTableState {
   // Table's current state
   enum State {
     ENABLED = 0;
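
Renaming the message keeps old znode payloads readable during migration. A sketch of decoding one, assuming the payload uses HBase's usual PB-magic framing (the same pattern TableDescriptor.parseFrom() uses below); the helper name is illustrative.

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class LegacyTableStateReader {
  /** Decodes a pre-rename table-state znode payload. */
  static ZooKeeperProtos.DeprecatedTableState.State parse(byte[] znodeData)
      throws DeserializationException {
    if (!ProtobufUtil.isPBMagicPrefix(znodeData)) {
      throw new DeserializationException("Expected PB encoded DeprecatedTableState");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      return ZooKeeperProtos.DeprecatedTableState.newBuilder()
          .mergeFrom(znodeData, pblen, znodeData.length - pblen)
          .build().getState();
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
  }
}
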
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 2b12f8103534..49f2e3c7f4ae 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -54,7 +55,6 @@
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Service to support Region Server Grouping (HBase-6721)
@@ -269,8 +269,8 @@ public void moveTables(Set<TableName> tables, String targetGroup) throws IOExcep
     }
     for(TableName table: tables) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(table,
-          ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED,
+          TableState.State.DISABLING)) {
         LOG.debug("Skipping move regions because the table" + table + " is disabled.");
         continue;
       }
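
The replacement pattern is the same wherever ZooKeeperProtos.Table.State was consulted; a consolidated sketch (class and method names are illustrative, the master handle is assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableStateManager;

public class TableStateCheckSketch {
  /** True if the table should be skipped because it is disabled or being disabled. */
  static boolean skipDisabled(MasterServices master, TableName table) {
    TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
    // State constants now come from the client-side TableState, not ZooKeeperProtos.Table.
    return tsm.isTableState(table, TableState.State.DISABLED, TableState.State.DISABLING);
  }
}
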
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6799e69fb9f4..41a83a584f94 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -53,7 +53,6 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -61,11 +60,13 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.net.Address;
@@ -74,7 +75,6 @@
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -646,7 +646,7 @@ public boolean visit(Result row) throws IOException {
                     if (sn == null) {
                       found.set(false);
                     } else if (tsm.isTableState(RSGROUP_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
@@ -670,7 +670,7 @@ public boolean visit(Result row) throws IOException {
                     if (sn == null) {
                       nsFound.set(false);
                     } else if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
index bdb202d362c9..b4c808cb1062 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
@@ -55,12 +55,4 @@ public interface CoordinatedStateManager {
    * @return instance of Server coordinated state manager runs within
    */
   Server getServer();
-
-  /**
-   * Returns implementation of TableStateManager.
-   * @throws InterruptedException if operation is interrupted
-   * @throws CoordinatedStateException if error happens in underlying coordination mechanism
-   */
-  TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
new file mode 100644
index 000000000000..5db0f6928158
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.InvalidProtocolBufferException;
+/**
+ * Represents a table's schema and state as persisted on HDFS.
+ */
+@InterfaceAudience.Private
+public class TableDescriptor {
+  private HTableDescriptor hTableDescriptor;
+  private TableState.State tableState;
+
+  /**
+   * Creates TableDescriptor with all fields.
+   * @param hTableDescriptor HTableDescriptor to use
+   * @param tableState table state
+   */
+  public TableDescriptor(HTableDescriptor hTableDescriptor,
+      TableState.State tableState) {
+    this.hTableDescriptor = hTableDescriptor;
+    this.tableState = tableState;
+  }
+
+  /**
+   * Creates a TableDescriptor for a table in the ENABLED state.
+   * @param hTableDescriptor HTableDescriptor to use
+   */
+  @VisibleForTesting
+  public TableDescriptor(HTableDescriptor hTableDescriptor) {
+    this(hTableDescriptor, TableState.State.ENABLED);
+  }
+
+  /**
+   * Associated HTableDescriptor
+   * @return instance of HTableDescriptor
+   */
+  public HTableDescriptor getHTableDescriptor() {
+    return hTableDescriptor;
+  }
+
+  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
+    this.hTableDescriptor = hTableDescriptor;
+  }
+
+  public TableState.State getTableState() {
+    return tableState;
+  }
+
+  public void setTableState(TableState.State tableState) {
+    this.tableState = tableState;
+  }
+
+  /**
+   * Convert to PB.
+   */
+  public HBaseProtos.TableDescriptor convert() {
+    return HBaseProtos.TableDescriptor.newBuilder()
+        .setSchema(hTableDescriptor.convert())
+        .setState(tableState.convert())
+        .build();
+  }
+
+  /**
+   * Convert from PB
+   */
+  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
+    TableState.State state = TableState.State.convert(proto.getState());
+    return new TableDescriptor(hTableDescriptor, state);
+  }
+
+  /**
+   * @return This instance serialized with pb with pb magic prefix
+   * @see #parseFrom(byte[])
+   */
+  public byte [] toByteArray() {
+    return ProtobufUtil.prependPBMagic(convert().toByteArray());
+  }
+
+  /**
+   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
+   * @see #toByteArray()
+   */
+  public static TableDescriptor parseFrom(final byte [] bytes)
+      throws DeserializationException, IOException {
+    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Expected PB encoded TableDescriptor");
+    }
+    int pblen = ProtobufUtil.lengthOfPBMagic();
+    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
+    HBaseProtos.TableDescriptor ts;
+    try {
+      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return convert(ts);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    TableDescriptor that = (TableDescriptor) o;
+
+    if (hTableDescriptor != null ?
+        !hTableDescriptor.equals(that.hTableDescriptor) :
+        that.hTableDescriptor != null){
+      return false;
+    }
+    if (tableState != that.tableState) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
+    result = 31 * result + (tableState != null ? tableState.hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "TableDescriptor{" +
+        "hTableDescriptor=" + hTableDescriptor +
+        ", tableState=" + tableState +
+        '}';
+  }
+}
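
To illustrate the serialization contract of the new class above: toByteArray() emits the pb-magic-prefixed protobuf form and parseFrom() inverts it. A minimal sketch, not part of the patch (the table name "t1" and the wrapper method are illustrative; only constructors and methods added in this file are used):

    // Round-trip a TableDescriptor through its pb-magic-prefixed byte form.
    static void roundTripExample() throws IOException, DeserializationException {
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
      TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
      byte[] bytes = td.toByteArray();   // prepends the pb magic prefix
      // parseFrom() rejects input lacking the magic prefix with DeserializationException.
      TableDescriptor roundTripped = TableDescriptor.parseFrom(bytes);
      assert td.equals(roundTripped);    // equals() compares both schema and state
    }
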
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 33ae1d5aa933..c7bfd03e9595 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -36,6 +36,14 @@ public interface TableDescriptors {
   HTableDescriptor get(final TableName tableName)
   throws IOException;
 
+  /**
+   * @param tableName
+   * @return TableDescriptor for tablename
+   * @throws IOException
+   */
+  TableDescriptor getDescriptor(final TableName tableName)
+      throws IOException;
+
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
@@ -53,6 +61,15 @@ Map getByNamespace(String name)
   Map<String, HTableDescriptor> getAll()
   throws IOException;
 
+  /**
+   * Get Map of all TableDescriptors. Populates the descriptor cache as a
+   * side effect.
+   * @return Map of all descriptors.
+   * @throws IOException
+   */
+  Map<String, TableDescriptor> getAllDescriptors()
+      throws IOException;
+
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
@@ -61,6 +78,14 @@ Map getAll()
   void add(final HTableDescriptor htd)
   throws IOException;
 
+  /**
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
+   */
+  void add(final TableDescriptor htd)
+      throws IOException;
+
   /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
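
The widened interface lets callers read the persisted state alongside the schema. A minimal usage sketch, assuming any TableDescriptors implementation (e.g. the FS-backed one); the helper method below is hypothetical and not part of the patch:

    // Return the schema only when the persisted state says the table is enabled.
    static HTableDescriptor getSchemaIfEnabled(TableDescriptors tds, TableName name)
        throws IOException {
      TableDescriptor td = tds.getDescriptor(name);
      if (td == null || td.getTableState() != TableState.State.ENABLED) {
        return null;
      }
      return td.getHTableDescriptor();
    }
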
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
deleted file mode 100644
index 21c09b8a853e..000000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.InterruptedIOException;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Helper class for table state management for operations running inside
- * RegionServer or HMaster.
- * Depending on implementation, fetches information from HBase system table,
- * local data store, ZooKeeper ensemble or somewhere else.
- * Code running on client side (with no coordinated state context) shall instead use
- * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader}
- */
-@InterfaceAudience.Private
-public interface TableStateManager {
-
-  /**
-   * Sets the table into desired state. Fails silently if the table is already in this state.
-   * @param tableName table to process
-   * @param state new state of this table
-   * @throws CoordinatedStateException if error happened when trying to set table state
-   */
-  void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is already in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                  ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is NOT in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should NOT be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                     ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states);
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, boolean checkSource,
-      ZooKeeperProtos.Table.State... states);
-
-  /**
-   * Mark table as deleted. Fails silently if the table is not currently marked as disabled.
-   * @param tableName table to be deleted
-   * @throws CoordinatedStateException if error happened while performing operation
-   */
-  void setDeletedTable(TableName tableName) throws CoordinatedStateException;
-
-  /**
-   * Checks if table is present.
-   *
-   * @param tableName table we're checking
-   * @return true if the table is present, false otherwise
-   */
-  boolean isTablePresent(TableName tableName);
-
-  /**
-   * @return set of tables which are in any one of the listed states, empty Set if none
-   */
-  Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException;
-
-  /**
-   * If the table is found in the given state the in-memory state is removed. This
-   * helps in cases where CreateTable is to be retried by the client in case of
-   * failures.  If deletePermanentState is true - the flag kept permanently is
-   * also reset.
-   *
-   * @param tableName table we're working on
-   * @param states if table isn't in any one of these states, operation aborts
-   * @param deletePermanentState if true, reset the permanent flag
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                            boolean deletePermanentState)
-    throws CoordinatedStateException;
-}
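
With this ZooKeeper-proto-based interface gone, callers throughout the rest of the patch switch to the master-local TableStateManager keyed on the client-visible TableState.State enum. The shape of that migration, as seen in the AssignmentManager and HMaster hunks below, is roughly:

    // Before: coordination-layer manager, ZooKeeper protobuf enum,
    // CoordinatedStateException-based errors.
    //   am.getTableStateManager().isTableState(table, ZooKeeperProtos.Table.State.DISABLED);
    // After: master-local manager, client-visible enum, IOException-based errors.
    //   am.getTableStateManager().isTableState(table, TableState.State.DISABLED);
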
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index f79e5d8dfbb5..03762aba5eba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
 
 /**
  * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations.
@@ -49,9 +47,6 @@ public Server getServer() {
     return null;
   }
 
-  @Override
-  public abstract TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
   /**
    * Method to retrieve coordination for split log worker
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 2f739befb4f8..7222b0f52220 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -20,13 +20,9 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
@@ -60,16 +56,6 @@ public Server getServer() {
     return server;
   }
 
-  @Override
-  public TableStateManager getTableStateManager() throws InterruptedException,
-      CoordinatedStateException {
-    try {
-      return new ZKTableStateManager(server.getZooKeeper());
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
   @Override
   public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
     return splitLogWorkerCoordination;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
index 812bbe25ccb8..b54740a86ffe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
@@ -23,11 +23,11 @@
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -309,7 +309,7 @@ public boolean commitOpenOnMasterSide(AssignmentManager assignmentManager,
     }
     if (!openedNodeDeleted) {
       if (assignmentManager.getTableStateManager().isTableState(regionInfo.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         debugLog(regionInfo, "Opened region "
           + regionInfo.getShortNameToLog() + " but "
           + "this table is disabled, triggering close of region");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index 7b93e8f0e18b..61ace7817046 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,8 +17,10 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.master;
-
+import com.google.common.collect.ImmutableList;
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
@@ -36,9 +38,10 @@
 import org.apache.zookeeper.KeeperException;
 
 /**
- * Handles everything on master-side related to master election.
+ * Handles everything on master-side related to master election. Keeps track of
+ * currently active master and registered backup masters.
  *
- * <p>Listens and responds to ZooKeeper notifications on the master znode,
+ * <p>Listens and responds to ZooKeeper notifications on the master znodes,
  * both nodeCreated and nodeDeleted.
  *
  * <p>Contains blocking methods which will hold up backup masters, waiting
@@ -57,20 +60,31 @@ public class ActiveMasterManager extends ZooKeeperListener {
   final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false);
   final AtomicBoolean clusterShutDown = new AtomicBoolean(false);
 
+  // This server's information.
   private final ServerName sn;
   private int infoPort;
   private final Server master;
+
+  // Active master's server name. Invalidated anytime active master changes (based on ZK
+  // notifications) and lazily fetched on-demand.
+  // ServerName is immutable, so we don't need heavy synchronization around it.
+  volatile ServerName activeMasterServerName;
+  // Registered backup masters. List is kept up to date based on ZK change notifications to
+  // backup znode.
+  private volatile ImmutableList<ServerName> backupMasters;
 
   /**
-   * @param watcher
+   * @param watcher ZK watcher
    * @param sn ServerName
    * @param master In an instance of a Master.
    */
-  ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) {
+  ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master)
+      throws InterruptedIOException {
     super(watcher);
     watcher.registerListener(this);
     this.sn = sn;
     this.master = master;
+    updateBackupMasters();
   }
 
   // will be set after jetty server is started
@@ -84,8 +98,18 @@ public void nodeCreated(String path) {
   }
 
   @Override
-  public void nodeDeleted(String path) {
+  public void nodeChildrenChanged(String path) {
+    if (path.equals(watcher.backupMasterAddressesZNode)) {
+      try {
+        updateBackupMasters();
+      } catch (InterruptedIOException ioe) {
+        LOG.error("Error updating backup masters", ioe);
+      }
+    }
+  }
 
+  @Override
+  public void nodeDeleted(String path) {
     // We need to keep track of the cluster's shutdown status while
     // we wait on the current master. We consider that, if the cluster
     // was already in a "shutdown" state when we started, that this master
@@ -96,7 +120,6 @@ public void nodeDeleted(String path) {
     if(path.equals(watcher.clusterStateZNode) && !master.isStopped()) {
       clusterShutDown.set(true);
     }
-
     handle(path);
   }
 
@@ -106,6 +129,38 @@ void handle(final String path) {
     }
   }
 
+  private void updateBackupMasters() throws InterruptedIOException {
+    backupMasters =
+        ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher));
+  }
+
+  /**
+   * Fetches the active master's ServerName from zookeeper.
+   */
+  private void fetchAndSetActiveMasterServerName() {
+    LOG.debug("Attempting to fetch active master sn from zk");
+    try {
+      activeMasterServerName = MasterAddressTracker.getMasterAddress(watcher);
+    } catch (IOException | KeeperException e) {
+      // Log and ignore for now and re-fetch later if needed.
+      LOG.error("Error fetching active master information", e);
+    }
+  }
+
+  /**
+   * @return the currently active master as seen by us or null if one does not exist.
+   */
+  public ServerName getActiveMasterServerName() {
+    if (!clusterHasActiveMaster.get()) {
+      return null;
+    }
+    if (activeMasterServerName == null) {
+      fetchAndSetActiveMasterServerName();
+    }
+    // It could still be null, but return whatever we have.
+    return activeMasterServerName;
+  }
+
   /**
    * Handle a change in the master node.  Doesn't matter whether this was called
    * from a nodeCreated or nodeDeleted event because there are no guarantees
@@ -134,6 +189,9 @@ private void handleMasterNodeChange() {
         // Notify any thread waiting to become the active master
         clusterHasActiveMaster.notifyAll();
       }
+      // Reset the active master sn. Will be re-fetched later if needed.
+      // We don't want to make a synchronous RPC under a monitor.
+      activeMasterServerName = null;
     }
   } catch (KeeperException ke) {
     master.abort("Received an unexpected KeeperException, aborting", ke);
@@ -151,8 +209,8 @@ private void handleMasterNodeChange() {
    * @param checkInterval the interval to check if the master is stopped
    * @param startupStatus the monitor status to track the progress
    * @return True if no issue becoming active master else false if another
-   * master was running or if some other problem (zookeeper, stop flag has been
-   * set on this Master)
+   *   master was running or if some other problem (zookeeper, stop flag has been
+   *   set on this Master)
    */
   boolean blockUntilBecomingActiveMaster(
       int checkInterval, MonitoredTask startupStatus) {
@@ -179,9 +237,13 @@ boolean blockUntilBecomingActiveMaster(
         startupStatus.setStatus("Successfully registered as active master.");
         this.clusterHasActiveMaster.set(true);
         LOG.info("Registered Active Master=" + this.sn);
+        activeMasterServerName = sn;
         return true;
       }
 
+      // Invalidate the active master name so that subsequent requests do not get any stale
+      // master information. Will be re-fetched if needed.
+      activeMasterServerName = null;
       // There is another active master running elsewhere or this is a restart
       // and the master ephemeral node has not expired yet.
       this.clusterHasActiveMaster.set(true);
@@ -208,7 +270,8 @@ boolean blockUntilBecomingActiveMaster(
         ZKUtil.deleteNode(this.watcher, this.watcher.getMasterAddressZNode());
 
         // We may have failed to delete the znode at the previous step, but
-        // we delete the file anyway: a second attempt to delete the znode is likely to fail again.
+        // we delete the file anyway: a second attempt to delete the znode is likely to fail
+        // again.
         ZNodeClearer.deleteMyEphemeralNodeOnDisk();
       } else {
         msg = "Another master is the active master, " + currentMaster +
@@ -280,4 +343,11 @@ public void stop() {
       LOG.error(this.watcher.prefix("Error deleting our own master address node"), e);
     }
   }
+
+  /**
+   * @return list of registered backup masters.
+   */
+  public List<ServerName> getBackupMasters() {
+    return backupMasters;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index de4edbbea1b5..842ce85687d0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -64,7 +63,6 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
@@ -77,6 +75,7 @@
 import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
 import org.apache.hadoop.hbase.coordination.ZkRegionMergeCoordination;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -92,12 +91,12 @@
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
@@ -286,14 +285,11 @@ public enum ServerHostRegion {
    * @param service Executor service
    * @param metricsMaster metrics manager
    * @param tableLockManager TableLock manager
-   * @throws KeeperException
-   * @throws IOException
    */
   public AssignmentManager(MasterServices server, ServerManager serverManager,
       final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager) throws KeeperException,
-      IOException, CoordinatedStateException {
+      final TableLockManager tableLockManager, final TableStateManager tableStateManager) {
     super(server.getZooKeeper());
     this.server = server;
     this.serverManager = serverManager;
@@ -306,15 +302,9 @@ public AssignmentManager(MasterServices server, ServerManager serverManager,
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
            HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
            FavoredNodeLoadBalancer.class);
-    try {
-      if (server.getCoordinatedStateManager() != null) {
-        this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
-      } else {
-        this.tableStateManager = null;
-      }
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    }
+
+    this.tableStateManager = tableStateManager;
+
     // This is the max attempts, not retries, so it should be at least 1.
     this.maximumAttempts = Math.max(1,
       this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
@@ -392,7 +382,7 @@ public boolean unregisterListener(final AssignmentListener listener) {
   }
 
   /**
-   * @return Instance of ZKTableStateManager.
+   * @return Instance of TableStateManager.
    */
   public TableStateManager getTableStateManager() {
     // These are 'expensive' to make involving trip to zk ensemble so allow
@@ -516,10 +506,9 @@ void failoverCleanupDone() {
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
-   * @throws CoordinatedStateException
    */
   void joinCluster() throws IOException,
-      KeeperException, InterruptedException, CoordinatedStateException {
+      KeeperException, CoordinatedStateException {
     long startTime = System.currentTimeMillis();
     // Concurrency note: In the below the accesses on regionsInTransition are
     // outside of a synchronization block where usually all accesses to RIT are
@@ -560,7 +549,7 @@ void joinCluster() throws IOException,
    * @throws InterruptedException
    */
   boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServers)
-      throws KeeperException, IOException, InterruptedException, CoordinatedStateException {
+      throws KeeperException, IOException {
     List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
       watcher.assignmentZNode);
 
     if (useZKForAssignment && nodes == null) {
@@ -568,7 +557,6 @@ boolean processDeadServersAndRegionsInTransition(final Set deadServe
       server.abort(errorMessage, new IOException(errorMessage));
       return true; // Doesn't matter in this case
     }
-
     boolean failover = !serverManager.getDeadServers().isEmpty();
     if (failover) {
       // This may not be a failover actually, especially if meta is on this master.
@@ -689,7 +677,11 @@ boolean processDeadServersAndRegionsInTransition(final Set deadServe
     if (!failover) {
       // Fresh cluster startup.
       LOG.info("Clean cluster startup. Assigning user regions");
-      assignAllUserRegions(allRegions);
+      try {
+        assignAllUserRegions(allRegions);
+      } catch (InterruptedException ie) {
+        ExceptionUtil.rethrowIfInterrupt(ie);
+      }
     }
     // unassign replicas of the split parents and the merged regions
     // the daughter replicas are opened in assignAllUserRegions if it was
@@ -707,11 +699,10 @@ boolean processDeadServersAndRegionsInTransition(final Set deadServe
    * locations are returned.
    */
   private Map<HRegionInfo, ServerName> getUserRegionsToAssign()
-      throws InterruptedIOException, CoordinatedStateException {
+      throws IOException {
     Set<TableName> disabledOrDisablingOrEnabling =
-        tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED,
-            ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLING);
-
+        tableStateManager.getTablesInStates(TableState.State.DISABLED,
+            TableState.State.DISABLING, TableState.State.ENABLING);
     // Clean re/start, mark all user regions closed before reassignment
     return regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
   }
@@ -739,7 +730,7 @@ public void run() {
         try {
           // Assign the regions
           assignAllUserRegions(getUserRegionsToAssign());
-        } catch (CoordinatedStateException | IOException | InterruptedException e) {
+        } catch (IOException | InterruptedException e) {
           LOG.error("Exception occured while assigning user regions.", e);
         }
       };
@@ -1482,7 +1473,7 @@ public void run() {
         LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
 
         boolean disabled = getTableStateManager().isTableState(regionInfo.getTable(),
-            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
+            TableState.State.DISABLED, TableState.State.DISABLING);
 
         ServerName serverName = rs.getServerName();
         if (serverManager.isServerOnline(serverName)) {
@@ -2269,7 +2260,7 @@ public void assign(RegionState state,
           // will not be in ENABLING or ENABLED state.
           TableName tableName = region.getTable();
           if (!tableStateManager.isTableState(tableName,
-              ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) {
+              TableState.State.ENABLED, TableState.State.ENABLING)) {
             LOG.debug("Setting table " + tableName + " to ENABLED state.");
             setEnabledTable(tableName);
           }
@@ -2495,8 +2486,8 @@ private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
   private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
     if (this.tableStateManager.isTableState(region.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
+        TableState.State.DISABLED,
+        TableState.State.DISABLING) || replicasToClose.contains(region)) {
       LOG.info("Table " + region.getTable() + " is disabled or disabling;"
         + " skipping assign of " + region.getRegionNameAsString());
       offlineDisabledRegion(region);
@@ -3127,7 +3118,7 @@ private void assignAllUserRegions(Map allRegions)
     for (HRegionInfo hri : regionsFromMetaScan) {
       TableName tableName = hri.getTable();
       if (!tableStateManager.isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+          TableState.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -3194,14 +3185,14 @@ boolean waitUntilNoRegionsInTransition(final long timeout)
    * @throws IOException
    */
   Set<ServerName> rebuildUserRegions() throws
-      IOException, KeeperException, CoordinatedStateException {
+      IOException, KeeperException {
     Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING);
+        TableState.State.DISABLED, TableState.State.ENABLING);
     Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-        ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING,
-        ZooKeeperProtos.Table.State.ENABLING);
+        TableState.State.DISABLED,
+        TableState.State.DISABLING,
+        TableState.State.ENABLING);
 
     // Region assignment from META
     List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
@@ -3253,7 +3244,7 @@ Set rebuildUserRegions() throws
         ServerName lastHost = hrl.getServerName();
         ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
         if (tableStateManager.isTableState(regionInfo.getTable(),
-            ZooKeeperProtos.Table.State.DISABLED)) {
+            TableState.State.DISABLED)) {
           // force region to forget it hosts for disabled/disabling tables.
           // see HBASE-13326
           lastHost = null;
@@ -3283,7 +3274,7 @@ Set rebuildUserRegions() throws
       // this will be used in rolling restarts
       if (!disabledOrDisablingOrEnabling.contains(tableName)
         && !getTableStateManager().isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+          TableState.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -3300,9 +3291,9 @@ Set rebuildUserRegions() throws
    * @throws IOException
    */
   private void recoverTableInDisablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+      throws KeeperException, IOException {
     Set<TableName> disablingTables =
-        tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
+        tableStateManager.getTablesInStates(TableState.State.DISABLING);
     if (disablingTables.size() != 0) {
       for (TableName tableName : disablingTables) {
         // Recover by calling DisableTableHandler
@@ -3324,9 +3315,9 @@ private void recoverTableInDisablingState()
    * @throws IOException
    */
   private void recoverTableInEnablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+      throws KeeperException, IOException {
     Set<TableName> enablingTables = tableStateManager.
-        getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
+        getTablesInStates(TableState.State.ENABLING);
     if (enablingTables.size() != 0) {
       for (TableName tableName : enablingTables) {
         // Recover by calling EnableTableHandler
@@ -3398,9 +3389,9 @@ void processRegionInTransitionZkLess() {
         LOG.info("Server " + serverName + " isn't online. SSH will handle this");
         continue;
       }
+      RegionState.State state = regionState.getState();
       HRegionInfo regionInfo = regionState.getRegion();
-      State state = regionState.getState();
-
+      LOG.info("Processing " + regionState);
       switch (state) {
       case CLOSED:
         invokeAssign(regionInfo);
@@ -3790,7 +3781,7 @@ public List cleanOutCrashedServerReferences(final ServerName sn) {
         server.abort("Unexpected ZK exception deleting node " + hri, ke);
       }
       if (tableStateManager.isTableState(hri.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         regionStates.regionOffline(hri);
         it.remove();
         continue;
@@ -3813,7 +3804,7 @@ public void balance(final RegionPlan plan) {
     HRegionInfo hri = plan.getRegionInfo();
     TableName tableName = hri.getTable();
     if (tableStateManager.isTableState(tableName,
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       LOG.info("Ignored moving region of disabling/disabled table "
         + tableName);
       return;
@@ -3861,8 +3852,8 @@ public void shutdown() {
   protected void setEnabledTable(TableName tableName) {
     try {
       this.tableStateManager.setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
+        TableState.State.ENABLED);
+    } catch (IOException e) {
       // here we can abort as it is the start up flow
       String errorMsg = "Unable to ensure that the table " + tableName + " will be" +
           " enabled because of a ZooKeeper issue";
@@ -3967,8 +3958,8 @@ private void onRegionFailedOpen(
     // When there are more than one region server a new RS is selected as the
    // destination and the same is updated in the region plan. (HBASE-5546)
     if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-        replicasToClose.contains(hri)) {
+        TableState.State.DISABLED, TableState.State.DISABLING) ||
+        replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return;
     }
@@ -3996,15 +3987,14 @@ private void onRegionOpen(final HRegionInfo hri, final ServerName sn, long openS
     // reset the count, if any
     failedOpenTracker.remove(hri.getEncodedName());
     if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     }
   }
 
   private void onRegionClosed(final HRegionInfo hri) {
-    if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-        replicasToClose.contains(hri)) {
+    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED,
+        TableState.State.DISABLING) || replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return;
     }
@@ -4050,7 +4040,7 @@ private String onRegionSplitReverted(ServerName sn,
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(p);
     }
     return null;
@@ -4076,7 +4066,7 @@ private String onRegionSplit(ServerName sn, TransitionCode code,
     // User could disable the table before master knows the new region.
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     } else {
@@ -4130,7 +4120,7 @@ private String onRegionMerge(ServerName sn, TransitionCode code,
     // User could disable the table before master knows the new region.
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(p);
     } else {
       Callable<Object> mergeReplicasCallable = new Callable<Object>() {
@@ -4170,7 +4160,7 @@ private String onRegionMergeReverted(ServerName sn, TransitionCode code,
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     }
@@ -4291,7 +4281,7 @@ private boolean handleRegionMerging(final RegionTransition rt, final String enco
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(p);
       }
     }
@@ -4421,7 +4411,7 @@ private boolean handleRegionSplitting(final RegionTransition rt, final String en
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(hri_a);
         unassign(hri_b);
       }
@@ -4692,7 +4682,7 @@ protected String onRegionTransition(final ServerName serverName,
           errorMsg = hri.getShortNameToLog()
             + " is not pending close on " + serverName;
         } else {
-            onRegionClosed(hri);
+          onRegionClosed(hri);
         }
         break;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java
new file mode 100644
index 000000000000..6825b8910825
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client
+ * RPCs that fetch the cluster ID. ClusterID is only created by an active master if one does not
+ * already exist. Standby masters just read the information from the file system. This class is
+ * thread-safe.
+ *
+ * TODO: Make it a singleton without affecting concurrent junit tests.
+ */
+@InterfaceAudience.Private
+public class CachedClusterId {
+
+  public static final Logger LOG = LoggerFactory.getLogger(CachedClusterId.class);
+  private static final int MAX_FETCH_TIMEOUT_MS = 10000;
+
+  private Path rootDir;
+  private FileSystem fs;
+
+  // When true, indicates that a FileSystem fetch of ClusterID is in progress. This is used to
+  // avoid multiple fetches from FS and let only one thread fetch the information.
+  AtomicBoolean fetchInProgress = new AtomicBoolean(false);
+
+  // When true, it means that the cluster ID has been fetched successfully from fs.
+  private AtomicBoolean isClusterIdSet = new AtomicBoolean(false);
+  // Immutable once set and read multiple times.
+  private ClusterId clusterId;
+
+  // Cache stats for testing.
+  private AtomicInteger cacheMisses = new AtomicInteger(0);
+
+  public CachedClusterId(Configuration conf) throws IOException {
+    rootDir = FSUtils.getRootDir(conf);
+    fs = rootDir.getFileSystem(conf);
+  }
+
+  /**
+   * Succeeds only once, when setting to a non-null value. Overwrites are not allowed.
+   */
+  private void setClusterId(ClusterId id) {
+    if (id == null || isClusterIdSet.get()) {
+      return;
+    }
+    clusterId = id;
+    isClusterIdSet.set(true);
+  }
+
+  /**
+   * Returns a cached copy of the cluster ID. null if the cache is not populated.
+   */
+  private String getClusterId() {
+    if (!isClusterIdSet.get()) {
+      return null;
+    }
+    // It is ok to read without a lock since clusterId is immutable once set.
+    return clusterId.toString();
+  }
+
+  /**
+   * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress,
+   * synchronously fetches the cluster ID and sets it. If an attempt is already in progress,
+   * returns right away and the caller is expected to wait for the fetch to finish.
+   * @return true if the attempt is done, false if another thread is already fetching it.
+   */
+  private boolean attemptFetch() {
+    if (fetchInProgress.compareAndSet(false, true)) {
+      // A fetch is not in progress, so try fetching the cluster ID synchronously and then notify
+      // the waiting threads.
+      try {
+        cacheMisses.incrementAndGet();
+        setClusterId(FSUtils.getClusterId(fs, rootDir));
+      } catch (IOException e) {
+        LOG.warn("Error fetching cluster ID", e);
+      } finally {
+        Preconditions.checkState(fetchInProgress.compareAndSet(true, false));
+        synchronized (fetchInProgress) {
+          fetchInProgress.notifyAll();
+        }
+      }
+      return true;
+    }
+    return false;
+  }
+
+  private void waitForFetchToFinish() throws InterruptedException {
+    synchronized (fetchInProgress) {
+      while (fetchInProgress.get()) {
+        // We don't want the fetches to block forever, for example if there are bugs
+        // of missing notifications.
+        fetchInProgress.wait(MAX_FETCH_TIMEOUT_MS);
+      }
+    }
+  }
+
+  /**
+   * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached
+   * copy and is thread-safe. Optimized to do a single fetch when multiple threads are trying to
+   * get it from a clean cache.
+   *
+   * @return ClusterId read from the FileSystem, or null on error or if the cluster ID does not
+   *   exist on the file system.
+   */
+  public String getFromCacheOrFetch() {
+    String id = getClusterId();
+    if (id != null) {
+      return id;
+    }
+    if (!attemptFetch()) {
+      // A fetch is in progress.
+      try {
+        waitForFetchToFinish();
+      } catch (InterruptedException e) {
+        // pass and return whatever is in the cache.
+      }
+    }
+    return getClusterId();
+  }
+
+  @VisibleForTesting
+  public int getCacheStats() {
+    return cacheMisses.get();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ffabbb4f86aa..1a2f52b2750f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -28,7 +29,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -85,8 +85,8 @@
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@@ -140,7 +140,6 @@
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -168,6 +167,7 @@
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -189,12 +189,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
 
 /**
- * HMaster is the "master server" for HBase. An HBase cluster has one active
- * master.  If many masters are started, all compete.  Whichever wins goes on to
  * run the cluster.  All others park themselves in their constructor until
  * master or cluster shutdown or until the active master loses its lease in
  * zookeeper.  Thereafter, all running master jostle to take over master role.
@@ -307,6 +303,12 @@ public void run() {
 
   private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
 
+  /**
+   * Cache for the meta region replica's locations. Also tracks their changes to avoid stale
+   * cache entries.
+   */
+  private final MetaRegionLocationCache metaRegionLocationCache;
+
   // buffer for "fatal error" notices from region servers
   // in the cluster. This is only used for assisting
   // operations/debugging.
@@ -380,12 +382,18 @@ public void run() {
   private long splitPlanCount;
   private long mergePlanCount;
 
+  // handle table states
+  private TableStateManager tableStateManager;
+
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
 
   /** jetty server for master to redirect requests to regionserver infoServer */
   private org.mortbay.jetty.Server masterJettyServer;
 
+  // Cached clusterId on stand by masters to serve clusterID requests from clients.
+  private final CachedClusterId cachedClusterId;
+
   public static class RedirectServlet extends HttpServlet {
     private static final long serialVersionUID = 2894774810058302473L;
     private final int regionServerInfoPort;
@@ -514,13 +522,16 @@ public HMaster(final Configuration conf, CoordinatedStateManager csm)
     // Some unit tests don't need a cluster, so no zookeeper at all
     if (!conf.getBoolean("hbase.testing.nocluster", false)) {
+      this.metaRegionLocationCache = new MetaRegionLocationCache(this.zooKeeper);
       setInitLatch(new CountDownLatch(1));
       activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
       int infoPort = putUpJettyServer();
       startActiveMasterManager(infoPort);
     } else {
+      this.metaRegionLocationCache = null;
       activeMasterManager = null;
     }
+    cachedClusterId = new CachedClusterId(conf);
   }
 
   // return the actual infoPort, -1 means disable info server.
@@ -683,9 +694,8 @@ void initializeZKBasedSystemTrackers() throws IOException,
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager);
+      this.tableLockManager, tableStateManager);
     zooKeeper.registerListenerFirst(assignmentManager);
-
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
       this.serverManager);
     this.regionServerTracker.start();
@@ -717,6 +727,14 @@ void initializeZKBasedSystemTrackers() throws IOException,
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
+
+    // migrating existent table state from zk
+    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
+        .queryForTableStates(getZooKeeper()).entrySet()) {
+      LOG.info("Converting state from zk to new states:" + entry);
+      tableStateManager.setTableState(entry.getKey(), entry.getValue());
+    }
+    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
   }
 
   /**
@@ -781,6 +799,9 @@ private void finishActiveMasterInitialization(MonitoredTask status)
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
 
+    this.tableStateManager = new TableStateManager(this);
+    this.tableStateManager.start();
+
     status.setStatus("Initializing ZK system trackers");
     initializeZKBasedSystemTrackers();
@@ -1175,8 +1196,8 @@ private void enableCrashedServerProcessing(final boolean waitForMeta)
   }
 
   private void enableMeta(TableName metaTableName) {
-    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+    if (!this.tableStateManager.isTableState(metaTableName,
+        TableState.State.ENABLED)) {
       this.assignmentManager.setEnabledTable(metaTableName);
     }
   }
@@ -1220,6 +1241,11 @@ public TableNamespaceManager getTableNamespaceManager() {
     return tableNamespaceManager;
   }
 
+  @Override
+  public TableStateManager getTableStateManager() {
+    return tableStateManager;
+  }
+
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1652,7 +1678,7 @@ public boolean normalizeRegions() throws IOException, CoordinatedStateException
       // Don't run the normalizer concurrently
       List<TableName> allEnabledTables = new ArrayList<>(
         this.assignmentManager.getTableStateManager().getTablesInStates(
-          ZooKeeperProtos.Table.State.ENABLED));
+          TableState.State.ENABLED));
 
       Collections.shuffle(allEnabledTables);
@@ -2497,7 +2523,7 @@ public void checkTableModifiable(final TableName tableName)
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
-        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+        isTableState(tableName, TableState.State.DISABLED)) {
       throw new TableNotDisabledException(tableName);
     }
   }
@@ -2518,56 +2544,14 @@ public ClusterStatus getClusterStatus() throws IOException {
    */
   public ClusterStatus getClusterStatusWithoutCoprocessor() throws InterruptedIOException {
     // Build Set of backup masters from ZK nodes
-    List<String> backupMasterStrings;
-    try {
-      backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
-        this.zooKeeper.backupMasterAddressesZNode);
-    } catch (KeeperException e) {
-      LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
-      backupMasterStrings = null;
-    }
-
-    List<ServerName> backupMasters = null;
-    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
-      backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
-      for (String s: backupMasterStrings) {
-        try {
-          byte [] bytes;
-          try {
-            bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
-              this.zooKeeper.backupMasterAddressesZNode, s));
-          } catch (InterruptedException e) {
-            throw new InterruptedIOException();
-          }
-          if (bytes != null) {
-            ServerName sn;
-            try {
-              sn = ServerName.parseFrom(bytes);
-            } catch (DeserializationException e) {
-              LOG.warn("Failed parse, skipping registering backup server", e);
-              continue;
-            }
-            backupMasters.add(sn);
-          }
-        } catch (KeeperException e) {
-          LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
-            "backup servers"), e);
-        }
-      }
-      Collections.sort(backupMasters, new Comparator<ServerName>() {
-        @Override
-        public int compare(ServerName s1, ServerName s2) {
-          return s1.getServerName().compareTo(s2.getServerName());
-        }});
-    }
-
+    List<ServerName> backupMasters = getBackupMasters();
     String clusterId = fileSystemManager != null ?
-      fileSystemManager.getClusterId().toString() : null;
+        fileSystemManager.getClusterId().toString() : null;
     Set<RegionState> regionsInTransition = assignmentManager != null ?
-      assignmentManager.getRegionStates().getRegionsInTransition() : null;
+        assignmentManager.getRegionStates().getRegionsInTransition() : null;
     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
     boolean balancerOn = loadBalancerTracker != null ?
-      loadBalancerTracker.isBalancerOn() : false;
+        loadBalancerTracker.isBalancerOn() : false;
     Map<ServerName, ServerLoad> onlineServers = null;
     Set<ServerName> deadServers = null;
     if (serverManager != null) {
@@ -2575,8 +2559,12 @@ public int compare(ServerName s1, ServerName s2) {
       onlineServers = serverManager.getOnlineServers();
     }
     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
-      onlineServers, deadServers, serverName, backupMasters,
-      regionsInTransition, coprocessors, balancerOn);
+        onlineServers, deadServers, serverName, backupMasters,
+        regionsInTransition, coprocessors, balancerOn);
+  }
+
+  List<ServerName> getBackupMasters() {
+    return activeMasterManager.getBackupMasters();
   }
 
   /**
@@ -2611,6 +2599,21 @@ public long getMasterFinishedInitializationTime() {
     return masterFinishedInitializationTime;
   }
 
+  /**
+   * @return number of live region servers tracked by this master.
+   * @throws KeeperException if there is an issue with zookeeper connection.
+   */
+  public int getNumLiveRegionServers() throws KeeperException {
+    if (isActiveMaster()) {
+      return regionServerTracker.getOnlineServers().size();
+    }
+    // If the master is not active, we fall back to ZK to fetch the number of live region servers.
+    // This is an extra hop but that is okay since the ConnectionRegistry call that is serviced by
+    // this method is already deprecated and is not used in any active code paths. This method is
+    // here only for the test code.
+    return ZKUtil.getNumberOfChildren(zooKeeper, zooKeeper.rsZNode);
+  }
+
   public int getNumWALFiles() {
     return procedureStore != null ? procedureStore.getActiveLogs().size() : 0;
   }
@@ -3429,4 +3432,19 @@ public LoadBalancer getLoadBalancer() {
     }
     return replicationLoadSourceMap;
   }
+
+  public ServerName getActiveMaster() {
+    return activeMasterManager.getActiveMasterServerName();
+  }
+
+  public String getClusterId() {
+    if (activeMaster) {
+      return super.getClusterId();
+    }
+    return cachedClusterId.getFromCacheOrFetch();
+  }
+
+  public MetaRegionLocationCache getMetaRegionLocationCache() {
+    return this.metaRegionLocationCache;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 6ca0ad5cc0a7..c5e81015ba98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -547,7 +547,6 @@ private Path checkRootDir(final Path rd, final Configuration c,
       fsd.createTableDescriptor(
         new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
     }
-
     return rd;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 82b456e06422..963b94e2e593 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -28,6 +28,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -39,10 +40,12 @@
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -71,6 +74,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
@@ -93,12 +97,21 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
@@ -221,7 +234,8 @@
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 public class MasterRpcServices extends RSRpcServices
-  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface {
+  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
+    ClientMetaService.BlockingInterface {
   private static final Log LOG = LogFactory.getLog(MasterRpcServices.class.getName());
 
   private final HMaster master;
@@ -330,6 +344,9 @@ protected List getServices() {
     bssi.add(new BlockingServiceAndInterface(
       RegionServerStatusService.newReflectiveBlockingService(this),
       RegionServerStatusService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+      ClientMetaService.newReflectiveBlockingService(this),
+      ClientMetaService.BlockingInterface.class));
     bssi.addAll(super.getServices());
     return bssi;
   }
@@ -940,13 +957,11 @@ public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
   public GetTableNamesResponse getTableNames(RpcController controller,
       GetTableNamesRequest req) throws ServiceException {
     try {
-      master.checkInitialized();
-
+      master.checkServiceStarted();
       final String regex = req.hasRegex() ? req.getRegex() : null;
       final String namespace = req.hasNamespace() ? req.getNamespace() : null;
       List<TableName> tableNames = master.listTableNames(namespace, regex,
           req.getIncludeSysTables());
-
       GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
       if (tableNames != null && tableNames.size() > 0) {
         // Add the table names to the response
@@ -960,6 +975,26 @@ public GetTableNamesResponse getTableNames(RpcController controller,
     }
   }
 
+  @Override
+  public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
+      MasterProtos.GetTableStateRequest request) throws ServiceException {
+    try {
+      master.checkServiceStarted();
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      TableState.State state = master.getTableStateManager()
+          .getTableState(tableName);
+      if (state == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      MasterProtos.GetTableStateResponse.Builder builder =
+          MasterProtos.GetTableStateResponse.newBuilder();
+      builder.setTableState(new TableState(tableName, state).convert());
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
       IsCatalogJanitorEnabledRequest req) throws ServiceException {
@@ -1757,4 +1792,57 @@ private Admin.MasterSwitchType convert(MasterProtos.MasterSwitchType switchType)
     }
     return null;
   }
+
+  @Override
+  public GetClusterIdResponse getClusterId(RpcController rpcController, GetClusterIdRequest request)
+      throws ServiceException {
+    GetClusterIdResponse.Builder resp = GetClusterIdResponse.newBuilder();
+    String clusterId = master.getClusterId();
+    if (clusterId != null) {
+      resp.setClusterId(clusterId);
+    }
+    return resp.build();
+  }
+
+  @Override
+  public GetMastersResponse getMasters(RpcController rpcController, GetMastersRequest request)
+      throws ServiceException {
+    GetMastersResponse.Builder resp = GetMastersResponse.newBuilder();
+    // Active master
+    ServerName serverName = master.getActiveMaster();
+    if (serverName != null) {
+      resp.addMasterServers(GetMastersResponseEntry.newBuilder()
+          .setServerName(ProtobufUtil.toServerName(serverName)).setIsActive(true).build());
+    }
+    // Backup masters
+    for (ServerName backupMaster: master.getBackupMasters()) {
+      resp.addMasterServers(GetMastersResponseEntry.newBuilder().setServerName(
+          ProtobufUtil.toServerName(backupMaster)).setIsActive(false).build());
+    }
+    return resp.build();
+  }
+
+  @Override
+  public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController,
+      GetMetaRegionLocationsRequest request) throws ServiceException {
+    GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder();
+    List<HRegionLocation> metaLocations =
+        master.getMetaRegionLocationCache().getMetaRegionLocations();
+    for (HRegionLocation location:
metaLocations) { + response.addMetaLocations(ProtobufUtil.toRegionLocation(location)); + } + return response.build(); + } + + @Override + public GetNumLiveRSResponse getNumLiveRS(RpcController rpcController, GetNumLiveRSRequest request) + throws ServiceException { + GetNumLiveRSResponse.Builder response = GetNumLiveRSResponse.newBuilder(); + try { + response.setNumRegionServers(master.getNumLiveRegionServers()); + } catch (KeeperException ke) { + throw new ServiceException(ke); + } + return response.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index be6fb12d1c3c..d20b76468203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -83,6 +83,11 @@ public interface MasterServices extends Server { */ TableLockManager getTableLockManager(); + /** + * @return Master's instance of {@link TableStateManager} + */ + TableStateManager getTableStateManager(); + /** * @return Master's instance of {@link MasterCoprocessorHost} */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java new file mode 100644 index 000000000000..821cb18c1469 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.zookeeper.ZKUtil.joinZNode; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ThreadFactory; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.types.CopyOnWriteArrayMap; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A cache of meta region location metadata. Registers a listener on ZK to track changes to the + * meta table znodes. 
Clients are expected to retry if the meta information is stale. This class + * is thread-safe (a single instance of this class can be shared by multiple threads without race + * conditions). + */ +@InterfaceAudience.Private +public class MetaRegionLocationCache extends ZooKeeperListener { + + private static final Logger LOG = LoggerFactory.getLogger(MetaRegionLocationCache.class); + + /** + * Maximum number of times we retry when a ZK operation times out. + */ + private static final int MAX_ZK_META_FETCH_RETRIES = 10; + /** + * Sleep interval in ms between ZK operation retries. + */ + private static final int SLEEP_INTERVAL_MS_BETWEEN_RETRIES = 1000; + private static final int SLEEP_INTERVAL_MS_MAX = 10000; + private final RetryCounterFactory retryCounterFactory = + new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); + + /** + * Cached meta region locations indexed by replica ID. + * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during + * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write, + * that should be OK since the map is small and mutations are infrequent, and client reads are + * never blocked while a mutation is in progress. + */ + private final CopyOnWriteArrayMap<Integer, HRegionLocation> cachedMetaLocations; + + private enum ZNodeOpType { + INIT, + CREATED, + CHANGED, + DELETED + } + + public MetaRegionLocationCache(ZooKeeperWatcher zkWatcher) { + super(zkWatcher); + cachedMetaLocations = new CopyOnWriteArrayMap<>(); + watcher.registerListener(this); + // Populate the initial snapshot of data from meta znodes. + // This is needed because stand-by masters can potentially start after the initial znode + // creation. It retries indefinitely until the initial meta locations are loaded from ZK and + // watchers are established. Subsequent updates are handled by the registered listener. Also, + // this runs in a separate thread in the background to not block master init. + ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).build(); + final RetryCounterFactory retryFactory = new RetryCounterFactory( + Integer.MAX_VALUE, SLEEP_INTERVAL_MS_BETWEEN_RETRIES, SLEEP_INTERVAL_MS_MAX); + threadFactory.newThread( + new Runnable() { + @Override + public void run() { + MetaRegionLocationCache.this.loadMetaLocationsFromZk( + retryFactory.create(), ZNodeOpType.INIT); + } + }).start(); + } + + /** + * Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers + * a watcher on the base znode to check for any CREATE/DELETE events on the children. + * @param retryCounter controls the number of retries and sleep between retries. + * @param opType the type of znode event that triggered this load. + */ + private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opType) { + List<String> znodes = null; + while (retryCounter.shouldRetry()) { + try { + znodes = watcher.getMetaReplicaNodesAndWatchChildren(); + break; + } catch (KeeperException ke) { + LOG.debug("Error populating initial meta locations", ke); + if (!retryCounter.shouldRetry()) { + // Retries exhausted and watchers not set. This is not a desirable state since the cache + // could remain stale forever. Propagate the exception. 
+ watcher.abort("Error populating meta locations", ke); + return; + } + try { + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException ie) { + LOG.error("Interrupted while loading meta locations from ZK", ie); + Thread.currentThread().interrupt(); + return; + } + } + } + if (znodes == null || znodes.isEmpty()) { + // No meta znodes exist at this point but we registered a watcher on the base znode to listen + // for updates. They will be handled via nodeChildrenChanged(). + return; + } + if (znodes.size() == cachedMetaLocations.size()) { + // No new meta znodes got added. + return; + } + for (String znode: znodes) { + String path = joinZNode(watcher.baseZNode, znode); + updateMetaLocation(path, opType); + } + } + + /** + * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for + * future updates. + * @param replicaId ReplicaID of the region. + * @return HRegionLocation for the meta replica. + * @throws KeeperException if there is any issue fetching/parsing the serialized data. + */ + private HRegionLocation getMetaRegionLocation(int replicaId) + throws KeeperException { + RegionState metaRegionState; + try { + byte[] data = ZKUtil.getDataAndWatch(watcher, + watcher.getZNodeForReplica(replicaId)); + metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId); + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName()); + } + + private void updateMetaLocation(String path, ZNodeOpType opType) { + if (!isValidMetaZNode(path)) { + return; + } + LOG.debug("Updating meta znode for path {}: {}", path, opType.name()); + int replicaId = watcher.getMetaReplicaIdFromPath(path); + RetryCounter retryCounter = retryCounterFactory.create(); + HRegionLocation location = null; + while (retryCounter.shouldRetry()) { + try { + if (opType == ZNodeOpType.DELETED) { + if (!ZKUtil.watchAndCheckExists(watcher, path)) { + // The path does not exist; we've set the watcher and we can break for now. + break; + } + // If it is a transient error and the node appears right away, we fetch the + // latest meta state. + } + location = getMetaRegionLocation(replicaId); + break; + } catch (KeeperException e) { + LOG.debug("Error getting meta location for path {}", path, e); + if (!retryCounter.shouldRetry()) { + LOG.warn("Error getting meta location for path {}. Retries exhausted.", path, e); + break; + } + try { + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + return; + } + } + } + if (location == null) { + cachedMetaLocations.remove(replicaId); + return; + } + cachedMetaLocations.put(replicaId, location); + } + + /** + * @return list of HRegionLocations for the meta replica(s); an empty list if the cache has not + * been populated. + */ + public List<HRegionLocation> getMetaRegionLocations() { + ConcurrentNavigableMap<Integer, HRegionLocation> snapshot = + cachedMetaLocations.tailMap(cachedMetaLocations.firstKey()); + List<HRegionLocation> result = new ArrayList<>(); + if (snapshot.isEmpty()) { + // This can happen if the master has not successfully initialized yet or the meta region + // is stuck in some weird state. + return result; + } + // Explicitly iterate instead of new ArrayList<>(snapshot.values()) because the underlying + // ArrayValueCollection does not implement toArray(). + for (HRegionLocation location: snapshot.values()) { + result.add(location); + } + return result; + } + + /** + * Helper to check if the given 'path' corresponds to a meta znode. 
This listener is only + * interested in changes to meta znodes. + */ + private boolean isValidMetaZNode(String path) { + return watcher.isAnyMetaReplicaZNode(path); + } + + @Override + public void nodeCreated(String path) { + updateMetaLocation(path, ZNodeOpType.CREATED); + } + + @Override + public void nodeDeleted(String path) { + updateMetaLocation(path, ZNodeOpType.DELETED); + } + + @Override + public void nodeDataChanged(String path) { + updateMetaLocation(path, ZNodeOpType.CHANGED); + } + + @Override + public void nodeChildrenChanged(String path) { + if (!path.equals(watcher.baseZNode)) { + return; + } + loadMetaLocationsFromZk(retryCounterFactory.create(), ZNodeOpType.CHANGED); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index e31868e414da..b8b49d70d398 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -31,6 +31,8 @@ import java.util.TreeMap; import java.util.TreeSet; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -42,14 +44,13 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ConfigUtil; import org.apache.hadoop.hbase.util.FSUtils; @@ -59,9 +60,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - /** * Region state accountant. It holds the states of all regions in the memory. * In normal scenario, it should match the meta table and the true region states. @@ -720,7 +718,7 @@ public void regionOffline( if (oldServerName != null && serverHoldings.containsKey(oldServerName)) { if (newState == State.MERGED || newState == State.SPLIT || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { // Offline the region only if it's merged/split, or the table is disabled/disabling. // Otherwise, offline it from this server only when it is online on a different server. LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); @@ -1295,8 +1293,8 @@ static boolean isOneOfStates(RegionState regionState, State... states) { * Update a region state. It will be put in transition if not already there. 
*/ private RegionState updateRegionState(final HRegionInfo hri, - final State state, final ServerName serverName, long openSeqNum) { - if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) { + final RegionState.State state, final ServerName serverName, long openSeqNum) { + if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) { LOG.warn("Failed to open/close " + hri.getShortNameToLog() + " on " + serverName + ", set to " + state); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 5929f26337a0..e576934bfb23 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -228,7 +229,7 @@ public synchronized boolean isTableAvailableAndInitialized( } // Now check if the table is assigned, if not then fail fast - if (isTableAssigned()) { + if (isTableAssigned() && isTableEnabled()) { try { boolean initGoodSofar = true; nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); @@ -297,6 +298,12 @@ public synchronized boolean isTableAvailableAndInitialized( return false; } + private boolean isTableEnabled() throws IOException { + return masterServices.getTableStateManager().getTableState( + TableName.NAMESPACE_TABLE_NAME + ).equals(TableState.State.ENABLED); + } + private boolean isTableAssigned() { return !masterServices.getAssignmentManager().getRegionStates(). getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java new file mode 100644 index 000000000000..4ba3d1086d7f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -0,0 +1,219 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; + +/** + * This is a helper class used to manage table states. + * States are persisted in the table descriptor file (tableinfo) and cached in memory. + */ +@InterfaceAudience.Private +public class TableStateManager { + private static final Log LOG = LogFactory.getLog(TableStateManager.class); + private final TableDescriptors descriptors; + + private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap(); + + public TableStateManager(MasterServices master) { + this.descriptors = master.getTableDescriptors(); + } + + public void start() throws IOException { + Map<String, TableDescriptor> all = descriptors.getAllDescriptors(); + for (TableDescriptor table : all.values()) { + TableName tableName = table.getHTableDescriptor().getTableName(); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding table state: " + tableName + + ": " + table.getTableState()); + } + tableStates.put(tableName, table.getTableState()); + } + } + + /** + * Sets the table state to the provided value. + * Caller should lock the table on write. + * @param tableName table to change state for + * @param newState new state + * @throws IOException if the descriptor cannot be read or written + */ + public void setTableState(TableName tableName, TableState.State newState) throws IOException { + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (descriptor.getTableState() != newState) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + } + } + } + + /** + * Sets the table state to the provided value, but only if the table is currently in one of the + * specified states. + * Caller should lock the table on write. + * @param tableName table to change state for + * @param newState new state + * @param states states to check against + * @throws IOException if the descriptor cannot be read or written + */ + public boolean setTableStateIfInStates(TableName tableName, + TableState.State newState, + TableState.State... states) + throws IOException { + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (TableState.isInStates(descriptor.getTableState(), states)) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + return true; + } else { + return false; + } + } + } + + /** + * Sets the table state to the provided value, but only if the table is currently in none of the + * specified states. + * Caller should lock the table on write. + * @param tableName table to change state for + * @param newState new state + * @param states states to check against + * @throws IOException if the descriptor cannot be read or written + */ + public boolean setTableStateIfNotInStates(TableName tableName, + TableState.State newState, + TableState.State... 
states) + throws IOException { + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (!TableState.isInStates(descriptor.getTableState(), states)) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + return true; + } else { + return false; + } + } + } + + public boolean isTableState(TableName tableName, TableState.State... states) { + TableState.State tableState = null; + try { + tableState = getTableState(tableName); + } catch (IOException e) { + LOG.error("Unable to get state for table " + tableName + + "; the table probably does not exist", e); + return false; + } + return tableState != null && TableState.isInStates(tableState, states); + } + + public void setDeletedTable(TableName tableName) throws IOException { + TableState.State remove = tableStates.remove(tableName); + if (remove == null) { + LOG.warn("Moving table " + tableName + " state to deleted, but it was " + + "already deleted"); + } + } + + public boolean isTablePresent(TableName tableName) throws IOException { + return getTableState(tableName) != null; + } + + /** + * Return all tables in the given states. + * + * @param states filter by states + * @return tables in the given states + * @throws IOException currently never thrown; declared for interface stability + */ + public Set<TableName> getTablesInStates(TableState.State... states) throws IOException { + Set<TableName> rv = Sets.newHashSet(); + for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) { + if (TableState.isInStates(entry.getValue(), states)) { + rv.add(entry.getKey()); + } + } + return rv; + } + + public TableState.State getTableState(TableName tableName) throws IOException { + TableState.State tableState = tableStates.get(tableName); + if (tableState == null) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor != null) { + tableState = descriptor.getTableState(); + } + } + return tableState; + } + + /** + * Write the descriptor in place and update the cache of states. + * The write lock should be held by the caller. + * + * @param descriptor what to write + */ + private void writeDescriptor(TableDescriptor descriptor) throws IOException { + TableName tableName = descriptor.getHTableDescriptor().getTableName(); + TableState.State state = descriptor.getTableState(); + descriptors.add(descriptor); + LOG.debug("Table " + tableName + " written descriptor for state " + state); + tableStates.put(tableName, state); + LOG.debug("Table " + tableName + " updated state to " + state); + } + + /** + * Read the current descriptor for the table and update the cache of states. 
+ * + * @param tableName table for which to read the descriptor + * @return the descriptor, or null if the table does not exist + * @throws IOException if the descriptor cannot be read + */ + private TableDescriptor readDescriptor(TableName tableName) throws IOException { + TableDescriptor descriptor = descriptors.getDescriptor(tableName); + if (descriptor == null) { + tableStates.remove(tableName); + } else { + tableStates.put(tableName, descriptor.getTableState()); + } + return descriptor; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java index 389a738c7a29..3be3316f26d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java @@ -23,11 +23,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; /** * Handles CLOSED region event on Master. @@ -93,7 +92,7 @@ public void process() { LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName()); // Check if this table is being disabled or not if (this.assignmentManager.getTableStateManager().isTableState(this.regionInfo.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || + TableState.State.DISABLED, TableState.State.DISABLING) || assignmentManager.getReplicasToClose().contains(regionInfo)) { assignmentManager.offlineDisabledRegion(regionInfo); return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 79e24938afa3..09569b30dfd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -30,14 +30,16 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -48,7 +50,6 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import 
org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -119,13 +120,6 @@ public CreateTableHandler prepare() if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) { throw new TableExistsException(tableName); } - - // During master initialization, the ZK state could be inconsistent from failed DDL - // in the past. If we fail here, it would prevent master to start. We should force - // setting the system table state regardless the table state. - boolean skipTableStateCheck = - !((HMaster) this.server).isInitialized() && tableName.isSystemTable(); - checkAndSetEnablingTable(assignmentManager, tableName, skipTableStateCheck); success = true; } finally { if (!success) { @@ -135,52 +129,6 @@ public CreateTableHandler prepare() return this; } - static void checkAndSetEnablingTable(final AssignmentManager assignmentManager, - final TableName tableName, boolean skipTableStateCheck) throws IOException { - // If we have multiple client threads trying to create the table at the - // same time, given the async nature of the operation, the table - // could be in a state where hbase:meta table hasn't been updated yet in - // the process() function. - // Use enabling state to tell if there is already a request for the same - // table in progress. This will introduce a new zookeeper call. Given - // createTable isn't a frequent operation, that should be ok. - // TODO: now that we have table locks, re-evaluate above -- table locks are not enough. - // We could have cleared the hbase.rootdir and not zk. How can we detect this case? - // Having to clean zk AND hdfs is awkward. - try { - if (skipTableStateCheck) { - assignmentManager.getTableStateManager().setTableState( - tableName, - ZooKeeperProtos.Table.State.ENABLING); - } else if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates( - tableName, - ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.ENABLED)) { - throw new TableExistsException(tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " enabling because of a ZooKeeper issue", e); - } - } - - static void removeEnablingTable(final AssignmentManager assignmentManager, - final TableName tableName) { - // Try deleting the enabling node in case of error - // If this does not happen then if the client tries to create the table - // again with the same Active master - // It will block the creation saying TableAlreadyExists. - try { - assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING, false); - } catch (CoordinatedStateException e) { - // Keeper exception should not happen here - LOG.error("Got a keeper exception while removing the ENABLING table znode " - + tableName, e); - } - } - @Override public String toString() { String name = "UnknownServerName"; @@ -228,9 +176,6 @@ protected void completed(final Throwable exception) { releaseTableLock(); LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " + (exception == null ? "successful" : "failed. " + exception)); - if (exception != null) { - removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName()); - } } /** @@ -253,9 +198,12 @@ private void handleCreateTable(TableName tableName) FileSystem fs = fileSystemManager.getFileSystem(); // 1. 
Create Table Descriptor + // using a copy of descriptor, table will be created enabling first + TableDescriptor underConstruction = new TableDescriptor( + this.hTableDescriptor, TableState.State.ENABLING); Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( - tempTableDir, this.hTableDescriptor, false); + tempTableDir, underConstruction, false); Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName); // 2. Create Regions @@ -280,24 +228,18 @@ private void handleCreateTable(TableName tableName) // 7. Trigger immediate assignment of the regions in round-robin fashion ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); } - - // 8. Set table enabled flag up in zk. - try { - assignmentManager.getTableStateManager().setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that " + tableName + " will be" + - " enabled because of a ZooKeeper issue", e); - } - // 8. Update the tabledescriptor cache. ((HMaster) this.server).getTableDescriptors().get(tableName); + + // 9. Enable table + assignmentManager.getTableStateManager().setTableState(tableName, + TableState.State.ENABLED); } /** * Create any replicas for the regions (the default replicas that was * already created is passed to the method) - * @param hTableDescriptor + * @param hTableDescriptor descriptor to use * @param regions default replicas * @return the combined list of default and non-default replicas */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 76f603f3bc26..e9b764e0c61a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -25,13 +25,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -39,11 +39,10 @@ import org.apache.hadoop.hbase.master.BulkAssigner; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.htrace.Trace; /** @@ -91,16 +90,11 @@ public DisableTableHandler prepare() // DISABLED or ENABLED. 
//TODO: reevaluate this since we have table locks now if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLED)) { - LOG.info("Table " + tableName + " isn't enabled; skipping disable"); - throw new TableNotEnabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " disabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.DISABLING, + TableState.State.ENABLED)) { + LOG.info("Table " + tableName + " isn't enabled; skipping disable"); + throw new TableNotEnabledException(this.tableName); } } success = true; @@ -139,8 +133,6 @@ public void process() { } } catch (IOException e) { LOG.error("Error trying to disable table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to disable table " + this.tableName, e); } finally { releaseTableLock(); } @@ -156,10 +148,10 @@ private void releaseTableLock() { } } - private void handleDisableTable() throws IOException, CoordinatedStateException { + private void handleDisableTable() throws IOException { // Set table disabling flag up in zk. this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); boolean done = false; while (true) { // Get list of online regions that are of this table. Regions that are @@ -188,7 +180,7 @@ private void handleDisableTable() throws IOException, CoordinatedStateException } // Flip the table to disabled if success. 
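Condensed, the disable flow now amounts to a compare-and-set against the descriptor-backed TableStateManager followed by a final flip once the regions are offline. A minimal sketch under those assumptions (variable names are hypothetical; the actual handler code continues below):

  TableStateManager tsm = assignmentManager.getTableStateManager();
  // Guard: atomically move ENABLED -> DISABLING; bail out if another request won the race.
  if (!tsm.setTableStateIfInStates(tableName, TableState.State.DISABLING, TableState.State.ENABLED)) {
    throw new TableNotEnabledException(tableName);
  }
  // ... unassign all regions of the table ...
  tsm.setTableState(tableName, TableState.State.DISABLED); // final flip once all regions are offline
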
if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); LOG.info("Disabled table, " + this.tableName + ", is done=" + done); } @@ -208,7 +200,7 @@ protected void populatePool(ExecutorService pool) { RegionStates regionStates = assignmentManager.getRegionStates(); for (HRegionInfo region: regions) { if (regionStates.isRegionInTransition(region) - && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) { + && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { continue; } final HRegionInfo hri = region; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index 2e6a10a7eeed..0b914d52a946 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -26,15 +26,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.CoordinatedStateException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -97,14 +97,9 @@ public EnableTableHandler prepare() if (!this.skipTableStateCheck) { throw new TableNotFoundException(tableName); } - try { - this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING, true); - throw new TableNotFoundException(tableName); - } catch (CoordinatedStateException e) { - // TODO : Use HBCK to clear such nodes - LOG.warn("Failed to delete the ENABLING node for the table " + tableName - + ". The table will remain unusable. Run HBCK to manually fix the problem."); + TableStateManager tsm = assignmentManager.getTableStateManager(); + if (tsm.isTableState(tableName, TableState.State.ENABLING)) { + tsm.setDeletedTable(tableName); } } @@ -113,16 +108,11 @@ public EnableTableHandler prepare() // After that, no other requests can be accepted until the table reaches // DISABLED or ENABLED. 
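Taken together, the enable/disable handlers and procedures in this patch drive a single lifecycle that is now persisted in the table descriptor instead of ZK; a sketch over the TableState.State values (an editorial summary, not taken from the patch):

  // (no table) --create--> ENABLING --> ENABLED --disable--> DISABLING --> DISABLED
  //                                        ^                                  |
  //                                        +--------- enable (ENABLING) -----+

The guard that follows enforces the DISABLED -> ENABLING edge for enable requests.
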
if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.DISABLED)) { - LOG.info("Table " + tableName + " isn't disabled; skipping enable"); - throw new TableNotDisabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " enabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.ENABLING, + TableState.State.DISABLED)) { + LOG.info("Table " + tableName + " isn't disabled; skipping enable"); + throw new TableNotDisabledException(this.tableName); } } success = true; @@ -158,11 +148,7 @@ public void process() { if (cpHost != null) { cpHost.postEnableTableHandler(this.tableName, null); } - } catch (IOException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (InterruptedException e) { + } catch (IOException | InterruptedException e) { LOG.error("Error trying to enable the table " + this.tableName, e); } finally { releaseTableLock(); @@ -179,14 +165,13 @@ private void releaseTableLock() { } } - private void handleEnableTable() throws IOException, CoordinatedStateException, + private void handleEnableTable() throws IOException, InterruptedException { // I could check table is disabling and if so, not enable but require // that user first finish disabling but that might be obnoxious. - // Set table enabling flag up in zk. this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.ENABLING); boolean done = false; ServerManager serverManager = ((HMaster)this.server).getServerManager(); // Get the regions of this table. We're done when all listed @@ -251,7 +236,7 @@ private void handleEnableTable() throws IOException, CoordinatedStateException, if (done) { // Flip the table to enabled. this.assignmentManager.getTableStateManager().setTableState( - this.tableName, ZooKeeperProtos.Table.State.ENABLED); + this.tableName, TableState.State.ENABLED); LOG.info("Table '" + this.tableName + "' was successfully enabled. 
Status: done=" + done); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 43a0f65be9a3..0081f16816f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -29,6 +29,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CoordinatedStateException; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; @@ -36,16 +38,15 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -137,7 +138,7 @@ public void process() { handleTableOperation(hris); if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. 
getAssignmentManager().getTableStateManager().isTableState( - tableName, ZooKeeperProtos.Table.State.ENABLED)) { + tableName, TableState.State.ENABLED)) { if (reOpenAllRegions(hris)) { LOG.info("Completed table operation " + eventType + " on table " + tableName); @@ -236,10 +237,10 @@ public boolean reOpenAllRegions(List regions) throws IOException { * @throws FileNotFoundException * @throws IOException */ - public HTableDescriptor getTableDescriptor() + public TableDescriptor getTableDescriptor() throws FileNotFoundException, IOException { - HTableDescriptor htd = - this.masterServices.getTableDescriptors().get(tableName); + TableDescriptor htd = + this.masterServices.getTableDescriptors().getDescriptor(tableName); if (htd == null) { throw new IOException("HTableDescriptor missing for " + tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java index a3dc1a49521a..c9df56e404ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; /** @@ -336,7 +336,7 @@ private void postAdd(final MasterProcedureEnv env, final AddColumnFamilyState st private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { // This operation only run when the table is enabled. 
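One subtlety of this guard: isTableState(), as defined in the new TableStateManager above, swallows the lookup IOException and returns false, so a nonexistent table and a not-ENABLED table look the same to the caller. A minimal illustration (the table name is hypothetical):

  // true only if the table exists and is ENABLED; false for DISABLED/DISABLING/ENABLING,
  // and also false when the table does not exist at all.
  boolean online = tsm.isTableState(TableName.valueOf("t1"), TableState.State.ENABLED);
  if (!online) {
    return; // skip the region reopen, exactly as the procedure below does
  }
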
if (!env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + .isTableState(getTableName(), TableState.State.ENABLED)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 152af450a79d..a8459f16bb87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -33,20 +33,21 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; @@ -299,8 +300,8 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException { !(env.getMasterServices().isInitialized()) && tableName.isSystemTable(); if (!skipTableStateCheck) { TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager(); - if (tsm.isTableState(tableName, true, ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.ENABLED)) { + if (tsm.isTableState(tableName, TableState.State.ENABLING, + TableState.State.ENABLED)) { LOG.warn("The table " + tableName + " does not exist in meta but has a znode. " + "run hbck to fix inconsistencies."); setFailure("master-create-table", new TableExistsException(getTableName())); @@ -375,7 +376,7 @@ protected static List createFsLayout(final MasterProcedureEnv env, // using a copy of descriptor, table will be created enabling first final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName()); new FSTableDescriptors(env.getMasterConfiguration()).createTableDescriptorForTableDirectory( - tempTableDir, hTableDescriptor, false); + tempTableDir, new TableDescriptor(hTableDescriptor), false); // 2. 
Create Regions newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir, @@ -448,14 +449,14 @@ protected static void assignRegions(final MasterProcedureEnv env, // Mark the table as Enabling assignmentManager.getTableStateManager().setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.ENABLING); // Trigger immediate assignment of the regions in round-robin fashion ModifyRegionUtils.assignRegions(assignmentManager, regions); // Enable table assignmentManager.getTableStateManager() - .setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); + .setTableState(tableName, TableState.State.ENABLED); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java index 5b1a69c0ece3..3e6568bec1c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; @@ -357,7 +357,7 @@ private void postDelete(final MasterProcedureEnv env, final DeleteColumnFamilySt private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { // This operation only run when the table is enabled. 
if (!env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + .isTableState(getTableName(), TableState.State.ENABLED)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index bec599cbb426..7fe2a8973e9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.htrace.Trace; @@ -286,8 +286,8 @@ private boolean prepareDisable(final MasterProcedureEnv env) throws HBaseExcepti // this issue. TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager(); - if (!tsm.setTableStateIfInStates(tableName, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLED)) { + if (!tsm.setTableStateIfInStates(tableName, TableState.State.DISABLING, + TableState.State.DISABLING, TableState.State.ENABLED)) { LOG.info("Table " + tableName + " isn't enabled; skipping disable"); setFailure("master-disable-table", new TableNotEnabledException(tableName)); canTableBeDisabled = false; @@ -311,7 +311,7 @@ private void undoTableStateChange(final MasterProcedureEnv env) { try { // If the state was changed, undo it. if (env.getMasterServices().getAssignmentManager().getTableStateManager().isTableState( - tableName, ZooKeeperProtos.Table.State.DISABLING)) { + tableName, TableState.State.DISABLING)) { EnableTableProcedure.setTableStateToEnabled(env, tableName); } } catch (Exception e) { @@ -344,7 +344,7 @@ protected static void setTableStateToDisabling( // Set table disabling flag up in zk. 
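The prepareDisable hunk above also shows the check-and-set variant of the same API. A hedged sketch of that transition; the argument order (new state first, then the acceptable current states) is taken from the call site, and the thrown exception type is an assumption here:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.TableStateManager;

class DisableGateSketch {
  // Atomically move ENABLED -> DISABLING; re-entering DISABLING is tolerated.
  static boolean tryMarkDisabling(TableStateManager tsm, TableName table) throws IOException {
    return tsm.setTableStateIfInStates(table, TableState.State.DISABLING,
        TableState.State.DISABLING, TableState.State.ENABLED);
  }
}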
env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( tableName, - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); } /** @@ -435,7 +435,7 @@ protected static void setTableStateToDisabled( // Flip the table to disabled env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( tableName, - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); LOG.info("Disabled table, " + tableName + ", is completed."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index f4a45388a569..c06bb07a5079 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.BulkAssigner; @@ -45,11 +45,11 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -307,7 +307,7 @@ private boolean prepareEnable(final MasterProcedureEnv env) throws IOException { // was implemented. With table lock, there is no need to set the state here (it will // set the state later on). A quick state check should be enough for us to move forward. TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager(); - if (!tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) { + if (!tsm.isTableState(tableName, TableState.State.DISABLED)) { LOG.info("Table " + tableName + " isn't disabled; skipping enable"); setFailure("master-enable-table", new TableNotDisabledException(this.tableName)); canTableBeEnabled = false; @@ -344,8 +344,7 @@ protected static void setTableStateToEnabling( // Set table enabling flag up in zk.
LOG.info("Attempting to enable the table " + tableName); env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( - tableName, - ZooKeeperProtos.Table.State.ENABLING); + tableName, TableState.State.ENABLING); } /** @@ -490,8 +489,7 @@ protected static void setTableStateToEnabled( final TableName tableName) throws HBaseException, IOException { // Flip the table to Enabled env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( - tableName, - ZooKeeperProtos.Table.State.ENABLED); + tableName, TableState.State.ENABLED); LOG.info("Table '" + tableName + "' was successfully enabled."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java index 2e8499f61dd0..c6ff1b6e3e8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -78,7 +78,7 @@ public static void checkTableModifiable(final MasterProcedureEnv env, final Tabl // We only execute this procedure with table online if online schema change config is set. if (!env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED) + .isTableState(tableName, TableState.State.DISABLED) && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { throw new TableNotDisabledException(tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java index 5a6b59229f0a..590e4ceb432e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; /** @@ -316,7 +316,7 @@ private void postModify(final MasterProcedureEnv env, final ModifyColumnFamilySt private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { // This operation only run when the table is enabled. 
if (!env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + .isTableState(getTableName(), TableState.State.ENABLED)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index e78568475b1d..fa9746f62aa4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -42,11 +42,11 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -294,7 +294,7 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().get(getTableName()); if (env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + .isTableState(getTableName(), TableState.State.ENABLED)) { // We only execute this procedure with table online if online schema change config is set. if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { throw new TableNotDisabledException(getTableName()); @@ -432,7 +432,7 @@ private void postModify(final MasterProcedureEnv env, final ModifyTableState sta private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { // This operation only runs when the table is enabled.
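The prepareModify hunk above keeps the pre-existing rule that an ENABLED table may only be modified when online schema change is switched on. A hedged sketch of roughly what that helper check amounts to; the config key below is an assumption for illustration, since the patch only shows the isOnlineSchemaChangeAllowed call:

import org.apache.hadoop.conf.Configuration;

class OnlineSchemaGateSketch {
  // Key name assumed; consult MasterDDLOperationHelper for the authoritative lookup.
  static boolean onlineSchemaChangeAllowed(Configuration conf) {
    return conf.getBoolean("hbase.online.schema.update.enable", false);
  }
}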
if (!env.getMasterServices().getAssignmentManager().getTableStateManager() - .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + .isTableState(getTableName(), TableState.State.ENABLED)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index b6e7a7c97448..ef04cfe2d1e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; @@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ServerCrashState; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKAssign; @@ -526,7 +526,7 @@ private List calcRegionsToAssign(final MasterProcedureEnv env) } else if (rit != null) { if ((rit.isPendingCloseOrClosing() || rit.isOffline()) && am.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || + TableState.State.DISABLED, TableState.State.DISABLING) || am.getReplicasToClose().contains(hri)) { // If the table was partially disabled and the RS went down, we should clear the // RIT and remove the node for the region. @@ -713,7 +713,7 @@ private static boolean processDeadRegion(HRegionInfo hri, AssignmentManager assi } // If table is not disabled but the region is offlined, boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); if (disabled){ LOG.info("The table " + hri.getTable() + " was disabled. Hence not proceeding."); return false; @@ -725,7 +725,7 @@ private static boolean processDeadRegion(HRegionInfo hri, AssignmentManager assi return false; } boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); if (disabling) { LOG.info("The table " + hri.getTable() + " is disabled. 
Hence not assigning region" + hri.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 98018f07a43c..5874c5985d3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; @@ -622,7 +622,7 @@ private void takeSnapshotInternal(SnapshotDescription snapshot) throws IOExcepti TableName snapshotTable = TableName.valueOf(snapshot.getTable()); AssignmentManager assignmentMgr = master.getAssignmentManager(); if (assignmentMgr.getTableStateManager().isTableState(snapshotTable, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { if (LOG.isDebugEnabled()) { LOG.debug("Table enabled, starting distributed snapshot for " + ClientSnapshotDescriptionUtils.toString(snapshot)); @@ -634,7 +634,7 @@ private void takeSnapshotInternal(SnapshotDescription snapshot) throws IOExcepti } // For disabled table, snapshot is created by the master else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable, - ZooKeeperProtos.Table.State.DISABLED)) { + TableState.State.DISABLED)) { if (LOG.isDebugEnabled()) { LOG.debug("Table is disabled, running snapshot entirely on master " + ClientSnapshotDescriptionUtils.toString(snapshot)); @@ -801,7 +801,7 @@ public void restoreSnapshot(SnapshotDescription reqSnapshot, boolean restoreAcl) // Execute the restore/clone operation if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) { if (master.getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) { + TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { throw new UnsupportedOperationException("Table '" + TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " + "perform a restore operation" + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java index 6da05cdd0e37..8a1c11a0abb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java @@ -39,12 +39,14 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; +import 
org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; @@ -373,7 +375,7 @@ public void migrateACL() throws IOException { HTableDescriptor newDesc = new HTableDescriptor(oldDesc); newDesc.setName(newTableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( - newTablePath, newDesc, true); + newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 0b483d9d30f8..37528b2a11f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; @@ -111,13 +112,14 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma if (isFamilyDir(fs, path)) { Path regionDir = path.getParent(); Path tableDir = regionDir.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri, + path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - compactRegion(tableDir, htd, path, compactOnce, major); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major); } else if (isTableDir(fs, path)) { compactTable(path, compactOnce, major); } else { @@ -128,9 +130,9 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) throws IOException { - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { - compactRegion(tableDir, htd, regionDir, compactOnce, major); + compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 11b6120e59a1..15360d1e317f 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -90,6 +90,7 @@ public static Class getWALCellCodecClass(Configuration conf) { * Fully prepares the codec for use. * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, * uses a {@link WALCellCodec}. + * @param cellCodecClsName name of codec * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index c76a3a9c1544..0b54c4d7bf3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -43,7 +43,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -305,7 +307,8 @@ protected void addRegion(final Path tableDir, final HRegionInfo regionInfo, Regi private void load() throws IOException { switch (getSnapshotFormat(desc)) { case SnapshotManifestV1.DESCRIPTOR_VERSION: { - this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir); + this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir) + .getHTableDescriptor(); ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { this.regionManifests = @@ -410,7 +413,8 @@ public void consolidate() throws IOException { LOG.info("Using old Snapshot Format"); // write a copy of descriptor to the snapshot directory new FSTableDescriptors(conf, workingDirFs, rootDir) - .createTableDescriptorForTableDirectory(workingDir, htd, false); + .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor( + htd, TableState.State.ENABLED), false); } else { LOG.debug("Convert to Single Snapshot Manifest for " + this.desc.getName()); convertToV2SingleManifest(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 7e161cab1026..8a163a9224c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -38,7 +38,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -88,15 +90,10 @@ public class FSTableDescriptors implements TableDescriptors { // This cache does not age out the old stuff. 
Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. - private final Map cache = - new ConcurrentHashMap(); + private final Map cache = + new ConcurrentHashMap(); /** - * Table descriptor for hbase:meta catalog table - */ - private final HTableDescriptor metaTableDescriptor; - - /** * Construct a FSTableDescriptors instance using the hbase root dir of the given * conf and the filesystem where that root dir lives. * This instance can do write operations (is not read only). @@ -121,7 +118,6 @@ public FSTableDescriptors(final Configuration conf, final FileSystem fs, this.rootdir = rootdir; this.fsreadonly = fsreadonly; this.usecache = usecache; - this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf); } @Override @@ -148,12 +144,12 @@ public boolean isUsecache() { * to see if a newer file has been created since the cached one was read. */ @Override - public HTableDescriptor get(final TableName tablename) + public TableDescriptor getDescriptor(final TableName tablename) throws IOException { invocations++; if (TableName.META_TABLE_NAME.equals(tablename)) { cachehits++; - return metaTableDescriptor; + return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED); } // hbase:meta is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. @@ -163,74 +159,101 @@ public HTableDescriptor get(final TableName tablename) if (usecache) { // Look in cache of descriptors. - HTableDescriptor cachedtdm = this.cache.get(tablename); + TableDescriptor cachedtdm = this.cache.get(tablename); if (cachedtdm != null) { cachehits++; return cachedtdm; } } - HTableDescriptor tdmt = null; + TableDescriptor tdmt = null; try { - tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly); - } catch (NullPointerException e) { - LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, e); + tdmt = getTableDescriptorFromFs(fs, rootdir, tablename); } catch (TableInfoMissingException e) { // ignore. This is regular operation - } catch (IOException ioe) { + } catch (NullPointerException | IOException e) { LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, ioe); + + tablename, e); } // last HTD written wins if (usecache && tdmt != null) { this.cache.put(tablename, tdmt); } - return tdmt; } + /** + * Get the current table descriptor for the given table, or null if none exists. + * + * Uses a local cache of the descriptor but still checks the filesystem on each call + * to see if a newer file has been created since the cached one was read. + */ + @Override + public HTableDescriptor get(TableName tableName) throws IOException { + if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) { + cachehits++; + return HTableDescriptor.META_TABLEDESC; + } + TableDescriptor descriptor = getDescriptor(tableName); + return descriptor == null ? null : descriptor.getHTableDescriptor(); + } + /** * Returns a map from table name to table descriptor for all tables. 
*/ @Override - public Map getAll() + public Map getAllDescriptors() throws IOException { - Map htds = new TreeMap(); + Map tds = new TreeMap(); if (fsvisited && usecache) { - for (Map.Entry entry: this.cache.entrySet()) { - htds.put(entry.getKey().toString(), entry.getValue()); + for (Map.Entry entry: this.cache.entrySet()) { + tds.put(entry.getKey().toString(), entry.getValue()); } // add hbase:meta to the response - htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(), - HTableDescriptor.META_TABLEDESC); + tds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(), + new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED)); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; for (Path d : FSUtils.getTableDirs(fs, rootdir)) { - HTableDescriptor htd = null; + TableDescriptor td = null; try { - htd = get(FSUtils.getTableName(d)); + td = getDescriptor(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { // inability of retrieving one HTD shouldn't stop getting the remaining LOG.warn("Trouble retrieving htd", fnfe); } - if (htd == null) { + if (td == null) { allvisited = false; continue; } else { - htds.put(htd.getTableName().getNameAsString(), htd); + tds.put(td.getHTableDescriptor().getTableName().getNameAsString(), td); } fsvisited = allvisited; } } - return htds; + return tds; } - /* (non-Javadoc) - * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) + /** + * Returns a map from table name to table descriptor for all tables. */ @Override + public Map getAll() throws IOException { + Map htds = new TreeMap(); + Map allDescriptors = getAllDescriptors(); + for (Map.Entry entry : allDescriptors + .entrySet()) { + htds.put(entry.getKey(), entry.getValue().getHTableDescriptor()); + } + return htds; + } + + /** + * Find descriptors by namespace. + * @see #get(org.apache.hadoop.hbase.TableName) + */ + @Override public Map getByNamespace(String name) throws IOException { Map htds = new TreeMap(); @@ -255,20 +278,50 @@ public Map getByNamespace(String name) * and updates the local cache with it. */ @Override - public void add(HTableDescriptor htd) throws IOException { + public void add(TableDescriptor htd) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } - if (TableName.META_TABLE_NAME.equals(htd.getTableName())) { + TableName tableName = htd.getHTableDescriptor().getTableName(); + if (TableName.META_TABLE_NAME.equals(tableName)) { throw new NotImplementedException(); } - if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) { + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( - "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); + "Cannot add a table descriptor for a reserved subdirectory name: " + + htd.getHTableDescriptor().getNameAsString()); } updateTableDescriptor(htd); } + /** + * Adds (or updates) the table descriptor to the FileSystem + * and updates the local cache with it. 
+ */ + @Override + public void add(HTableDescriptor htd) throws IOException { + if (fsreadonly) { + throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); + } + TableName tableName = htd.getTableName(); + if (TableName.META_TABLE_NAME.equals(tableName)) { + throw new NotImplementedException(); + } + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { + throw new NotImplementedException( + "Cannot add a table descriptor for a reserved subdirectory name: " + + htd.getNameAsString()); + } + TableDescriptor descriptor = getDescriptor(htd.getTableName()); + if (descriptor == null) { + descriptor = new TableDescriptor(htd); + } + else { + descriptor.setHTableDescriptor(htd); + } + updateTableDescriptor(descriptor); + } + /** * Removes the table descriptor from the local cache and returns it. * If not in read only mode, it also deletes the entire table directory(!) @@ -286,11 +339,11 @@ public HTableDescriptor remove(final TableName tablename) throw new IOException("Failed delete of " + tabledir.toString()); } } - HTableDescriptor descriptor = this.cache.remove(tablename); + TableDescriptor descriptor = this.cache.remove(tablename); if (descriptor == null) { return null; } else { - return descriptor; + return descriptor.getHTableDescriptor(); } } @@ -474,8 +527,8 @@ private static String formatTableInfoSequenceId(final int number) { * if it exists, bypassing the local cache. * Returns null if it's not found. */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, TableName tableName) throws IOException { + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, + Path hbaseRootDir, TableName tableName) throws IOException { Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); } @@ -485,37 +538,16 @@ public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, * directly from the file system if it exists. * @throws TableInfoMissingException if there is no descriptor */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException { - Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); - return getTableDescriptorFromFs(fs, tableDir, rewritePb); - } - /** - * Returns the latest table descriptor for the table located at the given directory - * directly from the file system if it exists. - * @throws TableInfoMissingException if there is no descriptor - */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) throws IOException { - return getTableDescriptorFromFs(fs, tableDir, false); - } - - /** - * Returns the latest table descriptor for the table located at the given directory - * directly from the file system if it exists. 
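Several call sites later in this patch (CompactionTool, HMerge, Merge, HBaseFsck) follow the same two-step read as a result of this signature change: fetch the TableDescriptor from the filesystem, then unwrap the legacy HTableDescriptor. A small self-contained sketch of that pattern:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

class DescriptorUnwrapSketch {
  static HTableDescriptor loadLegacy(FileSystem fs, Path tableDir) throws IOException {
    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
    return td.getHTableDescriptor(); // legacy view for callers not yet converted
  }
}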
- * @throws TableInfoMissingException if there is no descriptor - */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir, - boolean rewritePb) - throws IOException { FileStatus status = getTableInfoPath(fs, tableDir, false); if (status == null) { throw new TableInfoMissingException("No table descriptor file under " + tableDir); } - return readTableDescriptor(fs, status, rewritePb); + return readTableDescriptor(fs, status, false); } - private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status, + private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status, boolean rewritePb) throws IOException { int len = Ints.checkedCast(status.getLen()); byte [] content = new byte[len]; @@ -525,30 +557,32 @@ private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus st } finally { fsDataInputStream.close(); } - HTableDescriptor htd = null; + TableDescriptor td = null; try { - htd = HTableDescriptor.parseFrom(content); + td = TableDescriptor.parseFrom(content); } catch (DeserializationException e) { // we have old HTableDescriptor here try { HTableDescriptor ohtd = HTableDescriptor.parseFrom(content); LOG.warn("Found old table descriptor, converting to new format for table " + ohtd.getTableName()); - htd = new HTableDescriptor(ohtd); - if (rewritePb) rewriteTableDescriptor(fs, status, htd); + td = new TableDescriptor(ohtd); + if (rewritePb) { + rewriteTableDescriptor(fs, status, td); + } } catch (DeserializationException e1) { throw new IOException("content=" + Bytes.toShort(content), e1); } } if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) { // Convert the file over to be pb before leaving here. - rewriteTableDescriptor(fs, status, htd); + rewriteTableDescriptor(fs, status, td); } - return htd; + return td; } private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status, - final HTableDescriptor td) + final TableDescriptor td) throws IOException { Path tableInfoDir = status.getPath().getParent(); Path tableDir = tableInfoDir.getParent(); @@ -560,17 +594,18 @@ private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus * @throws IOException Thrown if failed update. * @throws NotImplementedException if in read only mode */ - @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd) + @VisibleForTesting Path updateTableDescriptor(TableDescriptor td) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot update a table descriptor - in read only mode"); } - Path tableDir = getTableDir(htd.getTableName()); - Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir)); + TableName tableName = td.getHTableDescriptor().getTableName(); + Path tableDir = getTableDir(tableName); + Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); if (usecache) { - this.cache.put(htd.getTableName(), htd); + this.cache.put(td.getHTableDescriptor().getTableName(), td); } return p; } @@ -621,9 +656,8 @@ private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxS * @return Descriptor file or null if we failed write. 
*/ private static Path writeTableDescriptor(final FileSystem fs, - final HTableDescriptor htd, final Path tableDir, - final FileStatus currentDescriptorFile) - throws IOException { + final TableDescriptor htd, final Path tableDir, + final FileStatus currentDescriptorFile) throws IOException { // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon. // This directory is never removed to avoid removing it out from under a concurrent writer. Path tmpTableDir = new Path(tableDir, TMP_DIR); @@ -652,7 +686,7 @@ private static Path writeTableDescriptor(final FileSystem fs, } tableInfoDirPath = new Path(tableInfoDir, filename); try { - writeHTD(fs, tempPath, htd); + writeTD(fs, tempPath, htd); fs.mkdirs(tableInfoDirPath.getParent()); if (!fs.rename(tempPath, tableInfoDirPath)) { throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath); @@ -676,7 +710,7 @@ private static Path writeTableDescriptor(final FileSystem fs, return tableInfoDirPath; } - private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd) + private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd) throws IOException { FSDataOutputStream out = fs.create(p, false); try { @@ -693,10 +727,19 @@ private static void writeHTD(final FileSystem fs, final Path p, final HTableDesc * Used by tests. * @return True if we successfully created file. */ - public boolean createTableDescriptor(HTableDescriptor htd) throws IOException { + public boolean createTableDescriptor(TableDescriptor htd) throws IOException { return createTableDescriptor(htd, false); } + /** + * Create new HTableDescriptor in HDFS. Happens when we are creating table. + * Used by tests. + * @return True if we successfully created file. + */ + public boolean createTableDescriptor(HTableDescriptor htd) throws IOException { + return createTableDescriptor(new TableDescriptor(htd), false); + } + /** * Create new HTableDescriptor in HDFS. Happens when we are creating table. If * forceCreation is true then even if previous table descriptor is present it @@ -704,9 +747,9 @@ public boolean createTableDescriptor(HTableDescriptor htd) throws IOException { * * @return True if we successfully created file. 
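On the write side, the patch wraps an HTableDescriptor together with an explicit state before anything hits the filesystem (see the SnapshotManifest, NamespaceUpgrade and HBaseFsck call sites). A hedged sketch of the two constructor shapes seen at those call sites; whether the single-argument form defaults the state is an assumption here:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;

class DescriptorWrapSketch {
  static TableDescriptor wrap(HTableDescriptor htd) {
    return new TableDescriptor(htd); // state presumably left to the wrapper's default
  }
  static TableDescriptor wrapEnabled(HTableDescriptor htd) {
    return new TableDescriptor(htd, TableState.State.ENABLED); // explicit state
  }
}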
*/ - public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation) + public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) throws IOException { - Path tableDir = getTableDir(htd.getTableName()); + Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } @@ -722,7 +765,7 @@ public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation * @throws IOException if a filesystem error occurs */ public boolean createTableDescriptorForTableDirectory(Path tableDir, - HTableDescriptor htd, boolean forceCreation) throws IOException { + TableDescriptor htd, boolean forceCreation) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); } @@ -743,4 +786,3 @@ public boolean createTableDescriptorForTableDirectory(Path tableDir, } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 6cb3d204a6b6..6ed2b3e75d80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -84,6 +83,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -107,13 +107,13 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.FileLink; import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -128,9 +128,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl; import org.apache.hadoop.hbase.util.hbck.TableLockChecker; import org.apache.hadoop.hbase.wal.WALSplitter; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; @@ -1337,9 +1334,9 @@ private SortedMap loadHdfsRegionInfos() modTInfo = new TableInfo(tableName); tablesInfo.put(tableName, 
modTInfo); try { - HTableDescriptor htd = + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); - modTInfo.htds.add(htd); + modTInfo.htds.add(htd.getHTableDescriptor()); } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe); @@ -1394,7 +1391,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, for (String columnfamimly : columns) { htd.addFamily(new HColumnDescriptor(columnfamimly)); } - fstd.createTableDescriptor(htd, true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); return true; } @@ -1442,7 +1439,7 @@ public void fixOrphanTables() throws IOException { if (tableName.equals(htds[j].getTableName())) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); - fstd.createTableDescriptor(htd, true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); j++; iter.remove(); } @@ -1802,19 +1799,16 @@ Path sidelineOldMeta() throws IOException { * @throws IOException */ private void loadDisabledTables() - throws ZooKeeperConnectionException, IOException { + throws IOException { HConnectionManager.execute(new HConnectable(getConf()) { @Override public Void connect(HConnection connection) throws IOException { - try { - for (TableName tableName : - ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) { - disabledTables.add(tableName); + TableName[] tables = connection.listTableNames(); + for (TableName table : tables) { + if (connection.getTableState(table) + .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) { + disabledTables.add(table); } - } catch (KeeperException ke) { - throw new IOException(ke); - } catch (InterruptedException e) { - throw new InterruptedIOException(); } return null; } @@ -3546,12 +3540,15 @@ private void checkAndFixReplication() throws IOException { /** * Check whether an orphaned table ZNode exists and fix it if requested. * @throws IOException - * @throws KeeperException - * @throws InterruptedException */ private void checkAndFixOrphanedTableZNodes() - throws IOException, KeeperException, InterruptedException { - Set enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw); + throws IOException { + Set enablingTables = new HashSet<>(); + for (TableName tableName: admin.listTableNames()) { + if (connection.getTableState(tableName).getState().equals(TableState.State.ENABLING)) { + enablingTables.add(tableName); + } + } String msg; TableInfo tableInfo; @@ -3570,21 +3567,12 @@ private void checkAndFixOrphanedTableZNodes() } if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) { - ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw); - for (TableName tableName : orphanedTableZNodes) { - try { - // Set the table state to be disabled so that if we made mistake, we can trace - // the history and figure it out. - // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode. - // Both approaches works. - zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); - } catch (CoordinatedStateException e) { - // This exception should not happen here - LOG.error( - "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName, - e); - } + // Set the table state to be disabled so that if we made a mistake, we can trace + // the history and figure it out.
+ // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode. + // Both approaches work. + admin.disableTable(tableName); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 7f2c85db060d..02b598066884 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -155,7 +155,8 @@ protected Merger(Configuration conf, FileSystem fs, final TableName tableName) this.rootDir = FSUtils.getRootDir(conf); Path tabledir = FSUtils.getTableDir(this.rootDir, tableName); - this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir); + this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir) + .getHTableDescriptor(); String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME; final Configuration walConf = new Configuration(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 23c4f3cd5565..4128c8cd9968 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.util; +import com.google.common.base.Preconditions; import java.io.InterruptedIOException; import java.io.IOException; import java.lang.reflect.Constructor; @@ -26,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoordinatedStateManager; @@ -144,6 +146,11 @@ public static JVMClusterUtil.MasterThread createMasterThread( } catch (Exception e) { throw new IOException(e); } + // Needed if a master-based registry is configured for internal cluster connections. Here, we + // just add the current master host port since we do not know other master addresses up front + // in mini cluster tests.
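For the JVMClusterUtil hunk here, a short sketch of what a test or client configuration ends up holding so a master-based connection registry can find the master, mirroring the c.set call that follows. The key comes from the patch; the host:port value format is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

class MasterAddrsSketch {
  static Configuration withMasterAddr(String hostAndPort) {
    Configuration c = HBaseConfiguration.create();
    c.set(HConstants.MASTER_ADDRS_KEY, hostAndPort); // e.g. "localhost:16000" (assumed format)
    return c;
  }
}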
+ c.set(HConstants.MASTER_ADDRS_KEY, + Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); return new JVMClusterUtil.MasterThread(server, index); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index adab20351284..1530d2845dc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -153,9 +154,9 @@ private void mergeTwoRegions() throws IOException { if (info2 == null) { throw new NullPointerException("info2 is null using key " + meta); } - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), this.rootdir, this.tableName); - HRegion merged = merge(htd, meta, info1, info2); + HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2); LOG.info("Adding " + merged.getRegionInfo() + " to " + meta.getRegionInfo()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 57ec87d8e48e..82308be78b29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -18,8 +18,11 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -27,6 +30,9 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; @@ -153,8 +159,9 @@ private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperE } byte[] data = ZKUtil.getData(zkw, znode); if (ProtobufUtil.isPBMagicPrefix(data)) continue; - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data))); + ZooKeeperProtos.DeprecatedTableState.Builder builder = + ZooKeeperProtos.DeprecatedTableState.newBuilder(); + builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data))); data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); ZKUtil.setData(zkw, znode, data); } @@ -232,15 +239,14 @@ private void checkAndMigratePeerZnodesToPB(ZooKeeperWatcher zkw, String znode, } private void migrateClusterKeyToPB(ZooKeeperWatcher zkw, String peerZnode, byte[] data) - throws KeeperException, NoNodeException { + throws 
KeeperException { ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder() .setClusterkey(Bytes.toString(data)).build(); ZKUtil.setData(zkw, peerZnode, ProtobufUtil.prependPBMagic(peer.toByteArray())); } private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data, - String peerStatePath) - throws KeeperException, NoNodeException { + String peerStatePath) throws KeeperException { String state = Bytes.toString(data); if (ZooKeeperProtos.ReplicationState.State.ENABLED.name().equals(state)) { ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.ENABLED_ZNODE_BYTES); @@ -249,6 +255,80 @@ private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data, } } + /** + * Method for table states migration. + * Reading state from zk, applying them to internal state + * and delete. + * Used by master to clean migration from zk based states to + * table descriptor based states. + */ + @Deprecated + public static Map queryForTableStates(ZooKeeperWatcher zkw) + throws KeeperException, InterruptedException { + Map rv = new HashMap<>(); + List children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + if (children == null) { + return rv; + } + for (String child: children) { + TableName tableName = TableName.valueOf(child); + ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName); + TableState.State newState = TableState.State.ENABLED; + if (state != null) { + switch (state) { + case ENABLED: + newState = TableState.State.ENABLED; + break; + case DISABLED: + newState = TableState.State.DISABLED; + break; + case DISABLING: + newState = TableState.State.DISABLING; + break; + case ENABLING: + newState = TableState.State.ENABLING; + break; + default: + } + } + rv.put(tableName, newState); + } + return rv; + } + + /** + * Gets table state from ZK. + * @param zkw ZooKeeperWatcher instance to use + * @param tableName table we're checking + * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode. 
+ * @throws KeeperException + */ + @Deprecated + private static ZooKeeperProtos.DeprecatedTableState.State getTableState( + final ZooKeeperWatcher zkw, final TableName tableName) + throws KeeperException, InterruptedException { + String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); + byte [] data = ZKUtil.getData(zkw, znode); + if (data == null || data.length <= 0) { + return null; + } + try { + ProtobufUtil.expectPBMagicPrefix(data); + ZooKeeperProtos.DeprecatedTableState.Builder builder = + ZooKeeperProtos.DeprecatedTableState.newBuilder(); + int magicLen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data, + magicLen, data.length - magicLen).build(); + return t.getState(); + } catch (InvalidProtocolBufferException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + throw ke; + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } + public static void main(String args[]) throws Exception { System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 9273b6a490e5..bb703ed41d3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -50,6 +50,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -61,7 +64,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -71,7 +73,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; @@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.master.SplitLogManager; @@ -98,7 +100,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.LastSequenceId; @@ -123,9 +124,6 @@ import 
org.apache.hadoop.io.MultipleIOException; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.protobuf.ServiceException; import com.google.protobuf.TextFormat; /** @@ -335,13 +333,14 @@ boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws LOG.warn("Nothing to split in log file " + logPath); return true; } - if (csm != null) { - try { - TableStateManager tsm = csm.getTableStateManager(); - disablingOrDisabledTables = tsm.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING); - } catch (CoordinatedStateException e) { - throw new IOException("Can't get disabling/disabled tables", e); + if (csm != null) { + HConnection scc = csm.getServer().getConnection(); + TableName[] tables = scc.listTableNames(); + for (TableName table : tables) { + if (scc.getTableState(table) + .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) { + disablingOrDisabledTables.add(table); + } } } int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java deleted file mode 100644 index db00c14ce23a..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java +++ /dev/null @@ -1,369 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -/** - * Implementation of TableStateManager which reads, caches and sets state - * up in ZooKeeper. If multiple read/write clients, will make for confusion. - * Code running on client side without consensus context should use - * {@link ZKTableStateClientSideReader} instead. - * - *
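// The ZK-based manager deleted below is superseded by the connection-backed
// lookup used in the WALSplitter hunk above. For reference, a minimal sketch
// of that replacement pattern from a caller holding a Connection; the cast to
// HConnection and the table name are illustrative assumptions, and error
// handling is elided (both calls can throw IOException).
try (Connection connection = ConnectionFactory.createConnection(conf)) {
  TableState state = ((HConnection) connection).getTableState(TableName.valueOf("t1"));
  if (state.inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
    // Edits for this table would be skipped during log splitting.
  }
}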

To save on trips to the zookeeper ensemble, internally we cache table - * state. - */ -@InterfaceAudience.Private -public class ZKTableStateManager implements TableStateManager { - // A znode will exist under the table directory if it is in any of the - // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, - // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will - // be no entry for a table in zk. Thats how it currently works. - - private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class); - private final ZooKeeperWatcher watcher; - - /** - * Cache of what we found in zookeeper so we don't have to go to zk ensemble - * for every query. Synchronize access rather than use concurrent Map because - * synchronization needs to span query of zk. - */ - private final Map cache = - new HashMap(); - - public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - super(); - this.watcher = zkw; - populateTableStates(); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @throws KeeperException, InterruptedException - */ - private void populateTableStates() throws KeeperException, InterruptedException { - synchronized (this.cache) { - List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); - if (children == null) return; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName); - if (state != null) this.cache.put(tableName, state); - } - } - } - - /** - * Sets table state in ZK. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException { - synchronized (this.cache) { - LOG.info("Moving table " + tableName + " state from " + this.cache.get(tableName) - + " to " + state); - try { - setTableStateInZK(tableName, state); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException { - synchronized (this.cache) { - // Transition ENABLED->DISABLING has to be performed with a hack, because - // we treat empty state as enabled in this case because 0.92- clusters. - if ( - (newState == ZooKeeperProtos.Table.State.DISABLING) && - this.cache.get(tableName) != null && !isTableState(tableName, states) || - (newState != ZooKeeperProtos.Table.State.DISABLING && - !isTableState(tableName, states) )) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfNotInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - // If the table is in the one of the states from the states list, the cache - // might be out-of-date, try to find it out from the master source (zookeeper server). 
- // - // Note: this adds extra zookeeper server calls and might have performance impact. - // However, this is not the happy path so we should not reach here often. Therefore, - // the performance impact should be minimal to none. - try { - ZooKeeperProtos.Table.State curstate = getTableState(watcher, tableName); - - if (isTableInState(Arrays.asList(states), curstate)) { - return false; - } - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } catch (InterruptedException e) { - throw new CoordinatedStateException(e); - } - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - private void setTableStateInZK(final TableName tableName, - final ZooKeeperProtos.Table.State state) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()); - if (ZKUtil.checkExists(this.watcher, znode) == -1) { - ZKUtil.createAndFailSilent(this.watcher, znode); - } - synchronized (this.cache) { - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(state); - byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.watcher, znode, data); - this.cache.put(tableName, state); - } - } - - /** - * Checks if table is marked in specified state in ZK (using cache only). {@inheritDoc} - */ - @Override - public boolean isTableState(final TableName tableName, - final ZooKeeperProtos.Table.State... states) { - return isTableState(tableName, false, states); // only check cache - } - - /** - * Checks if table is marked in specified state in ZK. {@inheritDoc} - */ - @Override - public boolean isTableState(final TableName tableName, final boolean checkSource, - final ZooKeeperProtos.Table.State... states) { - boolean isTableInSpecifiedState; - synchronized (this.cache) { - ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); - if (checkSource) { - // The cache might be out-of-date, try to find it out from the master source (zookeeper - // server) and update the cache. - try { - ZooKeeperProtos.Table.State stateInZK = getTableState(watcher, tableName); - - if (currentState != stateInZK) { - if (stateInZK != null) { - this.cache.put(tableName, stateInZK); - } else { - this.cache.remove(tableName); - } - currentState = stateInZK; - } - } catch (KeeperException | InterruptedException e) { - // Contacting zookeeper failed. Let us just trust the value in cache. - } - } - return isTableInState(Arrays.asList(states), currentState); - } - } - - /** - * Deletes the table in zookeeper. Fails silently if the table is not currently disabled in - * zookeeper. Sets no watches. {@inheritDoc} - */ - @Override - public void setDeletedTable(final TableName tableName) - throws CoordinatedStateException { - synchronized (this.cache) { - if (this.cache.remove(tableName) == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was already deleted"); - } - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * check if table is present. 
- * - * @param tableName table we're working on - * @return true if the table is present - */ - @Override - public boolean isTablePresent(final TableName tableName) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State state = this.cache.get(tableName); - return !(state == null); - } - } - - /** - * Gets a list of all the tables set as disabling in zookeeper. - * @return Set of disabling tables, empty Set if none - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - @Override - public Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException { - try { - return getAllTables(states); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - this.cache.remove(tableName); - if (deletePermanentState) { - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - } - } - - /** - * Gets a list of all the tables of specified states in zookeeper. - * @return Set of tables of specified states, empty Set if none - * @throws KeeperException - */ - Set getAllTables(final ZooKeeperProtos.Table.State... states) - throws KeeperException, InterruptedIOException { - - Set allTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode); - if(children == null) return allTables; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state; - try { - state = getTableState(watcher, tableName); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - for (ZooKeeperProtos.Table.State expectedState: states) { - if (state == expectedState) { - allTables.add(tableName); - break; - } - } - } - return allTables; - } - - /** - * Gets table state from ZK. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); - return builder.getState(); - } catch (IOException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - /** - * @return true if current state isn't null and is contained - * in the list of expected states. 
- */ - private boolean isTableInState(final List expectedStates, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && expectedStates.contains(currentState); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index ec1e32c2947b..a9b5fb124937 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1110,6 +1110,9 @@ public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, Configuration c = new Configuration(this.conf); this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass); + // Populate the master address configuration from mini cluster configuration. + conf.set(HConstants.MASTER_ADDRS_KEY, + c.get(HConstants.MASTER_ADDRS_KEY, HConstants.MASTER_ADDRS_DEFAULT)); // Don't leave here till we've done a successful scan of the hbase:meta Table t = new HTable(c, TableName.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); @@ -3390,6 +3393,7 @@ public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis) } } + /** * Make sure that at least the specified number of region servers * are running diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java new file mode 100644 index 000000000000..8f0b32db8b7e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
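// CachedClusterId semantics exercised by the test that follows: concurrent
// callers of getFromCacheOrFetch() should trigger at most one backing fetch,
// with every later call served from memory. A hedged sketch using the names
// from this test; error handling elided.
CachedClusterId cachedId = new CachedClusterId(conf);
String id = cachedId.getFromCacheOrFetch(); // first call fetches and caches
assert cachedId.getCacheStats() == 1;       // getCacheStats() counts cache misses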
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; +import org.apache.hadoop.hbase.master.CachedClusterId; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestCachedClusterId { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static String clusterId; + private static HMaster activeMaster; + private static HMaster standByMaster; + + private static class GetClusterIdThread extends TestThread { + CachedClusterId cachedClusterId; + public GetClusterIdThread(TestContext ctx, CachedClusterId clusterId) { + super(ctx); + cachedClusterId = clusterId; + } + + @Override + public void doWork() throws Exception { + assertEquals(clusterId, cachedClusterId.getFromCacheOrFetch()); + } + } + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL.startMiniCluster(1); + activeMaster = TEST_UTIL.getHBaseCluster().getMaster(); + clusterId = activeMaster.getClusterId(); + standByMaster = TEST_UTIL.getHBaseCluster().startMaster().getMaster(); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testClusterIdMatch() { + assertEquals(clusterId, standByMaster.getClusterId()); + } + + @Test + public void testMultiThreadedGetClusterId() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + CachedClusterId cachedClusterId = new CachedClusterId(conf); + TestContext context = new TestContext(conf); + int numThreads = 100; + for (int i = 0; i < numThreads; i++) { + context.addThread(new GetClusterIdThread(context, cachedClusterId)); + } + context.startThreads(); + context.stop(); + int cacheMisses = cachedClusterId.getCacheStats(); + assertEquals(cacheMisses, 1); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java index 946b812ed234..5b7ba496b360 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -54,6 +55,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; /** @@ -98,70 +100,72 @@ public void testAssignmentManagerDoesntUseDrainingServer() throws Exception { final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"), HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW); - ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - "zkWatcher-Test", abortable, true); + try 
(ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + "zkWatcher-Test", abortable, true)) { - Map onlineServers = new HashMap(); + Map onlineServers = new HashMap(); - onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD); - onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD); + onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD); + onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD); - Mockito.when(server.getConfiguration()).thenReturn(conf); - Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1")); - Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher); - Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0"); + Mockito.when(server.getConfiguration()).thenReturn(conf); + Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1")); + Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher); + Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0"); - CoordinatedStateManager cp = new ZkCoordinatedStateManager(); - cp.initialize(server); - cp.start(); + CoordinatedStateManager cp = new ZkCoordinatedStateManager(); + cp.initialize(server); + cp.start(); - Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp); + Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp); - Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers); - Mockito.when(serverManager.getOnlineServersList()) - .thenReturn(new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers); + Mockito.when(serverManager.getOnlineServersList()) + .thenReturn(new ArrayList(onlineServers.keySet())); - Mockito.when(serverManager.createDestinationServersList()) - .thenReturn(new ArrayList(onlineServers.keySet())); - Mockito.when(serverManager.createDestinationServersList(null)) - .thenReturn(new ArrayList(onlineServers.keySet())); - Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn( - new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.createDestinationServersList()) + .thenReturn(new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.createDestinationServersList(null)) + .thenReturn(new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn( + new ArrayList(onlineServers.keySet())); - for (ServerName sn : onlineServers.keySet()) { - Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true); - Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true); - Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true); - Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList())) - .thenReturn(RegionOpeningState.OPENED); - Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null)) - .thenReturn(RegionOpeningState.OPENED); - Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true); - } + for (ServerName sn : onlineServers.keySet()) { + Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true); + Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true); + Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true); + Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList())) + .thenReturn(RegionOpeningState.OPENED); + 
Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null)) + .thenReturn(RegionOpeningState.OPENED); + Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true); + } - Mockito.when(master.getServerManager()).thenReturn(serverManager); + Mockito.when(master.getServerManager()).thenReturn(serverManager); - am = new AssignmentManager(server, serverManager, - balancer, startupMasterExecutor("mockExecutorService"), null, null); + TableStateManager tsm = mock(TableStateManager.class); + am = new AssignmentManager(server, serverManager, + balancer, startupMasterExecutor("mockExecutorService"), null, null, tsm); - Mockito.when(master.getAssignmentManager()).thenReturn(am); - Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher); + Mockito.when(master.getAssignmentManager()).thenReturn(am); + Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher); - am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A)); + am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A)); - zkWatcher.registerListenerFirst(am); + zkWatcher.registerListenerFirst(am); - addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager); + addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager); - am.assign(REGIONINFO, true); + am.assign(REGIONINFO, true); - setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO); - setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO); + setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO); + setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO); - am.waitForAssignment(REGIONINFO); + am.waitForAssignment(REGIONINFO); - assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO)); - assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A); + assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO)); + assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A); + } } @Test @@ -207,80 +211,82 @@ public void testAssignmentManagerDoesntUseDrainedServerWithBulkAssign() throws E bulk.put(REGIONINFO_D, SERVERNAME_D); bulk.put(REGIONINFO_E, SERVERNAME_E); - ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - "zkWatcher-BulkAssignTest", abortable, true); - - Mockito.when(server.getConfiguration()).thenReturn(conf); - Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1")); - Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher); - - CoordinatedStateManager cp = new ZkCoordinatedStateManager(); - cp.initialize(server); - cp.start(); - - Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp); - - Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers); - Mockito.when(serverManager.getOnlineServersList()).thenReturn( - new ArrayList(onlineServers.keySet())); - - Mockito.when(serverManager.createDestinationServersList()).thenReturn( - new ArrayList(onlineServers.keySet())); - Mockito.when(serverManager.createDestinationServersList(null)).thenReturn( - new ArrayList(onlineServers.keySet())); - Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn( - new ArrayList(onlineServers.keySet())); - - for (Entry entry : bulk.entrySet()) { - Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true); - Mockito.when(serverManager.sendRegionClose(entry.getValue(), - entry.getKey(), -1)).thenReturn(true); - Mockito.when(serverManager.sendRegionOpen(entry.getValue(), - entry.getKey(), -1, 
null)).thenReturn(RegionOpeningState.OPENED); - Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true); - } - - Mockito.when(master.getServerManager()).thenReturn(serverManager); - - drainedServers.add(SERVERNAME_A); - drainedServers.add(SERVERNAME_B); - drainedServers.add(SERVERNAME_C); - drainedServers.add(SERVERNAME_D); - - am = new AssignmentManager(server, serverManager, - balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null); - - Mockito.when(master.getAssignmentManager()).thenReturn(am); - - zkWatcher.registerListener(am); - - for (ServerName drained : drainedServers) { - addServerToDrainedList(drained, onlineServers, serverManager); - } - - am.assign(bulk); - - Set regionsInTransition = am.getRegionStates().getRegionsInTransition(); - for (RegionState rs : regionsInTransition) { - setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion()); - } - - am.waitForAssignment(REGIONINFO_A); - am.waitForAssignment(REGIONINFO_B); - am.waitForAssignment(REGIONINFO_C); - am.waitForAssignment(REGIONINFO_D); - am.waitForAssignment(REGIONINFO_E); - - Map regionAssignments = am.getRegionStates().getRegionAssignments(); - for (Entry entry : regionAssignments.entrySet()) { - LOG.info("Region Assignment: " - + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue()); - bunchServersAssigned.add(entry.getValue()); - } - - for (ServerName sn : drainedServers) { - assertFalse(bunchServersAssigned.contains(sn)); - } + try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + "zkWatcher-BulkAssignTest", abortable, true)) { + + Mockito.when(server.getConfiguration()).thenReturn(conf); + Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1")); + Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher); + + CoordinatedStateManager cp = new ZkCoordinatedStateManager(); + cp.initialize(server); + cp.start(); + + Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp); + + Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers); + Mockito.when(serverManager.getOnlineServersList()).thenReturn( + new ArrayList(onlineServers.keySet())); + + Mockito.when(serverManager.createDestinationServersList()).thenReturn( + new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.createDestinationServersList(null)).thenReturn( + new ArrayList(onlineServers.keySet())); + Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn( + new ArrayList(onlineServers.keySet())); + + for (Entry entry : bulk.entrySet()) { + Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true); + Mockito.when(serverManager.sendRegionClose(entry.getValue(), + entry.getKey(), -1)).thenReturn(true); + Mockito.when(serverManager.sendRegionOpen(entry.getValue(), + entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED); + Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true); + } + + Mockito.when(master.getServerManager()).thenReturn(serverManager); + + drainedServers.add(SERVERNAME_A); + drainedServers.add(SERVERNAME_B); + drainedServers.add(SERVERNAME_C); + drainedServers.add(SERVERNAME_D); + + TableStateManager tsm = mock(TableStateManager.class); + am = new AssignmentManager(server, serverManager, balancer, + startupMasterExecutor("mockExecutorServiceBulk"), null, null, tsm); + + Mockito.when(master.getAssignmentManager()).thenReturn(am); + + zkWatcher.registerListener(am); + + for (ServerName 
drained : drainedServers) { + addServerToDrainedList(drained, onlineServers, serverManager); + } + + am.assign(bulk); + + Set regionsInTransition = am.getRegionStates().getRegionsInTransition(); + for (RegionState rs : regionsInTransition) { + setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion()); + } + + am.waitForAssignment(REGIONINFO_A); + am.waitForAssignment(REGIONINFO_B); + am.waitForAssignment(REGIONINFO_C); + am.waitForAssignment(REGIONINFO_D); + am.waitForAssignment(REGIONINFO_E); + + Map regionAssignments = am.getRegionStates().getRegionAssignments(); + for (Entry entry : regionAssignments.entrySet()) { + LOG.info("Region Assignment: " + + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue()); + bunchServersAssigned.add(entry.getValue()); + } + + for (ServerName sn : drainedServers) { + assertFalse(bunchServersAssigned.contains(sn)); + } + } } private void addServerToDrainedList(ServerName serverName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java index f963461b7c20..9d5259a9af05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java @@ -42,8 +42,8 @@ public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse() Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - - assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false)); + assertTrue("Should create new table descriptor", + fstd.createTableDescriptor(new TableDescriptor(htd), false)); } @Test @@ -56,7 +56,8 @@ public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse() FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(name); fstd.add(htd); - assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false)); + assertFalse("Should not create new table descriptor", + fstd.createTableDescriptor(new TableDescriptor(htd), false)); } @Test @@ -67,9 +68,10 @@ public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor() Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - fstd.createTableDescriptor(htd, false); + TableDescriptor td = new TableDescriptor(htd); + fstd.createTableDescriptor(td, false); assertTrue("Should create new table descriptor", - fstd.createTableDescriptor(htd, true)); + fstd.createTableDescriptor(td, true)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java index 4660bbbeec0a..8d0e4188552e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java @@ -160,8 +160,8 @@ private void verifyHColumnDescriptor(int expected, final TableName tableName, // Verify descriptor from HDFS MasterFileSystem mfs = 
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - hcds = htd.getColumnFamilies(); + TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + hcds = td.getHTableDescriptor().getColumnFamilies(); verifyHColumnDescriptor(expected, hcds, tableName, families); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java new file mode 100644 index 000000000000..19c1136727f0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java @@ -0,0 +1,57 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +/** + * Test setting values in the descriptor + */ +@Category(SmallTests.class) +public class TestTableDescriptor { + final static Log LOG = LogFactory.getLog(TestTableDescriptor.class); + + @Test + public void testPb() throws DeserializationException, IOException { + HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC); + final int v = 123; + htd.setMaxFileSize(v); + htd.setDurability(Durability.ASYNC_WAL); + htd.setReadOnly(true); + htd.setRegionReplication(2); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + byte[] bytes = td.toByteArray(); + TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes); + assertEquals(td, deserializedTd); + assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor()); + assertEquals(td.getTableState(), deserializedTd.getTableState()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index c0b32b82a5bf..0a9984597352 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import 
org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -49,11 +50,8 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.exceptions.MergeRegionException; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -255,7 +253,7 @@ public void testDisableAndEnableTable() throws IOException { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.DISABLED)); + ht.getName(), TableState.State.DISABLED)); // Test that table is disabled get = new Get(row); @@ -282,7 +280,7 @@ public void testDisableAndEnableTable() throws IOException { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.ENABLED)); + ht.getName(), TableState.State.ENABLED)); // Test that table is enabled try { @@ -354,7 +352,7 @@ public void testCreateTable() throws IOException { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED)); + TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); } @Test (timeout=300000) @@ -1340,11 +1338,9 @@ public void testInvalidHColumnDescriptor() throws IOException { @Test (timeout=300000) public void testEnableDisableAddColumnDeleteColumn() throws Exception { - ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); TableName tableName = TableName.valueOf("testEnableDisableAddColumnDeleteColumn"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ZKTableStateClientSideReader.isEnabledTable(zkw, - TableName.valueOf("testEnableDisableAddColumnDeleteColumn"))) { + while (!this.admin.isTableEnabled(tableName)) { Thread.sleep(10); } this.admin.disableTable(tableName); @@ -1487,16 +1483,4 @@ public void testMergeRegions() throws Exception { this.admin.deleteTable(tableName); } } - - @Test (timeout=30000) - public void testTableNotFoundException() throws Exception { - ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); - TableName table = TableName.valueOf("tableNotExists"); - try { - ZKTableStateClientSideReader.isDisabledTable(zkw, table); - fail("Shouldn't be here"); - } catch (TableNotFoundException e) { - // This is expected. 
- } - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 2e436c69f00f..0e715a93a50b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -30,6 +30,7 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -117,40 +118,88 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; +import org.junit.Assume; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + /** * Run tests that use the HBase clients; {@link HTable}. * Sets up the HBase mini cluster once at start and runs through all client tests. * Each creates a table named for the method and does its stuff against that. + * + * Parameterized to run with different registry implementations. */ @Category(LargeTests.class) @SuppressWarnings ("deprecation") +@RunWith(Parameterized.class) public class TestFromClientSide { private static final Log LOG = LogFactory.getLog(TestFromClientSide.class); - protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + protected static HBaseTestingUtility TEST_UTIL; private static byte [] ROW = Bytes.toBytes("testRow"); private static byte [] FAMILY = Bytes.toBytes("testFamily"); private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); private static byte [] VALUE = Bytes.toBytes("testValue"); protected static int SLAVES = 3; + @Parameterized.Parameters + public static Collection parameters() { + return Arrays.asList(new Object[][] { + { MasterRegistry.class }, + { ZKConnectionRegistry.class } + }); + } + + // To keep the child classes happy. + TestFromClientSide() {} + + public TestFromClientSide(Class registry) throws Exception { + initialize(registry); + } + /** - * @throws java.lang.Exception + * JUnit does not provide an easy way to run a hook after each parameterized run. Without that + * there is no easy way to restart the test cluster after each parameterized run. Annotation + * BeforeParam does not work either because it runs before parameterization and hence does not + * have access to the test parameters (which is weird). + * + * This *hack* checks if the current instance of test cluster configuration has the passed + * parameterized configs. In such a case, we can just reuse the cluster for test and do not need + * to initialize from scratch. While this is a hack, it saves a ton of time for the full + * test and de-flakes it. 
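// What the parameterization above means for client code: the connection
// registry implementation is chosen purely through configuration. A hedged
// sketch; the master address list is an illustrative value.
static String fetchClusterId() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, MasterRegistry.class,
      ConnectionRegistry.class);
  conf.set(HConstants.MASTER_ADDRS_KEY, "master1:16000,master2:16000,master3:16000");
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    return connection.getClusterId(); // served by a master RPC, not a ZooKeeper read
  }
}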
*/ - @BeforeClass - public static void setUpBeforeClass() throws Exception { + protected static boolean isSameParameterizedCluster( + Class registryImpl) { + if (TEST_UTIL == null) { + return false; + } + Configuration conf = TEST_UTIL.getConfiguration(); + Class confClass = conf.getClass(HConstants.REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class, ConnectionRegistry.class); + return confClass.getName().equals(registryImpl.getName()); + } + + public static void initialize(Class registry) throws Exception { + // initialize() is called for every unit test, however we only want to reset the cluster state + // at the end of every parameterized run. + if (isSameParameterizedCluster(registry)) { + return; + } + if (TEST_UTIL != null) { + // We reached end of a parameterized run, clean up. + TEST_UTIL.shutdownMiniCluster(); + } + TEST_UTIL = new HBaseTestingUtility(); // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); Configuration conf = TEST_UTIL.getConfiguration(); + conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, registry, ConnectionRegistry.class); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MultiRowMutationEndpoint.class.getName()); conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests @@ -167,22 +216,6 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - /** - * @throws java.lang.Exception - */ - @Before - public void setUp() throws Exception { - // Nothing to do. - } - - /** - * @throws java.lang.Exception - */ - @After - public void tearDown() throws Exception { - // Nothing to do. - } - /** * Test append result when there are duplicate rpc request. */ @@ -4461,6 +4494,12 @@ public void testUnmanagedHConnection() throws IOException, InterruptedException */ @Test public void testUnmanagedHConnectionReconnect() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + Class registryImpl = conf.getClass( + HConstants.REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class); + // This test does not make sense for MasterRegistry since it stops the only master in the + // cluster and starts a new master without populating the underlying config for the connection. 
+ Assume.assumeFalse(registryImpl.equals(MasterRegistry.class)); final TableName tableName = TableName.valueOf("testUnmanagedHConnectionReconnect"); HTable t = createUnmangedHConnectionHTable(tableName); Connection conn = t.getConnection(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java index cd2409e43e02..9ce013341847 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java @@ -17,13 +17,18 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Arrays; +import java.util.Collection; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; import org.junit.BeforeClass; import org.junit.experimental.categories.Category; +import org.junit.runners.Parameterized; /** * Test all client operations with a coprocessor that @@ -31,12 +36,32 @@ */ @Category(LargeTests.class) public class TestFromClientSideWithCoprocessor extends TestFromClientSide { - @BeforeClass - public static void setUpBeforeClass() throws Exception { + + @Parameterized.Parameters + public static Collection parameters() { + return Arrays.asList(new Object[][] { + { ZKConnectionRegistry.class } + }); + } + + public TestFromClientSideWithCoprocessor(Class registry) throws Exception { + initialize(registry); + } + + public static void initialize(Class registry) throws Exception { + if (isSameParameterizedCluster(registry)) { + return; + } + if (TEST_UTIL != null) { + // We reached end of a parameterized run, clean up. + TEST_UTIL.shutdownMiniCluster(); + } + TEST_UTIL = new HBaseTestingUtility(); Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName()); conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests + conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, registry, ConnectionRegistry.class); // We need more than one region server in this test TEST_UTIL.startMiniCluster(SLAVES); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java new file mode 100644 index 000000000000..22dbfa92cccb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import com.google.common.util.concurrent.Uninterruptibles; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ClientTests.class, SmallTests.class}) +public class TestMasterAddressRefresher { + + static class DummyConnection implements Connection { + private final Configuration conf; + + DummyConnection(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + @Override + public Table getTable(TableName tableName) throws IOException { + return null; + } + + @Override + public Table getTable(TableName tableName, ExecutorService pool) throws IOException { + return null; + } + + @Override + public BufferedMutator getBufferedMutator(TableName tableName) throws IOException { + return null; + } + + @Override + public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException { + return null; + } + + @Override + public RegionLocator getRegionLocator(TableName tableName) throws IOException { + return null; + } + + @Override + public Admin getAdmin() throws IOException { + return null; + } + + @Override + public String getClusterId() throws IOException { + return null; + } + + @Override + public void close() throws IOException { + + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void abort(String why, Throwable e) { + + } + + @Override + public boolean isAborted() { + return false; + } + } + + private static class DummyMasterRegistry extends MasterRegistry { + + private final AtomicInteger getMastersCallCounter = new AtomicInteger(0); + private final List callTimeStamps = new ArrayList<>(); + + @Override + public void init(Connection connection) throws IOException { + super.init(connection); + } + + @Override + List getMasters() { + getMastersCallCounter.incrementAndGet(); + callTimeStamps.add(EnvironmentEdgeManager.currentTime()); + return new ArrayList<>(); + } + + public int getMastersCount() { + return getMastersCallCounter.get(); + } + + public List getCallTimeStamps() { + return callTimeStamps; + } + } + + @Test + public void testPeriodicMasterEndPointRefresh() throws IOException { + Configuration conf = HBaseConfiguration.create(); + // Refresh every 1 second. 
+ conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, 1); + conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 0); + final DummyMasterRegistry registry = new DummyMasterRegistry(); + registry.init(new DummyConnection(conf)); + // Wait for > 3 seconds to see that at least 3 getMasters() RPCs have been made. + Waiter.waitFor( + conf, 5000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return registry.getMastersCount() > 3; + } + }); + } + + @Test + public void testDurationBetweenRefreshes() throws IOException { + Configuration conf = HBaseConfiguration.create(); + // Disable periodic refresh + conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, Integer.MAX_VALUE); + // A minimum duration of 1s between refreshes + conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 1); + DummyMasterRegistry registry = new DummyMasterRegistry(); + registry.init(new DummyConnection(conf)); + // Issue a ton of manual refreshes. + for (int i = 0; i < 10000; i++) { + registry.masterAddressRefresher.refreshNow(); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS); + } + // Overall wait time is 10000 ms, so the number of requests should be <=10 + List callTimeStamps = registry.getCallTimeStamps(); + // Actual calls to getMasters() should be much lower than the refresh count. + Assert.assertTrue( + String.valueOf(registry.getMastersCount()), registry.getMastersCount() <= 20); + Assert.assertTrue(callTimeStamps.size() > 0); + // Verify that the delta between subsequent RPCs is at least 1sec as configured. + for (int i = 1; i < callTimeStamps.size() - 1; i++) { + long delta = callTimeStamps.get(i) - callTimeStamps.get(i - 1); + // Few ms cushion to account for any env jitter. + Assert.assertTrue(callTimeStamps.toString(), delta > 990); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java new file mode 100644 index 000000000000..0695e4b0252e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
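// The two refresher knobs exercised by the tests above, as a deployment might
// tune them; the concrete values here are illustrative assumptions.
Configuration conf = HBaseConfiguration.create();
// Proactively re-fetch the master list every five minutes.
conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, 300);
// Collapse bursts of error-triggered refreshNow() calls to one per minute.
conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 60);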
+ */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.RpcController; +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestMasterRegistry { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final int META_REPLICA_COUNT = 3; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, META_REPLICA_COUNT); + TEST_UTIL.startMiniCluster(3, 3); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + private static class ExceptionInjectorRegistry extends MasterRegistry { + @Override + public String getClusterId() throws IOException { + GetClusterIdResponse resp = doCall(new Callable() { + @Override + public GetClusterIdResponse call(ClientMetaService.Interface stub, RpcController controller) + throws IOException { + throw new SocketTimeoutException("Injected exception."); + } + }); + return resp.getClusterId(); + } + } + + /** + * Generates a string of dummy master addresses in host:port format. Every other hostname won't + * have a port number. + */ + private static String generateDummyMastersList(int size) { + List masters = new ArrayList<>(); + for (int i = 0; i < size; i++) { + masters.add(" localhost" + (i % 2 == 0 ? ":" + (1000 + i) : "")); + } + return Joiner.on(",").join(masters); + } + + /** + * Makes sure the master registry parses the master end points in the configuration correctly. + */ + @Test + public void testMasterAddressParsing() throws IOException { + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + int numMasters = 10; + conf.set(HConstants.MASTER_ADDRS_KEY, generateDummyMastersList(numMasters)); + List parsedMasters = new ArrayList<>(MasterRegistry.parseMasterAddrs(conf)); + // Half of them would be without a port, duplicates are removed. + assertEquals(numMasters / 2 + 1, parsedMasters.size()); + // Sort in the increasing order of port numbers. 
+ Collections.sort(parsedMasters, new Comparator<ServerName>() { + @Override + public int compare(ServerName sn1, ServerName sn2) { + return sn1.getPort() - sn2.getPort(); + } + }); + for (int i = 0; i < parsedMasters.size(); i++) { + ServerName sn = parsedMasters.get(i); + assertEquals("localhost", sn.getHostname()); + if (i == parsedMasters.size() - 1) { + // Last entry should be the one with default port. + assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort()); + } else { + assertEquals(1000 + (2 * i), sn.getPort()); + } + } + } + + @Test + public void testRegistryRPCs() throws Exception { + HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster(); + final MasterRegistry registry = new MasterRegistry(); + try { + registry.init(TEST_UTIL.getConnection()); + // Add wait on all replicas being assigned before proceeding w/ test. Failed on occasion + // because not all replicas had made it up before test started. + TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return registry.getMetaRegionLocations().size() == META_REPLICA_COUNT; + } + }); + assertEquals(registry.getClusterId(), activeMaster.getClusterId()); + assertEquals(registry.getActiveMaster(), activeMaster.getServerName()); + List<HRegionLocation> metaLocations = + Arrays.asList(registry.getMetaRegionLocations().getRegionLocations()); + List<HRegionLocation> actualMetaLocations = + activeMaster.getMetaRegionLocationCache().getMetaRegionLocations(); + Collections.sort(metaLocations); + Collections.sort(actualMetaLocations); + assertEquals(actualMetaLocations, metaLocations); + int numRs = registry.getCurrentNrHRS(); + assertEquals(TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(), numRs); + } finally { + registry.close(); + } + } + + /** + * Tests that the list of masters configured in the MasterRegistry is dynamically refreshed in the + * event of errors. + */ + @Test + public void testDynamicMasterConfigurationRefresh() throws Exception { + Configuration conf = TEST_UTIL.getConnection().getConfiguration(); + String currentMasterAddrs = Preconditions.checkNotNull(conf.get(HConstants.MASTER_ADDRS_KEY)); + HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster(); + // Add a non-working master + ServerName badServer = ServerName.valueOf("localhost", 1234, -1); + conf.set(HConstants.MASTER_ADDRS_KEY, badServer.toShortString() + "," + currentMasterAddrs); + // Do not limit the number of refreshes during the test run. + conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 0); + final ExceptionInjectorRegistry registry = new ExceptionInjectorRegistry(); + try { + registry.init(TEST_UTIL.getConnection()); + final ImmutableSet<String> masters = registry.getParsedMasterServers(); + assertTrue(masters.contains(badServer.toString())); + // Make a registry RPC; this should trigger a refresh since one of the RPCs fails. + try { + registry.getClusterId(); + } catch (MasterRegistryFetchException e) { + // Expected. + } + + // Wait for new set of masters to be populated. + TEST_UTIL.waitFor(5000, + new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return !registry.getParsedMasterServers().equals(masters); + } + }); + // New set of masters should not include the bad server. + final ImmutableSet<String> newMasters = registry.getParsedMasterServers(); + // Bad one should be out.
+ assertEquals(3, newMasters.size()); + assertFalse(newMasters.contains(badServer.toString())); + // Kill the active master + activeMaster.stopMaster(); + TEST_UTIL.waitFor(10000, + new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() { + return TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads().size() == 2; + } + }); + TEST_UTIL.getMiniHBaseCluster().waitForActiveAndReadyMaster(10000); + // Make a registry RPC; this should trigger a refresh since one of the RPCs fails. + try { + registry.getClusterId(); + } catch (MasterRegistryFetchException e) { + // Expected. + } + // Wait until the killed master is de-registered. + TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return registry.getMasters().size() == 2; + } + }); + TEST_UTIL.waitFor(20000, new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return registry.getParsedMasterServers().size() == 2; + } + }); + final ImmutableSet<String> newMasters2 = registry.getParsedMasterServers(); + assertEquals(2, newMasters2.size()); + assertFalse(newMasters2.contains(activeMaster.getServerName().toString())); + } finally { + registry.close(); + // Reset the state, add back the killed master. + TEST_UTIL.getMiniHBaseCluster().startMaster(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java new file mode 100644 index 000000000000..f5ba56dc84b5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import com.google.common.base.Preconditions; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MultithreadedTestUtil; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MetaRegionLocationCache; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ SmallTests.class, MasterTests.class }) +public class TestMetaRegionLocationCache { + + private static final Log LOG = LogFactory.getLog(TestMetaRegionLocationCache.class.getName()); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static ConnectionRegistry REGISTRY; + + // Waits for all meta replicas to have a region location. + static void waitUntilAllMetaReplicasHavingRegionLocation(Configuration conf, + final ConnectionRegistry registry, final int regionReplication) throws IOException { + Waiter.waitFor(conf, conf.getLong( + "hbase.client.sync.wait.timeout.msec", 60000), 200, true, + new Waiter.ExplainingPredicate<IOException>() { + @Override + public String explainFailure() throws IOException { + return "Not all meta replicas get assigned"; + } + + @Override + public boolean evaluate() throws IOException { + try { + RegionLocations locs = registry.getMetaRegionLocations(); + if (locs == null || locs.size() < regionReplication) { + return false; + } + for (int i = 0; i < regionReplication; i++) { + if (locs.getRegionLocation(i) == null) { + return false; + } + } + return true; + } catch (Exception e) { + LOG.warn("Failed to get meta region locations", e); + return false; + } + } + }); + } + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); + TEST_UTIL.startMiniCluster(3); + REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConnection()); + waitUntilAllMetaReplicasHavingRegionLocation( + TEST_UTIL.getConfiguration(), REGISTRY, 3); + TEST_UTIL.getConnection().getAdmin().setBalancerRunning(false, true); + } + + @AfterClass + public static void cleanUp() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + private List<HRegionLocation> getCurrentMetaLocations(ZooKeeperWatcher zk) throws Exception { + List<HRegionLocation> result = new ArrayList<>(); + for (String znode: zk.getMetaReplicaNodes()) { + String path = ZKUtil.joinZNode(zk.baseZNode, znode); + int replicaId = zk.getMetaReplicaIdFromPath(path); + RegionState state =
MetaTableLocator.getMetaRegionState(zk, replicaId); + result.add(new HRegionLocation(state.getRegion(), state.getServerName())); + } + return result; + } + + // Verifies that the cached meta locations in the given master are in sync with what is in ZK. + private void verifyCachedMetaLocations(final HMaster master) throws Exception { + // Wait until initial meta locations are loaded. + ZooKeeperWatcher zk = master.getZooKeeper(); + final List<String> metaZnodes = zk.getMetaReplicaNodes(); + assertEquals(3, metaZnodes.size()); + TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return master.getMetaRegionLocationCache().getMetaRegionLocations().size() + == metaZnodes.size(); + } + }); + List<HRegionLocation> metaHRLs = master.getMetaRegionLocationCache().getMetaRegionLocations(); + List<HRegionLocation> actualHRLs = getCurrentMetaLocations(zk); + Collections.sort(metaHRLs); + Collections.sort(actualHRLs); + assertEquals(actualHRLs, metaHRLs); + } + + @Test public void testInitialMetaLocations() throws Exception { + verifyCachedMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster()); + } + + @Test public void testStandByMetaLocations() throws Exception { + HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster(); + verifyCachedMetaLocations(standBy); + } + + private static ServerName getOtherRS(List<ServerName> allServers, ServerName except) { + Preconditions.checkArgument(allServers.size() > 0); + allServers.remove(except); + ServerName ret; + try { + Collections.shuffle(allServers); + ret = allServers.get(0); + } finally { + allServers.add(except); + } + return ret; + } + + /* + * Shuffles the meta region replicas around the cluster and makes sure the cache is not stale. + */ + @Test public void testMetaLocationsChange() throws Exception { + List<HRegionLocation> currentMetaLocs = + getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper()); + List<ServerName> allServers = new ArrayList<>(); + for (JVMClusterUtil.RegionServerThread rs: + TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { + allServers.add(rs.getRegionServer().getServerName()); + } + // Move these replicas to random servers. + for (HRegionLocation location: currentMetaLocs) { + TEST_UTIL.moveRegionAndWait( + location.getRegionInfo(), getOtherRS(allServers, location.getServerName())); + } + waitUntilAllMetaReplicasHavingRegionLocation( + TEST_UTIL.getConfiguration(), REGISTRY, 3); + for (JVMClusterUtil.MasterThread masterThread: + TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + verifyCachedMetaLocations(masterThread.getMaster()); + } + } + + /** + * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base + * znode for notifications. + */ + @Test public void testMetaRegionLocationCache() throws Exception { + final String parentZnodeName = "/randomznodename"; + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName); + ServerName sn = ServerName.valueOf("localhost", 1234, 5678); + try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(conf, null, null, true)) { + // A thread that repeatedly creates and drops an unrelated child znode. This is to simulate + // some ZK activity in the background.
+ MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); + ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) { + @Override public void doAnAction() throws Exception { + final String testZnode = parentZnodeName + "/child"; + ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes()); + ZKUtil.deleteNode(zkWatcher, testZnode); + } + }); + ctx.startThreads(); + try { + MetaRegionLocationCache metaCache = new MetaRegionLocationCache(zkWatcher); + // meta znodes do not exist at this point, cache should be empty. + assertTrue(metaCache.getMetaRegionLocations().isEmpty()); + // Set the meta locations for the meta replicas, simulating an active HMaster meta + // assignment. + for (int i = 0; i < 3; i++) { + // Updates the meta znodes. + MetaTableLocator.setMetaLocation(zkWatcher, sn, i, RegionState.State.OPEN); + } + // Wait until the meta cache is populated. + int iters = 0; + while (iters++ < 10) { + if (metaCache.getMetaRegionLocations().size() == 3) { + break; + } + Thread.sleep(1000); + } + List<HRegionLocation> metaLocations = metaCache.getMetaRegionLocations(); + assertNotNull(metaLocations); + assertEquals(3, metaLocations.size()); + for (HRegionLocation location : metaLocations) { + assertEquals(sn, location.getServerName()); + } + } finally { + // clean up. + ctx.stop(); + ZKUtil.deleteChildrenRecursively(zkWatcher, parentZnodeName); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index db26d37a2f09..6258f6db9042 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -70,6 +70,11 @@ public TableLockManager getTableLockManager() { return null; } + @Override + public TableStateManager getTableStateManager() { + return null; + } + @Override public MasterCoprocessorHost getMasterCoprocessorHost() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 428ff1e5abf4..5e3106f7888c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file @@ -18,11 +18,15 @@ */ package org.apache.hadoop.hbase.master; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.Semaphore; import org.apache.commons.logging.Log; @@ -31,6 +35,7 @@ import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -68,39 +73,41 @@ public static void tearDownAfterClass() throws Exception { } @Test public void testRestartMaster() throws IOException, KeeperException { - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - "testActiveMasterManagerFromZK", null, true); - try { - ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); - ZKUtil.deleteNode(zk, zk.clusterStateZNode); - } catch(KeeperException.NoNodeException nne) {} - - // Create the master node with a dummy address - ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis()); - // Should not have a master yet - DummyMaster dummyMaster = new DummyMaster(zk,master); - ClusterStatusTracker clusterStatusTracker = - dummyMaster.getClusterStatusTracker(); - ActiveMasterManager activeMasterManager = - dummyMaster.getActiveMasterManager(); - assertFalse(activeMasterManager.clusterHasActiveMaster.get()); - - // First test becoming the active master uninterrupted - MonitoredTask status = Mockito.mock(MonitoredTask.class); - clusterStatusTracker.setClusterUp(); - - activeMasterManager.blockUntilBecomingActiveMaster(100, status); - assertTrue(activeMasterManager.clusterHasActiveMaster.get()); - assertMaster(zk, master); - - // Now pretend master restart - DummyMaster secondDummyMaster = new DummyMaster(zk,master); - ActiveMasterManager secondActiveMasterManager = - secondDummyMaster.getActiveMasterManager(); - assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get()); - activeMasterManager.blockUntilBecomingActiveMaster(100, status); - assertTrue(activeMasterManager.clusterHasActiveMaster.get()); - assertMaster(zk, master); + try (ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + "testActiveMasterManagerFromZK", null, true)) { + try { + ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); + ZKUtil.deleteNode(zk, zk.clusterStateZNode); + } catch (KeeperException.NoNodeException nne) { + } + + // Create the master node with a dummy address + ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis()); + // Should not have a master yet + DummyMaster dummyMaster = new DummyMaster(zk, master); + ClusterStatusTracker clusterStatusTracker = + dummyMaster.getClusterStatusTracker(); + ActiveMasterManager activeMasterManager = + dummyMaster.getActiveMasterManager(); + assertFalse(activeMasterManager.clusterHasActiveMaster.get()); + + // First test becoming the active master uninterrupted + MonitoredTask status = Mockito.mock(MonitoredTask.class); + clusterStatusTracker.setClusterUp(); + + activeMasterManager.blockUntilBecomingActiveMaster(100, status); + assertTrue(activeMasterManager.clusterHasActiveMaster.get()); + assertMaster(zk, master); + + // Now pretend 
master restart + DummyMaster secondDummyMaster = new DummyMaster(zk, master); + ActiveMasterManager secondActiveMasterManager = + secondDummyMaster.getActiveMasterManager(); + assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get()); + activeMasterManager.blockUntilBecomingActiveMaster(100, status); + assertTrue(activeMasterManager.clusterHasActiveMaster.get()); + assertMaster(zk, master); + } } /** @@ -110,81 +117,126 @@ public static void tearDownAfterClass() throws Exception { */ @Test public void testActiveMasterManagerFromZK() throws Exception { - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - "testActiveMasterManagerFromZK", null, true); - try { + try (ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + "testActiveMasterManagerFromZK", null, true)) { + try { + ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); + ZKUtil.deleteNode(zk, zk.clusterStateZNode); + } catch (KeeperException.NoNodeException nne) { + } + + // Create the master node with a dummy address + ServerName firstMasterAddress = + ServerName.valueOf("localhost", 1, System.currentTimeMillis()); + ServerName secondMasterAddress = + ServerName.valueOf("localhost", 2, System.currentTimeMillis()); + + // Should not have a master yet + DummyMaster ms1 = new DummyMaster(zk, firstMasterAddress); + ActiveMasterManager activeMasterManager = + ms1.getActiveMasterManager(); + assertFalse(activeMasterManager.clusterHasActiveMaster.get()); + + // First test becoming the active master uninterrupted + ClusterStatusTracker clusterStatusTracker = + ms1.getClusterStatusTracker(); + clusterStatusTracker.setClusterUp(); + activeMasterManager.blockUntilBecomingActiveMaster(100, + Mockito.mock(MonitoredTask.class)); + assertTrue(activeMasterManager.clusterHasActiveMaster.get()); + assertMaster(zk, firstMasterAddress); + + // New manager will now try to become the active master in another thread + WaitToBeMasterThread t = new WaitToBeMasterThread(zk, secondMasterAddress); + t.start(); + // Wait for this guy to figure out there is another active master + // Wait for 1 second at most + int sleeps = 0; + while (!t.manager.clusterHasActiveMaster.get() && sleeps < 100) { + Thread.sleep(10); + sleeps++; + } + + // Both should see that there is an active master + assertTrue(activeMasterManager.clusterHasActiveMaster.get()); + assertTrue(t.manager.clusterHasActiveMaster.get()); + // But secondary one should not be the active master + assertFalse(t.isActiveMaster); + + // Close the first server and delete its master node + ms1.stop("stopping first server"); + + // Use a listener to capture when the node is actually deleted + NodeDeletionListener listener = new NodeDeletionListener(zk, zk.getMasterAddressZNode()); + zk.registerListener(listener); + + LOG.info("Deleting master node"); ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); - ZKUtil.deleteNode(zk, zk.clusterStateZNode); - } catch(KeeperException.NoNodeException nne) {} - - // Create the master node with a dummy address - ServerName firstMasterAddress = - ServerName.valueOf("localhost", 1, System.currentTimeMillis()); - ServerName secondMasterAddress = - ServerName.valueOf("localhost", 2, System.currentTimeMillis()); - - // Should not have a master yet - DummyMaster ms1 = new DummyMaster(zk,firstMasterAddress); - ActiveMasterManager activeMasterManager = - ms1.getActiveMasterManager(); - assertFalse(activeMasterManager.clusterHasActiveMaster.get()); - - // First test becoming the active master uninterrupted - ClusterStatusTracker
clusterStatusTracker = - ms1.getClusterStatusTracker(); - clusterStatusTracker.setClusterUp(); - activeMasterManager.blockUntilBecomingActiveMaster(100, - Mockito.mock(MonitoredTask.class)); - assertTrue(activeMasterManager.clusterHasActiveMaster.get()); - assertMaster(zk, firstMasterAddress); - - // New manager will now try to become the active master in another thread - WaitToBeMasterThread t = new WaitToBeMasterThread(zk, secondMasterAddress); - t.start(); - // Wait for this guy to figure out there is another active master - // Wait for 1 second at most - int sleeps = 0; - while(!t.manager.clusterHasActiveMaster.get() && sleeps < 100) { - Thread.sleep(10); - sleeps++; - } - // Both should see that there is an active master - assertTrue(activeMasterManager.clusterHasActiveMaster.get()); - assertTrue(t.manager.clusterHasActiveMaster.get()); - // But secondary one should not be the active master - assertFalse(t.isActiveMaster); - - // Close the first server and delete it's master node - ms1.stop("stopping first server"); - - // Use a listener to capture when the node is actually deleted - NodeDeletionListener listener = new NodeDeletionListener(zk, zk.getMasterAddressZNode()); - zk.registerListener(listener); - - LOG.info("Deleting master node"); - ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); - - // Wait for the node to be deleted - LOG.info("Waiting for active master manager to be notified"); - listener.waitForDeletion(); - LOG.info("Master node deleted"); - - // Now we expect the secondary manager to have and be the active master - // Wait for 1 second at most - sleeps = 0; - while(!t.isActiveMaster && sleeps < 100) { - Thread.sleep(10); - sleeps++; - } - LOG.debug("Slept " + sleeps + " times"); + // Wait for the node to be deleted + LOG.info("Waiting for active master manager to be notified"); + listener.waitForDeletion(); + LOG.info("Master node deleted"); + + // Now we expect the secondary manager to have and be the active master + // Wait for 1 second at most + sleeps = 0; + while (!t.isActiveMaster && sleeps < 100) { + Thread.sleep(10); + sleeps++; + } + LOG.debug("Slept " + sleeps + " times"); - assertTrue(t.manager.clusterHasActiveMaster.get()); - assertTrue(t.isActiveMaster); + assertTrue(t.manager.clusterHasActiveMaster.get()); + assertTrue(t.isActiveMaster); - LOG.info("Deleting master node"); + LOG.info("Deleting master node"); + + ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); + } + } - ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); + @Test + public void testBackupMasterUpdates() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + try (ZooKeeperWatcher zk = new ZooKeeperWatcher( + conf, "testBackupMasterUpdates", null, true)) { + ServerName sn1 = ServerName.valueOf("localhost", 1, -1); + DummyMaster master1 = new DummyMaster(zk, sn1); + final ActiveMasterManager activeMasterManager = master1.getActiveMasterManager(); + activeMasterManager.blockUntilBecomingActiveMaster(100, + Mockito.mock(MonitoredTask.class)); + assertEquals(sn1, activeMasterManager.getActiveMasterServerName()); + assertEquals(0, activeMasterManager.getBackupMasters().size()); + // Add backup masters + final List<String> backupZNodes = new ArrayList<>(); + for (int i = 1; i <= 10; i++) { + ServerName backupSn = ServerName.valueOf("localhost", 1000 + i, -1); + String backupZn = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupSn.toString()); + backupZNodes.add(backupZn); + MasterAddressTracker.setMasterAddress(zk, backupZn, backupSn, 1234); + TEST_UTIL.waitFor(10000, + new
Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return activeMasterManager.getBackupMasters().size() == backupZNodes.size(); + } + }); + } + // Remove backup masters + int numBackups = backupZNodes.size(); + for (String backupZNode: backupZNodes) { + ZKUtil.deleteNode(zk, backupZNode); + final int currentBackups = --numBackups; + TEST_UTIL.waitFor(10000, + new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + return activeMasterManager.getBackupMasters().size() == currentBackups; + } + }); + } + } } /** @@ -195,8 +247,8 @@ public void testActiveMasterManagerFromZK() throws Exception { * @throws IOException if an IO problem is encountered */ private void assertMaster(ZooKeeperWatcher zk, - ServerName expectedAddress) - throws KeeperException, IOException { + ServerName expectedAddress) + throws KeeperException, IOException { ServerName readAddress = MasterAddressTracker.getMasterAddress(zk); assertNotNull(readAddress); assertTrue(expectedAddress.equals(readAddress)); @@ -208,7 +260,8 @@ public static class WaitToBeMasterThread extends Thread { DummyMaster dummyMaster; boolean isActiveMaster; - public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) { + public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) + throws InterruptedIOException { this.dummyMaster = new DummyMaster(zk,address); this.manager = this.dummyMaster.getActiveMasterManager(); isActiveMaster = false; @@ -256,13 +309,13 @@ public static class DummyMaster implements Server { private ClusterStatusTracker clusterStatusTracker; private ActiveMasterManager activeMasterManager; - public DummyMaster(ZooKeeperWatcher zk, ServerName master) { + public DummyMaster(ZooKeeperWatcher zk, ServerName master) throws InterruptedIOException { this.clusterStatusTracker = - new ClusterStatusTracker(zk, this); + new ClusterStatusTracker(zk, this); clusterStatusTracker.start(); this.activeMasterManager = - new ActiveMasterManager(zk, master, this); + new ActiveMasterManager(zk, master, this); zk.registerListener(activeMasterManager); } @@ -327,4 +380,4 @@ public ChoreService getChoreService() { return null; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 28f9e8315313..92c045fc56c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -69,7 +70,6 @@ import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; import
org.apache.hadoop.hbase.util.Bytes; @@ -156,10 +156,9 @@ public void testRestartMetaRegionServer() throws Exception { Bytes.toBytes(metaServerName.getServerName())); master.assignmentManager.waitUntilNoRegionsInTransition(60000); } - RegionState metaState = - MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", - metaState.getState(), RegionState.State.OPEN); + RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); + assertEquals("Meta should be not in transition", + metaState.getState(), RegionState.State.OPEN); assertNotEquals("Meta should be moved off master", metaServerName, master.getServerName()); cluster.killRegionServer(metaServerName); @@ -289,7 +288,8 @@ public void testAssignRegionOnRestartedServer() throws Exception { String table = "testAssignRegionOnRestartedServer"; TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); TEST_UTIL.getMiniHBaseCluster().stopMaster(0); - TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect + //restart the master so that the conf takes effect + TEST_UTIL.getMiniHBaseCluster().startMaster(); ServerName deadServer = null; HMaster master = null; @@ -888,7 +888,7 @@ public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws } } - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING); + am.getTableStateManager().setTableState(table, TableState.State.DISABLING); List<HRegionInfo> toAssignRegions = am.cleanOutCrashedServerReferences(destServerName); assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.", am.getRegionStates() @@ -897,7 +897,7 @@ public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws if (hri != null && serverName != null) { am.regionOnline(hri, serverName); } - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED); + am.getTableStateManager().setTableState(table, TableState.State.DISABLED); TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 397d5a80f083..6b499f21cd6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -41,6 +41,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -54,13 +56,13 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; import
org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -352,13 +354,18 @@ public TableDescriptors getTableDescriptors() { return new TableDescriptors() { @Override public HTableDescriptor remove(TableName tablename) throws IOException { - // TODO Auto-generated method stub + // noop return null; } @Override public Map<String, HTableDescriptor> getAll() throws IOException { - // TODO Auto-generated method stub + // noop + return null; + } + + @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException { + // noop return null; } @@ -368,6 +375,12 @@ public HTableDescriptor get(TableName tablename) return createHTableDescriptor(); } + @Override + public TableDescriptor getDescriptor(TableName tablename) + throws IOException { + return createTableDescriptor(); + } + @Override public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException { return null; @@ -375,8 +388,12 @@ public Map getByNamespace(String name) throws IOExcept @Override public void add(HTableDescriptor htd) throws IOException { - // TODO Auto-generated method stub + // noop + } + @Override + public void add(TableDescriptor htd) throws IOException { + // noop } @Override public void setCacheOn() throws IOException { @@ -540,6 +557,11 @@ public TableNamespaceManager getTableNamespaceManager() { return null; } + @Override + public TableStateManager getTableStateManager() { + return null; + } + @Override public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible, User user) throws IOException { @@ -1169,6 +1191,11 @@ private HTableDescriptor createHTableDescriptor() { return htd; } + private TableDescriptor createTableDescriptor() { + TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); + return htd; + } + private MultiResponse buildMultiResponse(MultiRequest req) { MultiResponse.Builder builder = MultiResponse.newBuilder(); RegionActionResult.Builder regionActionResultBuilder = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java new file mode 100644 index 000000000000..013d2a4e1b5c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java @@ -0,0 +1,137 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT; +import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY; +import static org.junit.Assert.assertEquals; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse; + +@Category({MediumTests.class, MasterTests.class}) +public class TestClientMetaServiceRPCs { + + // Total number of masters (active + standby) for the purpose of this test. + private static final int MASTER_COUNT = 3; + private static final int RS_COUNT = 3; + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static Configuration conf; + private static int rpcTimeout; + private static RpcClient rpcClient; + + @BeforeClass + public static void setUp() throws Exception { + // Start the mini cluster with stand-by masters. + TEST_UTIL.startMiniCluster(MASTER_COUNT, RS_COUNT); + conf = TEST_UTIL.getConfiguration(); + rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS.toNanos( + conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT))); + rpcClient = RpcClientFactory.createClient(conf, + TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId()); + } + + @AfterClass + public static void tearDown() throws Exception { + if (rpcClient != null) { + rpcClient.close(); + } + TEST_UTIL.shutdownMiniCluster(); + } + + private static ClientMetaService.BlockingInterface getMasterStub(ServerName server) + throws IOException { + return ClientMetaService.newBlockingStub( + rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout)); + } + + private static HBaseRpcController getRpcController() { + return RpcControllerFactory.instantiate(conf).newController(); + } + + /** + * Verifies the cluster ID from all running masters.
+ */ + @Test public void TestClusterID() throws Exception { + HBaseRpcController rpcController = getRpcController(); + String clusterID = TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId(); + int rpcCount = 0; + for (JVMClusterUtil.MasterThread masterThread: + TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + ClientMetaService.BlockingInterface stub = + getMasterStub(masterThread.getMaster().getServerName()); + GetClusterIdResponse resp = + stub.getClusterId(rpcController, GetClusterIdRequest.getDefaultInstance()); + assertEquals(clusterID, resp.getClusterId()); + rpcCount++; + } + assertEquals(MASTER_COUNT, rpcCount); + } + + /** + * Verifies that the meta region locations RPC returns consistent results across all masters. + */ + @Test public void TestMetaLocations() throws Exception { + HBaseRpcController rpcController = getRpcController(); + List<HRegionLocation> metaLocations = TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMetaRegionLocationCache().getMetaRegionLocations(); + Collections.sort(metaLocations); + int rpcCount = 0; + for (JVMClusterUtil.MasterThread masterThread: + TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + ClientMetaService.BlockingInterface stub = + getMasterStub(masterThread.getMaster().getServerName()); + GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations( + rpcController, GetMetaRegionLocationsRequest.getDefaultInstance()); + List<HRegionLocation> result = new ArrayList<>(); + for (HBaseProtos.RegionLocation location: resp.getMetaLocationsList()) { + result.add(ProtobufUtil.toRegionLocation(location)); + } + Collections.sort(result); + assertEquals(metaLocations, result); + rpcCount++; + } + assertEquals(MASTER_COUNT, rpcCount); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 34715aad5a16..80e05e00d649 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; @@ -84,7 +84,7 @@ public void testMasterOpsWhileSplitting() throws Exception { try (HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) { assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME, - ZooKeeperProtos.Table.State.ENABLED)); + TableState.State.ENABLED)); TEST_UTIL.loadTable(ht, FAMILYNAME, false); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 222818835701..a5b3d49b6fa3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -43,20 +43,20 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.testclassification.LargeTests; import
org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; @@ -71,10 +71,8 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKAssign; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.data.Stat; -import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -302,8 +300,8 @@ public void testMasterFailoverWithMockedRIT() throws Exception { log("Beginning to mock scenarios"); // Disable the disabledTable in ZK - TableStateManager zktable = new ZKTableStateManager(zkw); - zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED); + TableStateManager tsm = master.getTableStateManager(); + tsm.setTableState(disabledTable, TableState.State.DISABLED); /* * ZK = OFFLINE @@ -619,7 +617,7 @@ public boolean isAborted() { assertTrue(" Table must be enabled.", master.getAssignmentManager() .getTableStateManager().isTableState(TableName.valueOf("enabledTable"), - ZooKeeperProtos.Table.State.ENABLED)); + TableState.State.ENABLED)); // we also need regions assigned out on the dead server List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>(); enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6)); @@ -679,13 +677,11 @@ public boolean isAborted() { log("Beginning to mock scenarios"); // Disable the disabledTable in ZK - TableStateManager zktable = new ZKTableStateManager(zkw); - zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED); + TableStateManager tsm = master.getTableStateManager(); + tsm.setTableState(disabledTable, TableState.State.DISABLED); assertTrue(" The enabled table should be identified on master fail over.", - zktable.isTableState(TableName.valueOf("enabledTable"), - ZooKeeperProtos.Table.State.ENABLED)); - + tsm.isTableState(TableName.valueOf("enabledTable"), TableState.State.ENABLED)); /* * ZK = CLOSING */ @@ -1143,10 +1139,16 @@ public void testSimpleMasterFailover() throws Exception { // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); + final HMaster finalActive = active; + TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + ClusterStatus status = finalActive.getClusterStatus(); + return status.getBackupMastersSize() == 1 && status.getBackupMasters().size() == 1; + } + }); status = active.getClusterStatus(); assertTrue(status.getMaster().equals(activeName)); - assertEquals(1, status.getBackupMastersSize()); - assertEquals(1, status.getBackupMasters().size()); // kill the active master LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
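The hunk above replaces the fixed backup-master assertions with a Waiter.waitFor predicate, because backup masters register their znodes asynchronously after a failover and an immediate assertion can race with that registration. The same poll-until-deadline idiom recurs throughout the tests in this patch. Below is a minimal self-contained sketch of that idiom, using only java.util.function.BooleanSupplier rather than HBase's Waiter utilities; the class and method names here are illustrative and not part of the patch.

import java.util.function.BooleanSupplier;

public final class PollUntil {
  // Polls the condition every intervalMs until it returns true or timeoutMs elapses.
  // Mirrors the shape of Waiter.waitFor(timeout, predicate) used in these tests.
  static boolean waitFor(long timeoutMs, long intervalMs, BooleanSupplier condition)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return true; // Condition met before the deadline.
      }
      Thread.sleep(intervalMs);
    }
    return condition.getAsBoolean(); // One final check at the deadline.
  }

  public static void main(String[] args) throws InterruptedException {
    final long start = System.currentTimeMillis();
    // Example: wait up to 10 s for a condition that becomes true after ~500 ms.
    boolean met = waitFor(10000, 100, new BooleanSupplier() {
      @Override
      public boolean getAsBoolean() {
        return System.currentTimeMillis() - start > 500;
      }
    });
    System.out.println("condition met: " + met);
  }
}

Polling against a deadline keeps such tests deterministic under scheduling jitter while still failing promptly if the cluster never reaches the expected state.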
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index a2ecfb4ead63..5af7b470afd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; @@ -102,8 +102,8 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING)); + TableName.valueOf("tableRestart"), TableState.State.DISABLED, + TableState.State.DISABLING)); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -118,7 +118,7 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() 6, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager() - .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED)); + .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java index 9ecac42883db..c1affd56c832 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java @@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MockServer; import org.apache.hadoop.hbase.zookeeper.ZKAssign; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -140,7 +140,10 @@ public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches() // create a node with OPENED state zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL, region, server.getServerName()); - when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw)); + MasterServices masterServices = Mockito.mock(MasterServices.class); + when(masterServices.getTableDescriptors()).thenReturn(new FSTableDescriptors(conf)); + TableStateManager tsm = new TableStateManager(masterServices); + when(am.getTableStateManager()).thenReturn(tsm); Stat stat = new Stat(); String nodeName = 
ZKAssign.getNodeName(zkw, region.getRegionInfo() .getEncodedName()); @@ -171,8 +174,8 @@ public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches() } catch (Exception e) { expectedException = true; } - assertFalse("The process method should not throw any exception.", - expectedException); + assertFalse("The process method should not throw any exception. " + , expectedException); List<String> znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.assignmentZNode); String regionName = znodes.get(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java index 04102947f1f4..a35e3594b39d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java @@ -19,10 +19,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java index 2949cc2e5466..3735d4ad4f28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java @@ -122,5 +122,4 @@ public boolean isStopped() { } } - -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 16a6450aecb8..7e5656bfa1fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -36,7 +36,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.exceptions.LockTimeoutException; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -388,12 +386,14 @@ public void chore() { choreService.scheduleChore(alterThread); choreService.scheduleChore(splitThread); TEST_UTIL.waitTableEnabled(tableName); + while (true) { List<HRegionInfo> regions = admin.getTableRegions(tableName); LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions)); assertEquals(admin.getTableDescriptor(tableName), desc); for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) { - assertEquals(desc, region.getTableDesc()); + HTableDescriptor regionTableDesc =
region.getTableDesc(); + assertEquals(desc, regionTableDesc); } if (regions.size() >= 5) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index ff479d48e54a..86a54e5171d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Durability; @@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -188,13 +188,13 @@ public boolean processRow(Result rowResult) throws IOException { public static void validateTableIsEnabled(final HMaster master, final TableName tableName) throws IOException { TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); - assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED)); + assertTrue(tsm.isTableState(tableName, TableState.State.ENABLED)); } public static void validateTableIsDisabled(final HMaster master, final TableName tableName) throws IOException { TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); - assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)); + assertTrue(tsm.isTableState(tableName, TableState.State.DISABLED)); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java index f27150efc81e..c4ec0acfb937 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java @@ -23,10 +23,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -45,6 +45,8 @@ public void tearDown() throws Exception { TEST_UTIL.shutdownMiniZKCluster(); } + /* + Note: Relevant fix was 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
index f27150efc81e..c4ec0acfb937 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
@@ -23,10 +23,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -45,6 +45,8 @@ public void tearDown() throws Exception {
     TEST_UTIL.shutdownMiniZKCluster();
   }
 
+  /*
+  Note: Relevant fix was undone by HBASE-7767.
   @Test
   public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Exception {
     // Step 1: start mini zk cluster.
@@ -54,8 +56,9 @@ public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Except
     TableName tableName = TableName.valueOf("hbase:namespace");
     ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
     String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-    builder.setState(ZooKeeperProtos.Table.State.ENABLED);
+    HBaseProtos.TableState.Builder builder = HBaseProtos.TableState.newBuilder();
+    builder.setState(HBaseProtos.TableState.State.ENABLED);
+    builder.setTable(ProtobufUtil.toProtoTableName(tableName));
     byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
     ZKUtil.createSetData(zkw, znode, data);
     LOG.info("Create an orphaned Znode " + znode);
@@ -65,4 +68,5 @@ public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Except
     TEST_UTIL.startMiniCluster();
     assertTrue(TEST_UTIL.getHBaseCluster().getLiveMasterThreads().size() == 1);
   }
+  */
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 0b5e83fafd47..d849f020db59 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -273,8 +274,9 @@ private void verifyTableDescriptor(final TableName tableName,
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    verifyTableDescriptor(htd, tableName, families);
+    TableDescriptor td =
+        FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    verifyTableDescriptor(td.getHTableDescriptor(), tableName, families);
   }
 
   private void verifyTableDescriptor(final HTableDescriptor htd,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
index c3e6279f5725..ab6b2758d71a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
@@ -19,19 +19,22 @@
 package org.apache.hadoop.hbase.protobuf;
 
 import static org.junit.Assert.assertEquals;
-
+import com.google.protobuf.ByteString;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@@ -40,14 +43,13 @@
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.protobuf.ByteString;
-
 /**
  * Class to test ProtobufUtil.
  */
@@ -350,4 +352,30 @@ public void testScan() throws IOException {
         ProtobufUtil.toScan(expectedProto));
     assertEquals(expectedProto, actualProto);
   }
+
+  @Test
+  public void testMetaRegionState() throws Exception {
+    ServerName serverName = ServerName.valueOf("localhost", 1234, 5678);
+    // New region state style.
+    for (RegionState.State state: RegionState.State.values()) {
+      RegionState regionState =
+          new RegionState(HRegionInfo.FIRST_META_REGIONINFO, state, serverName);
+      MetaRegionServer metars = MetaRegionServer.newBuilder()
+          .setServer(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toServerName(serverName))
+          .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
+          .setState(state.convert()).build();
+      // Serialize
+      byte[] data = ProtobufUtil.prependPBMagic(metars.toByteArray());
+      // Deserialize
+      RegionState regionStateNew = ProtobufUtil.parseMetaRegionStateFrom(data, 1);
+      assertEquals(regionState.getServerName(), regionStateNew.getServerName());
+      assertEquals(regionState.getState(), regionStateNew.getState());
+    }
+    // old style.
+    RegionState rs =
+        org.apache.hadoop.hbase.protobuf.ProtobufUtil.parseMetaRegionStateFrom(
+            serverName.getVersionedBytes(), 1);
+    assertEquals(serverName, rs.getServerName());
+    assertEquals(RegionState.State.OPEN, rs.getState());
+  }
 }
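The testMetaRegionState round-trip above leans on two helpers: ProtobufUtil.prependPBMagic, which returns a new array framing the serialized MetaRegionServer message with HBase's PB magic header, and ProtobufUtil.parseMetaRegionStateFrom, whose trailing int is the meta replica id as used here. A standalone sketch of the same framing, under those assumptions (the class name is invented and error handling is elided):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.RegionState;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;

    public class MetaRegionStateSketch {
      // Round-trips a meta location the same way the test above does.
      public static RegionState roundTrip(ServerName sn) throws Exception {
        MetaRegionServer metars = MetaRegionServer.newBuilder()
            .setServer(ProtobufUtil.toServerName(sn))
            .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
            .setState(RegionState.State.OPEN.convert())
            .build();
        // prependPBMagic returns a fresh array; the input is not mutated.
        byte[] data = ProtobufUtil.prependPBMagic(metars.toByteArray());
        // The second argument is the meta replica id.
        return ProtobufUtil.parseMetaRegionStateFrom(data, 1);
      }
    }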
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
index fd8c4dc95a46..cdb6af528b55 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
@@ -23,6 +23,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.util.List;
 import java.util.concurrent.Semaphore;
 
 import org.apache.commons.logging.Log;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -45,10 +47,19 @@ public class TestMasterAddressTracker {
   private static final Log LOG = LogFactory.getLog(TestMasterAddressTracker.class);
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  // Cleaned up after each unit test.
+  private static ZooKeeperWatcher zk;
 
   @Rule
   public TestName name = new TestName();
 
+  @After
+  public void cleanUp() {
+    if (zk != null) {
+      zk.close();
+    }
+  }
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniZKCluster();
@@ -79,9 +90,10 @@ public void testDeleteIfEquals() throws Exception {
    */
   private MasterAddressTracker setupMasterTracker(final ServerName sn, final int infoPort)
       throws Exception {
-    ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+    zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
         name.getMethodName(), null);
     ZKUtil.createAndFailSilent(zk, zk.baseZNode);
+    ZKUtil.createAndFailSilent(zk, zk.backupMasterAddressesZNode);
 
     // Should not have a master yet
     MasterAddressTracker addressTracker = new MasterAddressTracker(zk, null);
@@ -155,6 +167,29 @@ public void testNoMaster() throws Exception {
     assertEquals("Should receive 0 for backup not found.", 0, addressTracker.getMasterInfoPort());
   }
 
+  @Test
+  public void testBackupMasters() throws Exception {
+    final ServerName sn = ServerName.valueOf("localhost", 5678, System.currentTimeMillis());
+    final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1111);
+    assertTrue(addressTracker.hasMaster());
+    ServerName activeMaster = addressTracker.getMasterAddress();
+    assertEquals(sn, activeMaster);
+    // No current backup masters
+    List<ServerName> backupMasters = MasterAddressTracker.getBackupMastersAndRenewWatch(zk);
+    assertEquals(0, backupMasters.size());
+    ServerName backupMaster1 = ServerName.valueOf("localhost", 2222, -1);
+    ServerName backupMaster2 = ServerName.valueOf("localhost", 3333, -1);
+    String backupZNode1 = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupMaster1.toString());
+    String backupZNode2 = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupMaster2.toString());
+    // Add a backup master
+    MasterAddressTracker.setMasterAddress(zk, backupZNode1, backupMaster1, 2222);
+    MasterAddressTracker.setMasterAddress(zk, backupZNode2, backupMaster2, 3333);
+    backupMasters = MasterAddressTracker.getBackupMastersAndRenewWatch(zk);
+    assertEquals(2, backupMasters.size());
+    assertTrue(backupMasters.contains(backupMaster1));
+    assertTrue(backupMasters.contains(backupMaster2));
+  }
+
   public static class NodeCreationListener extends ZooKeeperListener {
     private static final Log LOG = LogFactory.getLog(NodeCreationListener.class);
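The new testBackupMasters coverage doubles as documentation for how the backup-master znodes are consumed. A short sketch of reading the active master plus its backups off an existing watcher; illustrative only, and it assumes the znodes shown above already exist:

    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    public class MasterTopologySketch {
      // Prints the active master and any registered backup masters.
      public static void dump(ZooKeeperWatcher watcher) throws Exception {
        MasterAddressTracker tracker = new MasterAddressTracker(watcher, null);
        tracker.start(); // begin watching the master znode before reading it
        if (tracker.hasMaster()) {
          System.out.println("active: " + tracker.getMasterAddress());
        }
        List<ServerName> backups = MasterAddressTracker.getBackupMastersAndRenewWatch(watcher);
        System.out.println("backups: " + backups);
      }
    }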
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 75dc31f081f4..aafebab201c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -501,8 +502,8 @@ public SnapshotBuilder(final Configuration conf, final FileSystem fs,
       this.desc = desc;
       this.tableRegions = tableRegions;
       this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
-      new FSTableDescriptors(conf, snapshotDir.getFileSystem(conf), rootDir)
-        .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(snapshotDir,
+          new TableDescriptor(htd), false);
     }
 
     public HTableDescriptor getTableDescriptor() {
@@ -719,7 +720,8 @@ public HTableDescriptor createHtd(final String tableName) {
     private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
         throws IOException {
       Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
-      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir,
+          new TableDescriptor(htd), false);
 
       assertTrue(nregions % 2 == 0);
       RegionData[] regions = new RegionData[nregions];
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index df01d710699f..3a22e400d20c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -35,14 +35,16 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -71,6 +73,7 @@ public void testRegexAgainstOldStyleTableInfo() {
   public void testCreateAndUpdate() throws IOException {
     Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd));
@@ -78,7 +81,7 @@ public void testCreateAndUpdate() throws IOException {
     FileStatus[] statuses = fs.listStatus(testdir);
     assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
-      fstd.updateTableDescriptor(htd);
+      fstd.updateTableDescriptor(td);
     }
     statuses = fs.listStatus(testdir);
     assertTrue(statuses.length == 1);
@@ -92,20 +95,29 @@ public void testSequenceIdAdvancesOnTableInfo() throws IOException {
     Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
     HTableDescriptor htd = new HTableDescriptor(
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
+    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
-    Path p0 = fstd.updateTableDescriptor(htd);
+    Path p0 = fstd.updateTableDescriptor(td);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
-    Path p1 = fstd.updateTableDescriptor(htd);
+    Path p1 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p0));
     int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
     assertTrue(i1 == i0 + 1);
-    Path p2 = fstd.updateTableDescriptor(htd);
+    Path p2 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p1));
     int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
     assertTrue(i2 == i1 + 1);
+    td = new TableDescriptor(htd, TableState.State.DISABLED);
+    Path p3 = fstd.updateTableDescriptor(td);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p2));
+    int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
+    assertTrue(i3 == i2 + 1);
+    TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
+    assertEquals(td, descriptor);
   }
 
   @Test
@@ -158,12 +170,13 @@ public void testReadingHTDFromFS()
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path rootdir = UTIL.getDataTestDir(name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     fstd.createTableDescriptor(htd);
-    HTableDescriptor htd2 =
+    TableDescriptor td2 =
         FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
-    assertTrue(htd.equals(htd2));
+    assertTrue(td.equals(td2));
   }
 
   @Test
@@ -177,7 +190,8 @@ public void testHTableDescriptors()
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
-      HTableDescriptor htd = new HTableDescriptor(name + i);
+      TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i),
+          TableState.State.ENABLED);
       htds.createTableDescriptor(htd);
     }
 
@@ -191,7 +205,7 @@ public void testHTableDescriptors()
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(htd);
+      htds.updateTableDescriptor(new TableDescriptor(htd));
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -232,7 +246,7 @@ public void testHTableDescriptorsNoCache()
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(htd);
+      htds.updateTableDescriptor(new TableDescriptor(htd));
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -378,18 +392,19 @@ public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException
     Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd));
     assertFalse(fstd.createTableDescriptor(htd));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
-    assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
+    assertTrue(fstd.createTableDescriptor(td)); //this will re-create
     Path tableDir = fstd.getTableDir(htd.getTableName());
     Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
     FileStatus[] statuses = fs.listStatus(tmpTableDir);
     assertTrue(statuses.length == 0);
-    assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
+    assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
   }
 
   private static class FSTableDescriptorsTest
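The TestFSTableDescriptors changes above show the new persistence contract: FSTableDescriptors now reads and writes TableDescriptor, which carries the table state alongside the HTableDescriptor schema in the .tableinfo file. A minimal write-then-read sketch built only from the calls exercised above; the path and table name are illustrative, and a writable local directory is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class DescriptorStateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = new Path("/tmp/hbase-descriptor-sketch");
        FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
        // Schema and state travel together now.
        TableDescriptor td = new TableDescriptor(htd, TableState.State.DISABLED);
        fstd.updateTableDescriptor(td);
        // Reading it back yields an equal TableDescriptor, state included.
        TableDescriptor readBack = fstd.getDescriptor(htd.getTableName());
        System.out.println(td.equals(readBack));
      }
    }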
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 4f76cb6f7b7d..7b6a4b367cb7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -68,7 +68,6 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
@@ -106,7 +105,6 @@
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -2891,55 +2889,6 @@ public void run() {
     tableLockManager.tableDeleted(tableName);
   }
 
-  /**
-   * Test orphaned table ZNode (for table states)
-   */
-  @Test
-  public void testOrphanedTableZNode() throws Exception {
-    TableName table = TableName.valueOf("testOrphanedZKTableEntry");
-
-    try {
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getTableStateManager()
-          .setTableState(table, ZooKeeperProtos.Table.State.ENABLING);
-
-      try {
-        setupTable(table);
-        Assert.fail(
-            "Create table should fail when its ZNode has already existed with ENABLING state.");
-      } catch(TableExistsException t) {
-        //Expected exception
-      }
-      // The setup table was interrupted in some state that needs to some cleanup.
-      try {
-        cleanupTable(table);
-      } catch (IOException e) {
-        // Because create table failed, it is expected that the cleanup table would
-        // throw some exception. Ignore and continue.
-      }
-
-      HBaseFsck hbck = doFsck(conf, false);
-      assertTrue(hbck.getErrors().getErrorList().contains(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY));
-
-      // fix the orphaned ZK entry
-      hbck = doFsck(conf, true);
-
-      // check that orpahned ZK table entry is gone.
-      hbck = doFsck(conf, false);
-      assertFalse(hbck.getErrors().getErrorList().contains(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY));
-      // Now create table should succeed.
-      setupTable(table);
-    } finally {
-      // This code could be called that either a table was created successfully or set up
-      // table failed in some unknown state. Therefore, clean up can either succeed or fail.
-      try {
-        cleanupTable(table);
-      } catch (IOException e) {
-        // The cleanup table would throw some exception if create table failed in some state.
-        // Ignore this exception
-      }
-    }
-  }
-
   @Test (timeout=180000)
   public void testMetaOffline() throws Exception {
     // check no errors
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
deleted file mode 100644
index e81c89f03407..000000000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
-
-@Category(MediumTests.class)
-public class TestZKTableStateManager {
-  private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Test
-  public void testTableStates()
-      throws CoordinatedStateException, IOException, KeeperException, InterruptedException {
-    final TableName name =
-        TableName.valueOf("testDisabled");
-    Abortable abortable = new Abortable() {
-      @Override
-      public void abort(String why, Throwable e) {
-        LOG.info(why, e);
-      }
-
-      @Override
-      public boolean isAborted() {
-        return false;
-      }
-
-    };
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-        name.getNameAsString(), abortable, true);
-    TableStateManager zkt = new ZKTableStateManager(zkw);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLING);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLED);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLING);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLED);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setDeletedTable(name);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-  }
-}
diff --git a/pom.xml b/pom.xml
index a6518ae76e35..19ffcd313c1a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,6 +28,7 @@
        https://issues.apache.org/jira/browse/HBASE-6795.
   -->
+
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache