diff --git a/ambari-agent/conf/unix/ambari-agent b/ambari-agent/conf/unix/ambari-agent
index a5cab3202fe..ba1c3abdeee 100644
--- a/ambari-agent/conf/unix/ambari-agent
+++ b/ambari-agent/conf/unix/ambari-agent
@@ -26,7 +26,7 @@ AMBARI_AGENT=ambari-agent
PIDFILE=/var/run/ambari-agent/$AMBARI_AGENT.pid
OUTFILE=/var/log/ambari-agent/ambari-agent.out
LOGFILE=/var/log/ambari-agent/ambari-agent.log
-AGENT_SCRIPT=/usr/lib/python2.6/site-packages/ambari_agent/main.py
+AGENT_SCRIPT=/usr/sbin/ambari-agent.py
OK=1
NOTOK=0
@@ -65,27 +65,8 @@ export AMBARI_PASSPHRASE=$RESOLVED_AMBARI_PASSPHRASE
#echo $AMBARI_PASSPHRASE
-# check for version
-check_python_version ()
-{
- echo "Verifying Python version compatibility..."
- majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
- minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
- numversion=$(( 10 * $majversion + $minversion))
- if (( $numversion < 26 )); then
- echo "ERROR: Found Python version $majversion.$minversion. Ambari Agent requires Python version > 2.6"
- return $NOTOK
- fi
- echo "Using python " $PYTHON
- return $OK
-}
-
case "$1" in
start)
- check_python_version
- if [ "$?" -eq "$NOTOK" ]; then
- exit -1
- fi
echo "Checking for previously running Ambari Agent..."
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
@@ -101,7 +82,7 @@ case "$1" in
fi
fi
echo "Starting ambari-agent"
- nohup $PYTHON $AGENT_SCRIPT > $OUTFILE 2>&1 &
+ nohup $AGENT_SCRIPT > $OUTFILE 2>&1 &
sleep 2
PID=$!
echo "Verifying $AMBARI_AGENT process status..."
@@ -138,10 +119,6 @@ case "$1" in
fi
;;
stop)
- check_python_version
- if [ "$?" -eq "$NOTOK" ]; then
- exit -1
- fi
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
echo "Found $AMBARI_AGENT PID: $PID"
@@ -151,7 +128,7 @@ case "$1" in
tput sgr0
else
echo "Stopping $AMBARI_AGENT"
- $PYTHON $AGENT_SCRIPT stop
+ $AGENT_SCRIPT stop
fi
echo "Removing PID file at $PIDFILE"
rm -f $PIDFILE
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 107686b84a7..4855a3362d3 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -31,6 +31,9 @@
Ambari AgentUTF-8
+ 2.6
+ python2
+ python >= ${python.ver}${project.artifactId}-${project.version}1/usr
@@ -39,10 +42,9 @@
falsehttp://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gzhttp://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz
- /usr/lib/python2.6/site-packages/ambari_agent
+ /usr/lib/python${python.ver}/site-packages/ambari_agenthttp://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz/usr/lib/ambari-agent/lib
- python >= 2.6
@@ -89,7 +91,7 @@
- python2.6
+ pythonsrc/test/pythonunitTests.py
@@ -107,7 +109,7 @@
- python2.6
+ pythontarget/ambari-agent-${project.version}${project.basedir}/src/main/python/setup.py
@@ -146,7 +148,7 @@
opensslzlib
- ${python.ver}
+ ${python.requires}src/main/package/rpm/postinstall.sh
@@ -165,7 +167,7 @@
false
- ${install.dir}
+ ${agent.install.dir}${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent
@@ -236,6 +238,12 @@
conf/unix/ambari-agent
+
+ src/main/python/ambari-agent.py
+
+
+ src/main/python/ambari-machine
+
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs.pp
new file mode 100644
index 00000000000..8f6dc5e1e26
--- /dev/null
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs.pp
@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hcfs_client(
+ $service_state = $hdp::params::cluster_client_state,
+ $opts = {}
+) inherits hdp-hadoop::params
+{
+ $hdp::params::service_exists['hdp-hadoop::hcfs_client'] = true
+ Hdp-hadoop::Common<||>{service_states +> $service_state}
+ Hdp-hadoop::Package<||>{include_64_bit => true}
+ Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+ if ($service_state == 'no_op') {
+ } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+ #adds package, users and directories, and common hadoop configs
+ include hdp-hadoop::initialize
+ }
+}
\ No newline at end of file
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs_service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs_service_check.pp
new file mode 100644
index 00000000000..a0ae930fa14
--- /dev/null
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hcfs_service_check.pp
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hcfs_service_check(
+ $service_state = $hdp::params::cluster_client_state
+) inherits hdp-hadoop::params
+{
+ $hdp::params::service_exists['hdp-hadoop::hcfs'] = true
+}
\ No newline at end of file
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
index 8c254999789..5dadabdc307 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
@@ -229,7 +229,7 @@
hdp::user{ $hdfs_user:
groups => [$hdp::params::user_group]
}
- if ($hdfs_user != $mapred_user) {
+  if ( !defined(Hdp::User[$mapred_user]) ) {
hdp::user { $mapred_user:
groups => [$hdp::params::user_group]
}
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
index e54535d7390..eea8005bb5f 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
@@ -54,7 +54,7 @@
$dtnode_heapsize = hdp_default("dtnode_heapsize","1024m")
$ttnode_heapsize = hdp_default("ttnode_heapsize","1024m")
- $hadoop_heapsize = hdp_default("hadoop_heapsize","1024m")
+ $hadoop_heapsize = hdp_default("hadoop_heapsize","1024")
$hdfs_log_dir_prefix = hdp_default("hdfs_log_dir_prefix","/var/log/hadoop")
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py b/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
index 33bdba0bb2e..2793c854017 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
+++ b/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
index 97e42451f28..b2ffaedb860 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
@@ -267,7 +267,7 @@
$dfs_data_dir = hdp_default("hdfs-site/dfs.data.dir","/tmp/hadoop-hdfs/dfs/data")
### artifact dir
- $artifact_dir = hdp_default("artifact_dir","/tmp/HDP-artifacts/")
+ $artifact_dir = hdp_default("artifact_dir","/tmp/HDP-artifacts")
### artifacts download url ##
$apache_artifacts_download_url = hdp_default("apache_artifacts_download_url","")
diff --git a/ambari-agent/src/main/python/ambari-agent.py b/ambari-agent/src/main/python/ambari-agent.py
new file mode 100644
index 00000000000..feaea5308cf
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari-agent.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python2
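+# Launcher referenced by the ambari-agent init script as /usr/sbin/ambari-agent.py;
+# it simply delegates to ambari_agent.main().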
+from ambari_agent import main as agent_main
+
+def main():
+ return agent_main.main()
+
+if __name__ == "__main__":
+ main()
diff --git a/ambari-agent/src/main/python/ambari-machine b/ambari-agent/src/main/python/ambari-machine
new file mode 100644
index 00000000000..6768a5980c8
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari-machine
@@ -0,0 +1,11 @@
+#!/usr/bin/env python2
+import sys
+from ambari_agent import machine as agent_machine
+
+def main():
+ # Not sure why machine.py passes sys.argv explicitly
+ # since it's globally available but we'll keep that convention
+ return agent_machine.main(sys.argv)
+
+if __name__ == "__main__":
+ main()
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index d2a6ae6d8c0..6320c3152fa 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/ActualConfigHandler.py b/ambari-agent/src/main/python/ambari_agent/ActualConfigHandler.py
index ff7220bc175..db009d6f11b 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActualConfigHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActualConfigHandler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index 8222786ac19..fcd590b7a50 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
@@ -55,7 +55,7 @@
[heartbeat]
state_interval = 6
dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-rpms=hadoop,openssl,wget,net-snmp,ntpd,ruby,ganglia,nagios
+rpms=glusterfs,openssl,wget,net-snmp,ntpd,ruby,ganglia,nagios
"""
s = StringIO.StringIO(content)
config.readfp(s)
@@ -77,6 +77,9 @@
]
rolesToClass = {
+ 'HCFS': 'hdp-hadoop::hcfs',
+ 'HCFS_CLIENT': 'hdp-hadoop::hcfs_client',
+ 'HCFS_SERVICE_CHECK': 'hdp-hadoop::hcfs_service_check',
'NAMENODE': 'hdp-hadoop::namenode',
'DATANODE': 'hdp-hadoop::datanode',
'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
@@ -136,6 +139,7 @@
}
servicesToPidNames = {
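+    # HCFS is backed by GlusterFS; its management daemon writes glusterd.pid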
+ 'HCFS' : 'glusterd.pid',
'NAMENODE': 'hadoop-{USER}-namenode.pid$',
'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
'DATANODE': 'hadoop-{USER}-datanode.pid$',
@@ -164,6 +168,8 @@
linuxUserPattern = '[A-Za-z0-9_-]*[$]?'
pidPathesVars = [
+ {'var' : 'hcfs_pid_dir_prefix',
+ 'defaultValue' : '/var/run'},
{'var' : 'hadoop_pid_dir_prefix',
'defaultValue' : '/var/run/hadoop'},
{'var' : 'hadoop_pid_dir_prefix',
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index eaec3bb7702..24d259ac6d8 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 2d780c1419b..5e65c5e7bb2 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
index 79390ca217c..6395d1c94e8 100644
--- a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
+++ b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index f66a143114f..6eac99adec7 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/LiveStatus.py b/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
index d13167cc122..d2825d00119 100644
--- a/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
+++ b/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py b/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
index bfce47b4cc5..753348039ea 100644
--- a/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
+++ b/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py b/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
index bebe1b053fb..1ab5d0e7044 100644
--- a/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py b/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
index 0d56486baca..530023b6e88 100644
--- a/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/Register.py b/ambari-agent/src/main/python/ambari_agent/Register.py
index d93e35b3246..266d86d688d 100644
--- a/ambari-agent/src/main/python/ambari_agent/Register.py
+++ b/ambari-agent/src/main/python/ambari_agent/Register.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py b/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
index 0817c8ff5c3..78e7dc2099e 100644
--- a/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
+++ b/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/StackVersionsFileHandler.py b/ambari-agent/src/main/python/ambari_agent/StackVersionsFileHandler.py
index 1731045476f..77c5253f493 100644
--- a/ambari-agent/src/main/python/ambari_agent/StackVersionsFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/StackVersionsFileHandler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
index 8ce79795541..718cedc7511 100644
--- a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
+++ b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/UpgradeExecutor.py b/ambari-agent/src/main/python/ambari_agent/UpgradeExecutor.py
index b1899218dab..828b7efdaab 100644
--- a/ambari-agent/src/main/python/ambari_agent/UpgradeExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/UpgradeExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/__init__.py b/ambari-agent/src/main/python/ambari_agent/__init__.py
index d904724b574..15082eaeac2 100644
--- a/ambari-agent/src/main/python/ambari_agent/__init__.py
+++ b/ambari-agent/src/main/python/ambari_agent/__init__.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
diff --git a/ambari-agent/src/main/python/ambari_agent/hostname.py b/ambari-agent/src/main/python/ambari_agent/hostname.py
index a418e4f08d4..6a3d0e461d9 100644
--- a/ambari-agent/src/main/python/ambari_agent/hostname.py
+++ b/ambari-agent/src/main/python/ambari_agent/hostname.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/main.py b/ambari-agent/src/main/python/ambari_agent/main.py
index 3f1d26e4443..033981ed89c 100644
--- a/ambari-agent/src/main/python/ambari_agent/main.py
+++ b/ambari-agent/src/main/python/ambari_agent/main.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py b/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
index 8b1b6e5289f..8678b2b610d 100644
--- a/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
+++ b/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/main/python/ambari_agent/security.py b/ambari-agent/src/main/python/ambari_agent/security.py
index de8bd005b1c..bde9c49029f 100644
--- a/ambari-agent/src/main/python/ambari_agent/security.py
+++ b/ambari-agent/src/main/python/ambari_agent/security.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
diff --git a/ambari-agent/src/main/python/ambari_agent/shell.py b/ambari-agent/src/main/python/ambari_agent/shell.py
index 296efc6a720..f4a1f352109 100644
--- a/ambari-agent/src/main/python/ambari_agent/shell.py
+++ b/ambari-agent/src/main/python/ambari_agent/shell.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestActionQueue.py b/ambari-agent/src/test/python/TestActionQueue.py
index f5d8d2a7a47..308ee5edb2e 100644
--- a/ambari-agent/src/test/python/TestActionQueue.py
+++ b/ambari-agent/src/test/python/TestActionQueue.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestActualConfigHandler.py b/ambari-agent/src/test/python/TestActualConfigHandler.py
index 4824dd0f0cb..4745610366b 100644
--- a/ambari-agent/src/test/python/TestActualConfigHandler.py
+++ b/ambari-agent/src/test/python/TestActualConfigHandler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestAgentActions.py b/ambari-agent/src/test/python/TestAgentActions.py
index dc3c9194f1f..04ce6e80dc7 100644
--- a/ambari-agent/src/test/python/TestAgentActions.py
+++ b/ambari-agent/src/test/python/TestAgentActions.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestCertGeneration.py b/ambari-agent/src/test/python/TestCertGeneration.py
index 94bb9f633a2..52164470eb0 100644
--- a/ambari-agent/src/test/python/TestCertGeneration.py
+++ b/ambari-agent/src/test/python/TestCertGeneration.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestController.py b/ambari-agent/src/test/python/TestController.py
index 182868d41c3..5778135b3fc 100644
--- a/ambari-agent/src/test/python/TestController.py
+++ b/ambari-agent/src/test/python/TestController.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
diff --git a/ambari-agent/src/test/python/TestGrep.py b/ambari-agent/src/test/python/TestGrep.py
index 108f22ac17c..37528e50937 100644
--- a/ambari-agent/src/test/python/TestGrep.py
+++ b/ambari-agent/src/test/python/TestGrep.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestHardware.py b/ambari-agent/src/test/python/TestHardware.py
index 89b4bdb26ef..2813876ede2 100644
--- a/ambari-agent/src/test/python/TestHardware.py
+++ b/ambari-agent/src/test/python/TestHardware.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestHeartbeat.py b/ambari-agent/src/test/python/TestHeartbeat.py
index a7370fff0a2..7797d3c2654 100644
--- a/ambari-agent/src/test/python/TestHeartbeat.py
+++ b/ambari-agent/src/test/python/TestHeartbeat.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestHostname.py b/ambari-agent/src/test/python/TestHostname.py
index 3198905b4d0..6fdd472d451 100644
--- a/ambari-agent/src/test/python/TestHostname.py
+++ b/ambari-agent/src/test/python/TestHostname.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestLiveStatus.py b/ambari-agent/src/test/python/TestLiveStatus.py
index 60cbfc24e95..1f26ebb494e 100644
--- a/ambari-agent/src/test/python/TestLiveStatus.py
+++ b/ambari-agent/src/test/python/TestLiveStatus.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestMain.py b/ambari-agent/src/test/python/TestMain.py
index 799bc8ff8b9..3c8d93c53e4 100644
--- a/ambari-agent/src/test/python/TestMain.py
+++ b/ambari-agent/src/test/python/TestMain.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestManifestGenerator.py b/ambari-agent/src/test/python/TestManifestGenerator.py
index 507435ff646..43ed5c18e3c 100644
--- a/ambari-agent/src/test/python/TestManifestGenerator.py
+++ b/ambari-agent/src/test/python/TestManifestGenerator.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestNetUtil.py b/ambari-agent/src/test/python/TestNetUtil.py
index e1fe02d7d03..10ddca3e0c4 100644
--- a/ambari-agent/src/test/python/TestNetUtil.py
+++ b/ambari-agent/src/test/python/TestNetUtil.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestProcessHelper.py b/ambari-agent/src/test/python/TestProcessHelper.py
index c7a42619f80..3b14ba3f56e 100644
--- a/ambari-agent/src/test/python/TestProcessHelper.py
+++ b/ambari-agent/src/test/python/TestProcessHelper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
diff --git a/ambari-agent/src/test/python/TestPuppetExecutor.py b/ambari-agent/src/test/python/TestPuppetExecutor.py
index d4512e99e02..202818d63dd 100644
--- a/ambari-agent/src/test/python/TestPuppetExecutor.py
+++ b/ambari-agent/src/test/python/TestPuppetExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestPuppetExecutorManually.py b/ambari-agent/src/test/python/TestPuppetExecutorManually.py
index 90151b6db88..5299edbac47 100644
--- a/ambari-agent/src/test/python/TestPuppetExecutorManually.py
+++ b/ambari-agent/src/test/python/TestPuppetExecutorManually.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestPythonExecutor.py b/ambari-agent/src/test/python/TestPythonExecutor.py
index 85705519a46..1fcf180ce23 100644
--- a/ambari-agent/src/test/python/TestPythonExecutor.py
+++ b/ambari-agent/src/test/python/TestPythonExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestRegistration.py b/ambari-agent/src/test/python/TestRegistration.py
index 4ac8d3d5bab..0cb4971c9e4 100644
--- a/ambari-agent/src/test/python/TestRegistration.py
+++ b/ambari-agent/src/test/python/TestRegistration.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestRepoInstaller.py b/ambari-agent/src/test/python/TestRepoInstaller.py
index 54273b4051b..28044d1264a 100644
--- a/ambari-agent/src/test/python/TestRepoInstaller.py
+++ b/ambari-agent/src/test/python/TestRepoInstaller.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestSecurity.py b/ambari-agent/src/test/python/TestSecurity.py
index 0c656041ede..5eb17b214f2 100644
--- a/ambari-agent/src/test/python/TestSecurity.py
+++ b/ambari-agent/src/test/python/TestSecurity.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestStackVersionsFileHandler.py b/ambari-agent/src/test/python/TestStackVersionsFileHandler.py
index d44a8e5d262..73de8d96e4e 100644
--- a/ambari-agent/src/test/python/TestStackVersionsFileHandler.py
+++ b/ambari-agent/src/test/python/TestStackVersionsFileHandler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestStatusCheck.py b/ambari-agent/src/test/python/TestStatusCheck.py
index 98b299ce583..28453382890 100644
--- a/ambari-agent/src/test/python/TestStatusCheck.py
+++ b/ambari-agent/src/test/python/TestStatusCheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/TestUpgradeExecutor.py b/ambari-agent/src/test/python/TestUpgradeExecutor.py
index 7abb9592024..fc8bca73818 100644
--- a/ambari-agent/src/test/python/TestUpgradeExecutor.py
+++ b/ambari-agent/src/test/python/TestUpgradeExecutor.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/examples/ControllerTester.py b/ambari-agent/src/test/python/examples/ControllerTester.py
index 64295b5abac..6abcfed2a62 100644
--- a/ambari-agent/src/test/python/examples/ControllerTester.py
+++ b/ambari-agent/src/test/python/examples/ControllerTester.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/examples/debug_testcase_example.py b/ambari-agent/src/test/python/examples/debug_testcase_example.py
index 74bd8174323..6c238024b37 100644
--- a/ambari-agent/src/test/python/examples/debug_testcase_example.py
+++ b/ambari-agent/src/test/python/examples/debug_testcase_example.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-agent/src/test/python/unitTests.py b/ambari-agent/src/test/python/unitTests.py
index 79d5ce8883c..c82a23ae06c 100644
--- a/ambari-agent/src/test/python/unitTests.py
+++ b/ambari-agent/src/test/python/unitTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-client/src/test/python/unitTests.py b/ambari-client/src/test/python/unitTests.py
index 0ba2cea048f..181c6395c11 100644
--- a/ambari-client/src/test/python/unitTests.py
+++ b/ambari-client/src/test/python/unitTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ambari-server/conf/unix/ambari.properties b/ambari-server/conf/unix/ambari.properties
index e65dc95b336..77154326645 100644
--- a/ambari-server/conf/unix/ambari.properties
+++ b/ambari-server/conf/unix/ambari.properties
@@ -24,8 +24,8 @@ metadata.path=/var/lib/ambari-server/resources/stacks
server.version.file=/var/lib/ambari-server/resources/version
webapp.dir=/usr/lib/ambari-server/web
bootstrap.dir=/var/run/ambari-server/bootstrap
-bootstrap.script=/usr/lib/python2.6/site-packages/ambari_server/bootstrap.py
-bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py
+bootstrap.script=/usr/lib/exec/ambari-server/bootstrap.py
+bootstrap.setup_agent.script=/usr/lib/exec/ambari-server/setupAgent.py
server.persistence.inMemory=false
api.authenticate=true
server.connection.max.idle.millis=900000
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index b0b131c465c..a661d6930e8 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -28,7 +28,9 @@
Ambari ServerUTF-8
- python >= 2.6
+ 2.6
+ python2
+ python >= ${python.ver}
@@ -117,7 +119,7 @@
postgresql-server >= 8.1openssl
- ${python.ver}
+ ${python.requires}src/main/package/rpm/postinstall.sh
@@ -225,6 +227,23 @@
+
+ /usr/lib/exec/ambari-server
+ 755
+ root
+ root
+
+
+ src/main/python/bootstrap.py
+
+
+ src/main/python/setupAgent.py
+
+
+ src/main/python/os_type_check.sh
+
+
+ /var/lib/ambari-server/keys/db755
@@ -319,23 +338,6 @@
-
- /usr/lib/python2.6/site-packages/ambari_server
- 755
- root
- root
-
-
- src/main/python/bootstrap.py
-
-
- src/main/python/setupAgent.py
-
-
- src/main/python/os_type_check.sh
-
-
- /var/run/ambari-server755
@@ -390,7 +392,7 @@
- python2.6
+ ${python.exec}src/test/pythonunitTests.py
diff --git a/ambari-server/sbin/ambari-server b/ambari-server/sbin/ambari-server
index cee6d532780..5c800fc01c9 100644
--- a/ambari-server/sbin/ambari-server
+++ b/ambari-server/sbin/ambari-server
@@ -23,18 +23,10 @@
export PATH=/usr/lib/ambari-server/*:$PATH
export AMBARI_CONF_DIR=/etc/ambari-server/conf:$PATH
-if [ -a /usr/bin/python2.6 ]; then
- PYTHON=/usr/bin/python2.6
-fi
-
if [ -a /var/lib/ambari-server/ambari-env.sh ]; then
. /var/lib/ambari-server/ambari-env.sh
fi
-if [ "x$PYTHON" == "x" ]; then
- PYTHON=/usr/bin/python
-fi
-
if [ "x$AMBARI_PASSPHRASE" == "x" ]; then
AMBARI_PASSPHRASE="DEV"
fi
@@ -45,28 +37,18 @@ fi
export AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
-# check for version
-majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
-minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
-numversion=$(( 10 * $majversion + $minversion))
-if (( $numversion < 26 )); then
- echo "Need python version > 2.6"
- exit 1
-fi
-echo "Using python " $PYTHON
-
case "$1" in
start)
echo -e "Starting ambari-server"
- $PYTHON /usr/sbin/ambari-server.py $@
+ /usr/sbin/ambari-server.py $@
;;
stop)
echo -e "Stopping ambari-server"
- $PYTHON /usr/sbin/ambari-server.py $@
+ /usr/sbin/ambari-server.py $@
;;
reset)
echo -e "Resetting ambari-server"
- $PYTHON /usr/sbin/ambari-server.py $@
+ /usr/sbin/ambari-server.py $@
;;
restart)
echo -e "Restarting ambari-server"
@@ -94,7 +76,7 @@ case "$1" in
echo -e "Run postgresql start"
/sbin/service postgresql start
echo -e "Setup ambari-server"
- $PYTHON /usr/sbin/ambari-server.py $@
+ /usr/sbin/ambari-server.py $@
;;
*)
echo "Usage: /usr/sbin/ambari-server {start|stop|restart|setup|upgrade|status|upgradestack} [options]"
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/Role.java b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
index c4d5eb49c87..d0298913a43 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/Role.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
@@ -21,6 +21,9 @@
//This enumerates all the roles that the server can handle.
//Each component or a job maps to a particular role.
public enum Role {
+ HCFS_SERVICE_CHECK,
+ HCFS_CLIENT,
+ PEERSTATUS,
ZOOKEEPER_SERVER,
ZOOKEEPER_CLIENT,
NAMENODE,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 8998cfa83aa..ca3650f09fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -49,7 +49,7 @@ public class Configuration {
public static final String BOOTSTRAP_DIR_DEFAULT = "/var/run/ambari-server/bootstrap";
public static final String WEBAPP_DIR = "webapp.dir";
public static final String BOOTSTRAP_SCRIPT = "bootstrap.script";
- public static final String BOOTSTRAP_SCRIPT_DEFAULT = "/usr/bin/ambari_bootstrap";
+ public static final String BOOTSTRAP_SCRIPT_DEFAULT = "/usr/lib/exec/ambari-server/bootstrap.py";
public static final String BOOTSTRAP_SETUP_AGENT_SCRIPT = "bootstrap.setup_agent.script";
public static final String BOOTSTRAP_SETUP_AGENT_PASSWORD = "bootstrap.setup_agent.password";
public static final String BOOTSTRAP_MASTER_HOSTNAME = "bootstrap.master_host_name";
@@ -335,7 +335,7 @@ public String getBootStrapScript() {
public String getBootSetupAgentScript() {
return properties.getProperty(BOOTSTRAP_SETUP_AGENT_SCRIPT,
- "/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py");
+ "/usr/lib/exec/ambari-server/setupAgent.py");
}
public String getBootSetupAgentPassword() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 544d9a508b2..29f8c08bc33 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -123,12 +123,6 @@ public class AmbariManagementControllerImpl implements
private final Gson gson;
- private static RoleCommandOrder rco;
- static {
- rco = new RoleCommandOrder();
- RoleCommandOrder.initialize();
- }
-
@Inject
private ServiceFactory serviceFactory;
@Inject
@@ -177,6 +171,13 @@ public AmbariManagementControllerImpl(ActionManager actionManager,
}
}
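+  // Builds a RoleCommandOrder for the given cluster so the command ordering
+  // can differ when the cluster runs HCFS instead of HDFS.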
+ private RoleCommandOrder getRCO(Cluster cluster) {
+ RoleCommandOrder rco;
+ rco = injector.getInstance(RoleCommandOrder.class);
+ rco.initialize(cluster);
+ return rco;
+  }
+
@Override
public void createCluster(ClusterRequest request)
throws AmbariException {
@@ -2203,6 +2204,7 @@ private List<Stage> doStageCreation(Cluster cluster,
clusters.getHostsForCluster(cluster.getClusterName()), cluster, hostsMap, injector));
}
+ RoleCommandOrder rco = this.getRCO(cluster);
RoleGraph rg = new RoleGraph(rco);
rg.build(stage);
return rg.getStages();
@@ -4162,6 +4164,9 @@ public RequestStatusResponse createActions(Set<ActionRequest> request, Map
     List<Stage> stages = rg.getStages();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
index de04dd4dd58..a8b47987fcd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
@@ -41,6 +41,7 @@ public ActionMetadata() {
private void fillServiceClients() {
serviceClients.put("hdfs" , Role.HDFS_CLIENT.toString());
+ serviceClients.put("hcfs" , Role.HCFS_CLIENT.toString());
serviceClients.put("hbase" , Role.HBASE_CLIENT.toString());
serviceClients.put("mapreduce" , Role.MAPREDUCE_CLIENT.toString());
serviceClients.put("zookeeper" , Role.ZOOKEEPER_CLIENT.toString());
@@ -53,6 +54,7 @@ private void fillServiceClients() {
private void fillServiceActions() {
serviceActions.put("hdfs" , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString()));
+ serviceActions.put("hcfs" , Arrays.asList(Role.HCFS_SERVICE_CHECK.toString()));
serviceActions.put("hbase" , Arrays.asList(Role.HBASE_SERVICE_CHECK.toString()));
serviceActions.put("mapreduce" , Arrays.asList(Role.MAPREDUCE_SERVICE_CHECK.toString()));
serviceActions.put("zookeeper" , Arrays.asList(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK.toString()));
@@ -66,6 +68,7 @@ private void fillServiceActions() {
private void fillServiceCheckActions() {
serviceCheckActions.put("hdfs", Role.HDFS_SERVICE_CHECK.toString());
+ serviceCheckActions.put("hcfs", Role.HCFS_SERVICE_CHECK.toString());
serviceCheckActions.put("hbase", Role.HBASE_SERVICE_CHECK.toString());
serviceCheckActions.put("mapreduce",
Role.MAPREDUCE_SERVICE_CHECK.toString());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
index 2a6113663a4..0a26df167ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
@@ -18,6 +18,7 @@
package org.apache.ambari.server.metadata;
import java.util.HashMap;
+
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@@ -25,6 +26,10 @@
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.stageplanner.RoleGraphNode;
+import org.apache.ambari.server.state.Cluster;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.ambari.server.AmbariException;
/**
* This class is used to establish the order between two roles. This class
@@ -32,6 +37,9 @@
*/
public class RoleCommandOrder {
+ private final static Logger LOG =
+ LoggerFactory.getLogger(RoleCommandOrder.class);
+
private static class RoleCommandPair {
Role role;
RoleCommand cmd;
@@ -63,7 +71,7 @@ public boolean equals(Object other) {
/**
* key -> blocked role command value -> set of blocker role commands.
*/
-  private static Map<RoleCommandPair, Set<RoleCommandPair>> dependencies = new HashMap<RoleCommandPair, Set<RoleCommandPair>>();
+  private Map<RoleCommandPair, Set<RoleCommandPair>> dependencies = new HashMap<RoleCommandPair, Set<RoleCommandPair>>();
/**
* Add a pair of tuples where the tuple defined by the first two parameters are blocked on
@@ -73,203 +81,333 @@ public boolean equals(Object other) {
* @param blockerRole The role that is blocking
* @param blockerCommand The command on the blocking role
*/
- private static void addDependency(Role blockedRole,
+ private void addDependency(Role blockedRole,
RoleCommand blockedCommand, Role blockerRole, RoleCommand blockerCommand) {
RoleCommandPair rcp1 = new RoleCommandPair(blockedRole, blockedCommand);
RoleCommandPair rcp2 = new RoleCommandPair(blockerRole, blockerCommand);
- if (dependencies.get(rcp1) == null) {
-      dependencies.put(rcp1, new HashSet<RoleCommandPair>());
+ if (this.dependencies.get(rcp1) == null) {
+      this.dependencies.put(rcp1, new HashSet<RoleCommandPair>());
}
- dependencies.get(rcp1).add(rcp2);
+ this.dependencies.get(rcp1).add(rcp2);
}
- public static void initialize() {
- // Installs
- addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HIVE_CLIENT,
- RoleCommand.INSTALL);
- addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HCAT,
- RoleCommand.INSTALL);
- addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.MAPREDUCE_CLIENT,
- RoleCommand.INSTALL);
- addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.OOZIE_CLIENT,
- RoleCommand.INSTALL);
+ public void initialize(Cluster cluster) {
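+    // Register one of two dependency sets: an HCFS-based ordering (no
+    // NAMENODE/DATANODE dependencies, PEERSTATUS instead) or the default
+    // HDFS-based ordering.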
+ Boolean hasHCFS = false;
+
+ try {
+ if (cluster != null && cluster.getService("HCFS") != null) {
+ hasHCFS = true;
+ }
+ } catch (AmbariException e) {
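+      // getService() throws when the cluster has no HCFS service; keep
+      // hasHCFS = false and fall back to the HDFS-based ordering below.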
+ }
+
+ if (hasHCFS) {
+ // Installs
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HIVE_CLIENT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HCAT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.MAPREDUCE_CLIENT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.OOZIE_CLIENT,
+ RoleCommand.INSTALL);
+
+ // Starts
+ addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.ZOOKEEPER_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.PEERSTATUS,
+ RoleCommand.START);
+ addDependency(Role.HBASE_REGIONSERVER, RoleCommand.START,
+ Role.HBASE_MASTER, RoleCommand.START);
+ addDependency(Role.JOBTRACKER, RoleCommand.START, Role.PEERSTATUS,
+ RoleCommand.START);
+ addDependency(Role.TASKTRACKER, RoleCommand.START, Role.PEERSTATUS,
+ RoleCommand.START);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.JOBTRACKER,
+ RoleCommand.START);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.HIVE_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_METASTORE, RoleCommand.START, Role.MYSQL_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.MYSQL_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HIVE_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HCAT,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.OOZIE_SERVER,
+ RoleCommand.START);
- // Starts
- addDependency(Role.SECONDARY_NAMENODE, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.RESOURCEMANAGER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.RESOURCEMANAGER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.NODEMANAGER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.NODEMANAGER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.HISTORYSERVER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.HISTORYSERVER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.ZOOKEEPER_SERVER,
- RoleCommand.START);
- addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.HBASE_REGIONSERVER, RoleCommand.START,
- Role.HBASE_MASTER, RoleCommand.START);
- addDependency(Role.JOBTRACKER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.JOBTRACKER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.TASKTRACKER, RoleCommand.START, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.TASKTRACKER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.JOBTRACKER,
- RoleCommand.START);
- addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.TASKTRACKER,
- RoleCommand.START);
- addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.TASKTRACKER,
- RoleCommand.START);
- addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.TASKTRACKER,
- RoleCommand.START);
- addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.HIVE_SERVER,
- RoleCommand.START);
- addDependency(Role.HIVE_METASTORE, RoleCommand.START, Role.MYSQL_SERVER,
- RoleCommand.START);
- addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.MYSQL_SERVER,
- RoleCommand.START);
- addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HIVE_SERVER,
- RoleCommand.START);
- addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HCAT,
- RoleCommand.START);
- addDependency(Role.HUE_SERVER, RoleCommand.START, Role.OOZIE_SERVER,
- RoleCommand.START);
+ // Service checks
+ addDependency(Role.HCFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.PEERSTATUS,
+ RoleCommand.START);
+ addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.OOZIE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.OOZIE_SERVER, RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.WEBHCAT_SERVER, RoleCommand.START);
+ addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HBASE_MASTER, RoleCommand.START);
+ addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HBASE_REGIONSERVER, RoleCommand.START);
+ addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_SERVER, RoleCommand.START);
+ addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_METASTORE, RoleCommand.START);
+ addDependency(Role.HCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_SERVER, RoleCommand.START);
+ addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.ZOOKEEPER_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.START);
+ addDependency(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.START);
- // Service checks
- addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.NAMENODE,
- RoleCommand.START);
- addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.DATANODE,
- RoleCommand.START);
- addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.JOBTRACKER, RoleCommand.START);
- addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.TASKTRACKER, RoleCommand.START);
- addDependency(Role.RESOURCEMANAGER_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.RESOURCEMANAGER, RoleCommand.START);
- addDependency(Role.OOZIE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.OOZIE_SERVER, RoleCommand.START);
- addDependency(Role.WEBHCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.WEBHCAT_SERVER, RoleCommand.START);
- addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.HBASE_MASTER, RoleCommand.START);
- addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.HBASE_REGIONSERVER, RoleCommand.START);
- addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.HIVE_SERVER, RoleCommand.START);
- addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.HIVE_METASTORE, RoleCommand.START);
- addDependency(Role.HCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.HIVE_SERVER, RoleCommand.START);
- addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.JOBTRACKER, RoleCommand.START);
- addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.TASKTRACKER, RoleCommand.START);
- addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.JOBTRACKER, RoleCommand.START);
- addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.TASKTRACKER, RoleCommand.START);
- addDependency(Role.ZOOKEEPER_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.ZOOKEEPER_SERVER, RoleCommand.START);
- addDependency(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK, RoleCommand.EXECUTE,
- Role.ZOOKEEPER_SERVER, RoleCommand.START);
-
- addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
- Role.HBASE_MASTER, RoleCommand.STOP);
- addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
- Role.HBASE_REGIONSERVER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.HBASE_MASTER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.HBASE_MASTER, RoleCommand.STOP);
- addDependency(Role.HBASE_MASTER, RoleCommand.STOP,
- Role.HBASE_REGIONSERVER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.JOBTRACKER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.TASKTRACKER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.RESOURCEMANAGER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.NODEMANAGER, RoleCommand.STOP);
- addDependency(Role.NAMENODE, RoleCommand.STOP,
- Role.HISTORYSERVER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.JOBTRACKER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.TASKTRACKER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.RESOURCEMANAGER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.NODEMANAGER, RoleCommand.STOP);
- addDependency(Role.DATANODE, RoleCommand.STOP,
- Role.HISTORYSERVER, RoleCommand.STOP);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
+ Role.HBASE_MASTER, RoleCommand.STOP);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
+ Role.HBASE_REGIONSERVER, RoleCommand.STOP);
+ addDependency(Role.HBASE_MASTER, RoleCommand.STOP,
+ Role.HBASE_REGIONSERVER, RoleCommand.STOP);
+
+ addDependency(Role.JOBTRACKER, RoleCommand.UPGRADE,
+ Role.HCFS_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.TASKTRACKER, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.TASKTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE,
+ Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_MASTER, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE,
+ Role.HBASE_MASTER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_CLIENT, RoleCommand.UPGRADE,
+ Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE);
- addDependency(Role.SECONDARY_NAMENODE, RoleCommand.UPGRADE,
- Role.NAMENODE, RoleCommand.UPGRADE);
- addDependency(Role.DATANODE, RoleCommand.UPGRADE,
- Role.SECONDARY_NAMENODE, RoleCommand.UPGRADE);
- addDependency(Role.HDFS_CLIENT, RoleCommand.UPGRADE,
- Role.DATANODE, RoleCommand.UPGRADE);
- addDependency(Role.JOBTRACKER, RoleCommand.UPGRADE,
- Role.HDFS_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.TASKTRACKER, RoleCommand.UPGRADE,
- Role.JOBTRACKER, RoleCommand.UPGRADE);
- addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
- Role.TASKTRACKER, RoleCommand.UPGRADE);
- addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
- Role.TASKTRACKER, RoleCommand.UPGRADE);
- addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE,
- Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE,
- Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.HBASE_MASTER, RoleCommand.UPGRADE,
- Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE,
- Role.HBASE_MASTER, RoleCommand.UPGRADE);
- addDependency(Role.HBASE_CLIENT, RoleCommand.UPGRADE,
- Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE);
- addDependency(Role.HIVE_SERVER, RoleCommand.UPGRADE,
- Role.HBASE_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.HIVE_METASTORE, RoleCommand.UPGRADE,
- Role.HIVE_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.MYSQL_SERVER, RoleCommand.UPGRADE,
- Role.HIVE_METASTORE, RoleCommand.UPGRADE);
- addDependency(Role.HIVE_CLIENT, RoleCommand.UPGRADE,
- Role.MYSQL_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.HCAT, RoleCommand.UPGRADE,
- Role.HIVE_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.OOZIE_SERVER, RoleCommand.UPGRADE,
- Role.HCAT, RoleCommand.UPGRADE);
- addDependency(Role.OOZIE_CLIENT, RoleCommand.UPGRADE,
- Role.OOZIE_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.WEBHCAT_SERVER, RoleCommand.UPGRADE,
- Role.OOZIE_CLIENT, RoleCommand.UPGRADE);
- addDependency(Role.PIG, RoleCommand.UPGRADE,
- Role.WEBHCAT_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.SQOOP, RoleCommand.UPGRADE,
- Role.PIG, RoleCommand.UPGRADE);
- addDependency(Role.NAGIOS_SERVER, RoleCommand.UPGRADE,
- Role.SQOOP, RoleCommand.UPGRADE);
- addDependency(Role.GANGLIA_SERVER, RoleCommand.UPGRADE,
- Role.NAGIOS_SERVER, RoleCommand.UPGRADE);
- addDependency(Role.GANGLIA_MONITOR, RoleCommand.UPGRADE,
- Role.GANGLIA_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.JOBTRACKER, RoleCommand.UPGRADE,
+ Role.HCFS_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.TASKTRACKER, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.TASKTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE,
+ Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_MASTER, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE,
+ Role.HBASE_MASTER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_CLIENT, RoleCommand.UPGRADE,
+ Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_SERVER, RoleCommand.UPGRADE,
+ Role.HBASE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_METASTORE, RoleCommand.UPGRADE,
+ Role.HIVE_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.MYSQL_SERVER, RoleCommand.UPGRADE,
+ Role.HIVE_METASTORE, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_CLIENT, RoleCommand.UPGRADE,
+ Role.MYSQL_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HCAT, RoleCommand.UPGRADE,
+ Role.HIVE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.UPGRADE,
+ Role.HCAT, RoleCommand.UPGRADE);
+ addDependency(Role.OOZIE_CLIENT, RoleCommand.UPGRADE,
+ Role.OOZIE_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.UPGRADE,
+ Role.OOZIE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.PIG, RoleCommand.UPGRADE,
+ Role.WEBHCAT_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.SQOOP, RoleCommand.UPGRADE,
+ Role.PIG, RoleCommand.UPGRADE);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.UPGRADE,
+ Role.SQOOP, RoleCommand.UPGRADE);
+ addDependency(Role.GANGLIA_SERVER, RoleCommand.UPGRADE,
+ Role.NAGIOS_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.GANGLIA_MONITOR, RoleCommand.UPGRADE,
+ Role.GANGLIA_SERVER, RoleCommand.UPGRADE);
+ } else {
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HIVE_CLIENT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.HCAT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.MAPREDUCE_CLIENT,
+ RoleCommand.INSTALL);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.INSTALL, Role.OOZIE_CLIENT,
+ RoleCommand.INSTALL);
+ // Starts
+ addDependency(Role.SECONDARY_NAMENODE, RoleCommand.START, Role.NAMENODE,
+ RoleCommand.START);
+ addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.ZOOKEEPER_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.NAMENODE,
+ RoleCommand.START);
+ addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.HBASE_REGIONSERVER, RoleCommand.START,
+ Role.HBASE_MASTER, RoleCommand.START);
+ addDependency(Role.JOBTRACKER, RoleCommand.START, Role.NAMENODE,
+ RoleCommand.START);
+ addDependency(Role.JOBTRACKER, RoleCommand.START, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.TASKTRACKER, RoleCommand.START, Role.NAMENODE,
+ RoleCommand.START);
+ addDependency(Role.TASKTRACKER, RoleCommand.START, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.JOBTRACKER,
+ RoleCommand.START);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.TASKTRACKER,
+ RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.HIVE_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_METASTORE, RoleCommand.START, Role.MYSQL_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.MYSQL_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HIVE_SERVER,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HCAT,
+ RoleCommand.START);
+ addDependency(Role.HUE_SERVER, RoleCommand.START, Role.OOZIE_SERVER,
+ RoleCommand.START);
+
+ // Service checks
+ addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.NAMENODE,
+ RoleCommand.START);
+ addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.DATANODE,
+ RoleCommand.START);
+ addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.OOZIE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.OOZIE_SERVER, RoleCommand.START);
+ addDependency(Role.WEBHCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.WEBHCAT_SERVER, RoleCommand.START);
+ addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HBASE_MASTER, RoleCommand.START);
+ addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HBASE_REGIONSERVER, RoleCommand.START);
+ addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_SERVER, RoleCommand.START);
+ addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_METASTORE, RoleCommand.START);
+ addDependency(Role.HCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.HIVE_SERVER, RoleCommand.START);
+ addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.JOBTRACKER, RoleCommand.START);
+ addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.TASKTRACKER, RoleCommand.START);
+ addDependency(Role.ZOOKEEPER_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.START);
+ addDependency(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK, RoleCommand.EXECUTE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.START);
+
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
+ Role.HBASE_MASTER, RoleCommand.STOP);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
+ Role.HBASE_REGIONSERVER, RoleCommand.STOP);
+ addDependency(Role.NAMENODE, RoleCommand.STOP,
+ Role.HBASE_MASTER, RoleCommand.STOP);
+ addDependency(Role.DATANODE, RoleCommand.STOP,
+ Role.HBASE_MASTER, RoleCommand.STOP);
+ addDependency(Role.HBASE_MASTER, RoleCommand.STOP,
+ Role.HBASE_REGIONSERVER, RoleCommand.STOP);
+ addDependency(Role.NAMENODE, RoleCommand.STOP,
+ Role.JOBTRACKER, RoleCommand.STOP);
+ addDependency(Role.NAMENODE, RoleCommand.STOP,
+ Role.TASKTRACKER, RoleCommand.STOP);
+ addDependency(Role.DATANODE, RoleCommand.STOP,
+ Role.JOBTRACKER, RoleCommand.STOP);
+ addDependency(Role.DATANODE, RoleCommand.STOP,
+ Role.TASKTRACKER, RoleCommand.STOP);
+
+ addDependency(Role.SECONDARY_NAMENODE, RoleCommand.UPGRADE,
+ Role.NAMENODE, RoleCommand.UPGRADE);
+ addDependency(Role.DATANODE, RoleCommand.UPGRADE,
+ Role.SECONDARY_NAMENODE, RoleCommand.UPGRADE);
+ addDependency(Role.HDFS_CLIENT, RoleCommand.UPGRADE,
+ Role.DATANODE, RoleCommand.UPGRADE);
+ addDependency(Role.JOBTRACKER, RoleCommand.UPGRADE,
+ Role.HDFS_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.TASKTRACKER, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.TASKTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE,
+ Role.JOBTRACKER, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE,
+ Role.MAPREDUCE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_MASTER, RoleCommand.UPGRADE,
+ Role.ZOOKEEPER_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE,
+ Role.HBASE_MASTER, RoleCommand.UPGRADE);
+ addDependency(Role.HBASE_CLIENT, RoleCommand.UPGRADE,
+ Role.HBASE_REGIONSERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_SERVER, RoleCommand.UPGRADE,
+ Role.HBASE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_METASTORE, RoleCommand.UPGRADE,
+ Role.HIVE_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.MYSQL_SERVER, RoleCommand.UPGRADE,
+ Role.HIVE_METASTORE, RoleCommand.UPGRADE);
+ addDependency(Role.HIVE_CLIENT, RoleCommand.UPGRADE,
+ Role.MYSQL_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.HCAT, RoleCommand.UPGRADE,
+ Role.HIVE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.OOZIE_SERVER, RoleCommand.UPGRADE,
+ Role.HCAT, RoleCommand.UPGRADE);
+ addDependency(Role.OOZIE_CLIENT, RoleCommand.UPGRADE,
+ Role.OOZIE_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.WEBHCAT_SERVER, RoleCommand.UPGRADE,
+ Role.OOZIE_CLIENT, RoleCommand.UPGRADE);
+ addDependency(Role.PIG, RoleCommand.UPGRADE,
+ Role.WEBHCAT_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.SQOOP, RoleCommand.UPGRADE,
+ Role.PIG, RoleCommand.UPGRADE);
+ addDependency(Role.NAGIOS_SERVER, RoleCommand.UPGRADE,
+ Role.SQOOP, RoleCommand.UPGRADE);
+ addDependency(Role.GANGLIA_SERVER, RoleCommand.UPGRADE,
+ Role.NAGIOS_SERVER, RoleCommand.UPGRADE);
+ addDependency(Role.GANGLIA_MONITOR, RoleCommand.UPGRADE,
+ Role.GANGLIA_SERVER, RoleCommand.UPGRADE);
+ }
extendTransitiveDependency();
}
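The addDependency helper and the dependencies map it fills are not part of this hunk. As a hedged sketch of their presumed shape (the field name, key class, and argument order are inferred from how order() and compareDeps() use the map below, not taken from this change):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class RoleCommandOrderSketch {

  // Key for one node in the ordering graph: a (role, command) pair.
  // equals()/hashCode() matter here: keySet().equals(...) and contains(...)
  // below only work if two pairs with the same role and command compare equal.
  static final class RoleCommandPair {
    final String role;  // stands in for the Role enum
    final String cmd;   // stands in for the RoleCommand enum

    RoleCommandPair(String role, String cmd) {
      this.role = role;
      this.cmd = cmd;
    }

    @Override public boolean equals(Object o) {
      if (!(o instanceof RoleCommandPair)) return false;
      RoleCommandPair p = (RoleCommandPair) o;
      return role.equals(p.role) && cmd.equals(p.cmd);
    }

    @Override public int hashCode() {
      return 31 * role.hashCode() + cmd.hashCode();
    }
  }

  // For each (role, command), the set of (role, command) pairs it must wait for.
  private final Map<RoleCommandPair, Set<RoleCommandPair>> dependencies = new HashMap<>();

  // Presumed contract: the first (role, command) runs only after the second.
  void addDependency(String blockedRole, String blockedCmd,
                     String blockerRole, String blockerCmd) {
    RoleCommandPair blocked = new RoleCommandPair(blockedRole, blockedCmd);
    RoleCommandPair blocker = new RoleCommandPair(blockerRole, blockerCmd);
    dependencies.computeIfAbsent(blocked, k -> new HashSet<>()).add(blocker);
  }
}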
@@ -285,11 +423,11 @@ public int order(RoleGraphNode rgn1, RoleGraphNode rgn2) {
rgn1.getCommand());
RoleCommandPair rcp2 = new RoleCommandPair(rgn2.getRole(),
rgn2.getCommand());
- if ((dependencies.get(rcp1) != null)
- && (dependencies.get(rcp1).contains(rcp2))) {
+ if ((this.dependencies.get(rcp1) != null)
+ && (this.dependencies.get(rcp1).contains(rcp2))) {
return 1;
- } else if ((dependencies.get(rcp2) != null)
- && (dependencies.get(rcp2).contains(rcp1))) {
+ } else if ((this.dependencies.get(rcp2) != null)
+ && (this.dependencies.get(rcp2).contains(rcp1))) {
return -1;
} else if (!rgn2.getCommand().equals(rgn1.getCommand())) {
return compareCommands(rgn1, rgn2);
@@ -301,24 +439,24 @@ public int order(RoleGraphNode rgn1, RoleGraphNode rgn2) {
* Adds transitive dependencies to each node.
* A => B and B => C implies A => B,C and B => C
*/
- private static void extendTransitiveDependency() {
- for (RoleCommandPair rcp : dependencies.keySet()) {
+ private void extendTransitiveDependency() {
+ for (RoleCommandPair rcp : this.dependencies.keySet()) {
HashSet<RoleCommandPair> visited = new HashSet<RoleCommandPair>();
HashSet<RoleCommandPair> transitiveDependencies = new HashSet<RoleCommandPair>();
- for (RoleCommandPair directlyBlockedOn : dependencies.get(rcp)) {
+ for (RoleCommandPair directlyBlockedOn : this.dependencies.get(rcp)) {
visited.add(directlyBlockedOn);
identifyTransitiveDependencies(directlyBlockedOn, visited, transitiveDependencies);
}
if (transitiveDependencies.size() > 0) {
- dependencies.get(rcp).addAll(transitiveDependencies);
+ this.dependencies.get(rcp).addAll(transitiveDependencies);
}
}
}
- private static void identifyTransitiveDependencies(RoleCommandPair rcp, HashSet<RoleCommandPair> visited,
+ private void identifyTransitiveDependencies(RoleCommandPair rcp, HashSet<RoleCommandPair> visited,
HashSet<RoleCommandPair> transitiveDependencies) {
- if (dependencies.get(rcp) != null) {
- for (RoleCommandPair blockedOn : dependencies.get(rcp)) {
+ if (this.dependencies.get(rcp) != null) {
+ for (RoleCommandPair blockedOn : this.dependencies.get(rcp)) {
if (!visited.contains(blockedOn)) {
visited.add(blockedOn);
transitiveDependencies.add(blockedOn);
@@ -357,4 +495,33 @@ private int compareCommands(RoleGraphNode rgn1, RoleGraphNode rgn2) {
}
return 0;
}
+
+ public int compareDeps(RoleCommandOrder rco) {
+ Set<RoleCommandPair> v1 = null, v2 = null;
+ if (this == rco) {
+ return 0;
+ }
+
+ // Check for key set match
+ if (!this.dependencies.keySet().equals(rco.dependencies.keySet())){
+ LOG.debug("dependency keysets differ");
+ return 1;
+ }
+ LOG.debug("dependency keysets match");
+
+ // So far so good. Since the keysets match, let's check the
+ // actual entries against each other
+ for (RoleCommandPair key: this.dependencies.keySet()) {
+ v1 = this.dependencies.get(key);
+ v2 = rco.dependencies.get(key);
+ if (!v1.equals(v2)) {
+ LOG.debug("different entry found for key (" + key.role.toString() + ", "
+ + key.cmd.toString() + ")" );
+ return 1;
+ }
+ }
+ LOG.debug("dependency entries match");
+ return 0;
+ }
+
}
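The extendTransitiveDependency()/identifyTransitiveDependencies() pair above folds indirect dependencies into each node's set (A => B and B => C implies A also waits on C), which is what lets compareDeps() treat two orderings as equal only when their full graphs match. A self-contained illustration of the same closure idea, using generic names of my own rather than the classes in this patch:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class TransitiveClosureSketch {
  // Extends each key's dependency set with everything reachable through it.
  static <T> void extend(Map<T, Set<T>> deps) {
    for (Map.Entry<T, Set<T>> e : deps.entrySet()) {
      Set<T> visited = new HashSet<>(e.getValue());
      Deque<T> work = new ArrayDeque<>(e.getValue());
      while (!work.isEmpty()) {
        T blockedOn = work.pop();
        for (T next : deps.getOrDefault(blockedOn, Set.of())) {
          if (visited.add(next)) {   // add() returns false if already seen
            work.push(next);
          }
        }
      }
      e.getValue().addAll(visited);  // fold transitive dependencies back in
    }
  }

  public static void main(String[] args) {
    Map<String, Set<String>> deps = new HashMap<>();
    deps.put("TASKTRACKER:START", new HashSet<>(Set.of("JOBTRACKER:START")));
    deps.put("JOBTRACKER:START", new HashSet<>(Set.of("NAMENODE:START")));
    extend(deps);
    // TASKTRACKER:START now also depends on NAMENODE:START
    System.out.println(deps.get("TASKTRACKER:START"));
  }
}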
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index 8e7051cc06d..ae19592a6f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -87,6 +87,7 @@ public void deleteServiceComponent(String componentName)
public enum Type {
HDFS,
+ HCFS,
MAPREDUCE,
HBASE,
HIVE,
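The new HCFS constant is what lets stack code distinguish a Hadoop-compatible filesystem service from plain HDFS; the condition that selects the HCFS_CLIENT-rooted dependency chain in RoleCommandOrder is outside this hunk. A purely hypothetical illustration of that kind of check (not code from the patch):

import java.util.Collection;

final class FsTypeCheckSketch {
  enum Type { HDFS, HCFS, MAPREDUCE, HBASE, HIVE }

  // True if any installed service is the HCFS-style filesystem, in which case
  // an upgrade ordering would start from HCFS_CLIENT rather than NAMENODE.
  static boolean usesHcfs(Collection<Type> installedServiceTypes) {
    return installedServiceTypes.contains(Type.HCFS);
  }
}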
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 4ae90cc5890..9e94e316909 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
@@ -991,7 +991,8 @@ def start(args):
print_error_msg ("Failed to stop iptables. Exiting")
sys.exit(retcode)
- command = SERVER_START_CMD.format(jdk_path, conf_dir, get_ambari_classpath())
+ command_base = SERVER_START_CMD_DEBUG if (SERVER_DEBUG_MODE or SERVER_START_DEBUG) else SERVER_START_CMD
+ command = command_base.format(jdk_path, conf_dir, get_ambari_classpath())
print "Running server: " + command
server_process = subprocess.Popen(["/bin/sh", "-c", command])
f = open(PID_DIR + os.sep + PID_NAME, "w")
@@ -1265,6 +1266,8 @@ def main():
parser.add_option("-b", "--remote-database",
action="store_true", dest="remote_database", default=False,
help="Set up remote database instead of local")
+ parser.add_option('-g', '--debug', action="store_true", dest='debug', default=False,
+ help="Start ambari-server in debug mode")
(options, args) = parser.parse_args()
@@ -1280,6 +1283,9 @@ def main():
global REMOTE_DATABASE
REMOTE_DATABASE = options.remote_database
+ # debug mode
+ global SERVER_DEBUG_MODE
+ SERVER_DEBUG_MODE = options.debug
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/metainfo.xml
new file mode 100644
index 00000000000..5d9cb06e0fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/metainfo.xml
@@ -0,0 +1,23 @@
+
+
+
+
+ 1.2.0
+ true
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/repos/repoinfo.xml
new file mode 100644
index 00000000000..f5c0fee5c3f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/repos/repoinfo.xml
@@ -0,0 +1,97 @@
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+ http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11
+ HDP-UTILS-1.1.0.15
+ HDP-UTILS
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+ http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11
+ HDP-UTILS-1.1.0.15
+ HDP-UTILS
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml
new file mode 100644
index 00000000000..0b21f0f6348
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ root
+ Ganglia Metrics Collection system
+ 3.2.0
+
+
+
+ GANGLIA_SERVER
+ MASTER
+
+
+
+ GANGLIA_MONITOR
+ SLAVE
+
+
+
+ MONITOR_WEBSERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 00000000000..e45f23c962c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for HRegionInterface protocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.protocol.acl
+ *
+ ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.masterregion.protocol.acl
+ *
+ ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 00000000000..149751e8718
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,345 @@
+
+
+
+
+
+ hbase.rootdir
+
+ The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration else all data will be lost
+ on machine restart.
+
+
+
+ hbase.cluster.distributed
+ true
+ The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+
+
+
+ hbase.tmp.dir
+
+ Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp' (The '/tmp' directory is often cleared on
+ machine restart).
+
+
+
+ hbase.master.info.bindAddress
+
+ The bind address for the HBase Master web UI
+
+
+
+ hbase.regionserver.global.memstore.upperLimit
+
+ Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+
+
+
+ hbase.regionserver.handler.count
+
+ Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+ Default is 10.
+
+
+
+ hbase.hregion.majorcompaction
+
+ The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+
+
+
+ hbase.master.lease.thread.wakefrequency
+ 3000
+ The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+
+
+
+ hbase.regionserver.global.memstore.lowerLimit
+
+ When memstores are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+ This value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+
+
+
+ hbase.hregion.memstore.block.multiplier
+
+ Block updates if memstore has hbase.hregion.memstore.block.multiplier
+ times hbase.hregion.flush.size bytes. Useful for preventing
+ runaway memstore during spikes in update traffic. Without an
+ upper-bound, memstore fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+
+
+
+ hbase.hregion.memstore.flush.size
+
+
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+
+
+
+ hbase.hregion.memstore.mslab.enabled
+
+
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+
+
+
+ hbase.hregion.max.filesize
+
+
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 1G.
+
+
+
+ hbase.client.scanner.caching
+
+ Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+
+
+
+ zookeeper.session.timeout
+
+ ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+
+
+
+ hbase.client.keyvalue.maxsize
+
+ Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+ storage file. Since they cannot be split, this helps avoid a region that
+ cannot be split any further because the data is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+
+
+
+ hbase.hstore.compactionThreshold
+
+
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+
+
+
+ hbase.hstore.blockingStoreFiles
+
+
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+
+
+
+ hfile.block.cache.size
+
+
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+ used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+ Set to 0 to disable but it's not recommended.
+
+
+
+
+
+ hbase.master.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HMaster server principal.
+
+
+
+ hbase.master.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HMaster process. The principal name should
+ be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+ portion, it will be replaced with the actual hostname of the running
+ instance.
+
+
+
+ hbase.regionserver.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HRegionServer server principal.
+
+
+
+ hbase.regionserver.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HRegionServer process. The principal name
+ should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+ hostname portion, it will be replaced with the actual hostname of the
+ running instance. An entry for this principal must exist in the file
+ specified in hbase.regionserver.keytab.file
+
+
+
+
+
+ hbase.superuser
+ hbase
+ List of users or groups (comma-separated), who are allowed
+ full privileges, regardless of stored ACLs, across the cluster.
+ Only used when HBase security is enabled.
+
+
+
+
+ hbase.coprocessor.region.classes
+
+ A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+
+
+
+
+ hbase.coprocessor.master.classes
+
+ A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+
+
+
+
+ hbase.zookeeper.property.clientPort
+ 2181
+ Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+
+
+
+
+
+ hbase.zookeeper.quorum
+
+ Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+
+
+
+
+
+ dfs.support.append
+
+ Does HDFS allow appends to files?
+ This is an hdfs config. set in here so the hdfs client will do append support.
+ You must ensure that this config. is true serverside too when running hbase
+ (You will have to restart your cluster after setting it).
+
+
+
+
+ dfs.client.read.shortcircuit
+
+ Enable/Disable short circuit read for your client.
+ Hadoop servers should be configured to allow short circuit read
+ for the hbase user for this to take effect
+
+
+
+
+ dfs.client.read.shortcircuit.skip.checksum
+
+ Enable/disable skipping the checksum check
+
+
+
+ hbase.regionserver.optionalcacheflushinterval
+ 10000
+
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+
+
+
+ hbase.zookeeper.useMulti
+ true
+ Instructs HBase to make use of ZooKeeper's multi-update functionality.
+ This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+ with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+ IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+ and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
+ not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+
+
+
+
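The property values above are left blank because the stack templates fill them in per cluster. Assuming the generated hbase-site.xml ends up on a client's classpath, reading a few of these settings would look roughly like this (standard hbase-client/hadoop-common API; the sample values in the comments are placeholders, not defaults from this file):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReadHBaseSite {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();    // loads hbase-site.xml from the classpath
    String rootDir = conf.get("hbase.rootdir");           // e.g. hdfs://nn.example.org:8020/apps/hbase/data
    String quorum  = conf.get("hbase.zookeeper.quorum");  // comma-separated ZooKeeper hosts
    int zkPort     = conf.getInt("hbase.zookeeper.property.clientPort", 2181);
    System.out.println(rootDir + " / " + quorum + ":" + zkPort);
  }
}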
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml
new file mode 100644
index 00000000000..553fa2b0eb2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ mapred
+ Non-relational distributed database and centralized service for configuration management & synchronization
+ 0.94.5
+
+
+
+ HBASE_MASTER
+ MASTER
+
+
+
+ HBASE_REGIONSERVER
+ SLAVE
+
+
+
+ HBASE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml
new file mode 100644
index 00000000000..1951a5dcd1d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ This is the comment for the HCATALOG service
+ 0.5.0
+
+
+
+ HCAT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml
new file mode 100644
index 00000000000..a312e68fe62
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml
@@ -0,0 +1,251 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
+
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+ base filter substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..900da99ef0f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communicate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
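Each ACL above follows the same documented format: a comma-separated user list, a blank, then a comma-separated group list, with "*" meaning everyone. A small illustrative parser for that format (this is not Hadoop's own AccessControlList class, just a sketch of the rule as described):

import java.util.Arrays;
import java.util.List;

final class AclSketch {
  final boolean allowAll;
  final List<String> users;
  final List<String> groups;

  AclSketch(String acl) {
    String trimmed = acl.trim();
    this.allowAll = trimmed.equals("*");
    String[] parts = trimmed.split("\\s+", 2);     // "users groups"
    this.users = parts.length > 0 && !parts[0].isEmpty() && !allowAll
        ? Arrays.asList(parts[0].split(",")) : List.of();
    this.groups = parts.length > 1
        ? Arrays.asList(parts[1].split(",")) : List.of();
  }

  public static void main(String[] args) {
    AclSketch acl = new AclSketch("alice,bob users,wheel");
    System.out.println(acl.users + " / " + acl.groups);  // [alice, bob] / [users, wheel]
  }
}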
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..db92d4bd2c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml
@@ -0,0 +1,415 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+
+ to enable webhdfs
+ true
+
+
+
+ dfs.datanode.socket.write.timeout
+ 0
+ DFS Client write socket timeout
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+
+ #of failed disks dn would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ dfs.hosts
+
+ Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for balancing purposes in terms of
+ the number of bytes per second.
+
+
+
+
+ dfs.datanode.address
+
+
+
+
+ dfs.datanode.http.address
+
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The name of the default file system. Either the
+literal string "local" or a host:port for NDFS.
+true
+
+
+
+dfs.datanode.du.reserved
+
+
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+4096
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+077
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow Queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+
+The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+ The access time for HDFS files is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL for who all can view the default servlets in the HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ Number of failed disks datanode would tolerate
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml
new file mode 100644
index 00000000000..2f42b7f88b8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Hadoop Compatible File System
+ 1.0.0
+
+
+ HCFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 00000000000..a312e68fe62
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,251 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
+
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+ base filter substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..900da99ef0f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communicate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..db92d4bd2c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,415 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+
+ to enable webhdfs
+ true
+
+
+
+ dfs.datanode.socket.write.timeout
+ 0
+ DFS Client write socket timeout
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+
+ #of failed disks dn would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ dfs.hosts
+
+ Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for balancing purposes in terms of
+ the number of bytes per second.
+
+
+
+
+ dfs.datanode.address
+
+
+
+
+ dfs.datanode.http.address
+
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The name of the default file system. Either the
+literal string "local" or a host:port for NDFS.
+true
+
+
+
+dfs.datanode.du.reserved
+
+
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+4096
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+077
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow Queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the secondary namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+
+The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+ The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL specifying who can view the default servlets in HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ Number of failed disks datanode would tolerate
+
+
+
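
These properties end up in the rendered hdfs-site.xml and are read through Hadoop's standard Configuration API. A minimal, illustrative sketch of reading a few of them (the property names come from the file above; the fallback defaults and the class itself are assumptions, not Ambari or HDFS code):

import org.apache.hadoop.conf.Configuration;

public class HdfsSitePeek {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Pick up the rendered file from the client configuration directory on the classpath.
    conf.addResource("hdfs-site.xml");

    // Typed getters fall back to the supplied default when a property is unset.
    int handlers = conf.getInt("dfs.namenode.handler.count", 10);
    long blockSize = conf.getLong("dfs.block.size", 134217728L);
    boolean permissions = conf.getBoolean("dfs.permissions", true);

    System.out.printf("handlers=%d blockSize=%d permissions=%b%n",
        handlers, blockSize, permissions);
  }
}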
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml
new file mode 100644
index 00000000000..1b185e1579c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml
@@ -0,0 +1,46 @@
+
+
+
+ root
+ Apache Hadoop Distributed File System
+ 1.1.2
+
+
+
+ NAMENODE
+ MASTER
+
+
+
+ DATANODE
+ SLAVE
+
+
+
+ SECONDARY_NAMENODE
+ MASTER
+
+
+
+ HDFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 00000000000..7d35558b8c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,138 @@
+
+
+
+
+
+
+ hive.metastore.local
+ false
+ controls whether to connect to a remote metastore server or
+ open a new metastore server in the Hive Client JVM
+
+
+
+ javax.jdo.option.ConnectionURL
+
+ JDBC connect string for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionDriverName
+ com.mysql.jdbc.Driver
+ Driver class name for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionUserName
+
+ username to use against metastore database
+
+
+
+ javax.jdo.option.ConnectionPassword
+
+ password to use against metastore database
+
+
+
+ hive.metastore.warehouse.dir
+ /apps/hive/warehouse
+ location of default database for the warehouse
+
+
+
+ hive.metastore.sasl.enabled
+
+ If true, the metastore thrift interface will be secured with SASL.
+ Clients must authenticate with Kerberos.
+
+
+
+ hive.metastore.kerberos.keytab.file
+
+ The path to the Kerberos Keytab file containing the metastore
+ thrift server's service principal.
+
+
+
+ hive.metastore.kerberos.principal
+
+ The service principal for the metastore thrift server. The special
+ string _HOST will be replaced automatically with the correct host name.
+
+
+
+ hive.metastore.cache.pinobjtypes
+ Table,Database,Type,FieldSchema,Order
+ List of comma separated metastore object types that should be pinned in the cache
+
+
+
+ hive.metastore.uris
+
+ URI for client to contact metastore server
+
+
+
+ hive.semantic.analyzer.factory.impl
+ org.apache.hivealog.cli.HCatSemanticAnalyzerFactory
+ controls which SemanticAnalyzerFactory implementation class is used by CLI
+
+
+
+ hadoop.clientside.fs.operations
+ true
+ FS operations are owned by client
+
+
+
+ hive.metastore.client.socket.timeout
+ 60
+ MetaStore Client socket timeout in seconds
+
+
+
+ hive.metastore.execute.setugi
+ true
+ In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.
+
+
+
+ hive.security.authorization.enabled
+ true
+ enable or disable the hive client authorization
+
+
+
+ hive.security.authorization.manager
+ org.apache.hcatalog.security.HdfsAuthorizationProvider
+ the hive client authorization manager class name.
+ The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+
+
+
+ hive.server2.enable.doAs
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
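
The javax.jdo.option.* properties above are left blank in the stack definition and are filled in per cluster; the driver defaults to com.mysql.jdbc.Driver. A hedged sketch of sanity-checking those JDBC settings with plain JDK APIs (the URL, user, and password below are hypothetical examples, not values supplied by this patch):

import java.sql.Connection;
import java.sql.DriverManager;

public class MetastoreJdbcCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical example values for the blank properties above.
    String url = "jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true";
    String user = "hive";
    String password = "hivepassword";

    // javax.jdo.option.ConnectionDriverName from the file above.
    Class.forName("com.mysql.jdbc.Driver");
    try (Connection conn = DriverManager.getConnection(url, user, password)) {
      System.out.println("metastore database reachable: " + conn.isValid(5));
    }
  }
}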
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml
new file mode 100644
index 00000000000..6a52064cb6e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml
@@ -0,0 +1,43 @@
+
+
+
+ root
+ Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service
+ 0.10.0
+
+
+
+ HIVE_METASTORE
+ MASTER
+
+
+ HIVE_SERVER
+ MASTER
+
+
+ MYSQL_SERVER
+ MASTER
+
+
+ HIVE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
new file mode 100644
index 00000000000..8034d1911f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.maximum-system-jobs
+ 3000
+ Maximum number of jobs in the system which can be initialized,
+ concurrently, by the CapacityScheduler.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.capacity
+ 100
+ Percentage of the number of slots in the cluster that are
+ to be available for jobs in this queue.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-capacity
+ -1
+
+ maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+ This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+ The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+ Default value of -1 implies a queue can use complete capacity of the cluster.
+
+ This property could be used to curtail certain jobs which are long running in nature from occupying more than a
+ certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of
+ other queues being affected.
+
+ One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
+ the max capacity would change. So if a large number of nodes or racks is added to the cluster, the max capacity in
+ absolute terms would increase accordingly.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.minimum-user-limit-percent
+ 100
+ Each queue enforces a limit on the percentage of resources
+ allocated to a user at any given time, if there is competition for them.
+ This user limit can vary between a minimum and maximum value. The former
+ depends on the number of users who have submitted jobs, and the latter is
+ set to this property value. For example, suppose the value of this
+ property is 25. If two users have submitted jobs to a queue, no single
+ user can use more than 50% of the queue resources. If a third user submits
+ a job, no single user can use more than 33% of the queue resources. With 4
+ or more users, no user can use more than 25% of the queue's resources. A
+ value of 100 implies no user limits are imposed.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.user-limit-factor
+ 1
+ The multiple of the queue capacity which can be configured to
+ allow a single user to acquire more slots.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks
+ 200000
+ The maximum number of tasks, across all jobs in the queue,
+ which can be initialized concurrently. Once the queue's jobs exceed this
+ limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user
+ 100000
+ The maximum number of tasks per-user, across all of the
+ user's jobs in the queue, which can be initialized concurrently. Once the
+ user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.init-accept-jobs-factor
+ 10
+ The multiple of (maximum-system-jobs * queue-capacity) used to
+ determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.default-supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions by default in a job queue.
+
+
+
+
+ mapred.capacity-scheduler.default-minimum-user-limit-percent
+ 100
+ The percentage of the resources limited to a particular user
+ for the job queue at any given point of time by default.
+
+
+
+
+
+ mapred.capacity-scheduler.default-user-limit-factor
+ 1
+ The default multiple of queue-capacity which is used to
+ determine the amount of slots a single user can consume concurrently.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-queue
+ 200000
+ The default maximum number of tasks, across all jobs in the
+ queue, which can be initialized concurrently. Once the queue's jobs exceed
+ this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-user
+ 100000
+ The default maximum number of tasks per-user, across all of
+ the user's jobs in the queue, which can be initialized concurrently. Once
+ the user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-init-accept-jobs-factor
+ 10
+ The default multiple of (maximum-system-jobs * queue-capacity)
+ used to determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+ mapred.capacity-scheduler.init-poll-interval
+ 5000
+ The amount of time in milliseconds which is used to poll
+ the job queues for jobs to initialize.
+
+
+
+ mapred.capacity-scheduler.init-worker-threads
+ 5
+ Number of worker threads used by the initialization poller to
+ initialize jobs in a set of queues. If the number mentioned in the property
+ equals the number of job queues, each thread initializes jobs in a single
+ queue. If it is smaller, each thread is assigned a set of queues. If it is
+ greater, the number of threads used equals the number of job queues.
+
+
+
+
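
The per-user limit described for mapred.capacity-scheduler.queue.default.minimum-user-limit-percent can be summarized as a small piece of arithmetic. The sketch below is only an illustration of the documented behaviour with the example value of 25, not CapacityScheduler source:

public class UserLimitSketch {
  // With N active users, each user is aimed at max(100/N, minUserLimitPercent)
  // percent of the queue, never more than 100 percent.
  static double perUserSharePercent(int activeUsers, int minUserLimitPercent) {
    double evenShare = 100.0 / activeUsers;
    return Math.min(100.0, Math.max(evenShare, minUserLimitPercent));
  }

  public static void main(String[] args) {
    for (int users = 1; users <= 5; users++) {
      System.out.printf("%d user(s) -> %.1f%% of the queue each%n",
          users, perUserSharePercent(users, 25));
    }
    // Prints 100.0, 50.0, 33.3, 25.0, 25.0 - matching the description above.
  }
}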
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml
new file mode 100644
index 00000000000..3a2af490593
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
new file mode 100644
index 00000000000..ce12380767c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.queue.default.acl-submit-job
+ *
+
+
+
+ mapred.queue.default.acl-administer-jobs
+ *
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml
new file mode 100644
index 00000000000..11a72b1446a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -0,0 +1,531 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.sort.mb
+
+ No description
+
+
+
+ io.sort.record.percent
+ .2
+ No description
+
+
+
+ io.sort.spill.percent
+
+ No description
+
+
+
+ io.sort.factor
+ 100
+ No description
+
+
+
+
+
+ mapred.tasktracker.tasks.sleeptime-before-sigkill
+ 250
+ Normally, this is the amount of time before killing
+ processes, and the recommended default is 5 seconds - a value of
+ 5000 milliseconds here. In this case, we are using it solely to blast tasks before
+ killing them, and killing them very quickly (1/4 second) to guarantee
+ that we do not leave VMs around for later jobs.
+
+
+
+
+ mapred.job.tracker.handler.count
+ 50
+
+ The number of server threads for the JobTracker. This should be roughly
+ 4% of the number of tasktracker nodes.
+
+
+
+
+ mapred.system.dir
+ /mapred/system
+ No description
+ true
+
+
+
+ mapred.job.tracker
+
+
+ No description
+ true
+
+
+
+ mapred.job.tracker.http.address
+
+
+ No description
+ true
+
+
+
+
+ mapred.local.dir
+
+ No description
+ true
+
+
+
+ mapreduce.cluster.administrators
+ hadoop
+
+
+
+ mapred.reduce.parallel.copies
+ 30
+ No description
+
+
+
+ mapred.tasktracker.map.tasks.maximum
+
+ No description
+
+
+
+ mapred.tasktracker.reduce.tasks.maximum
+
+ No description
+
+
+
+ tasktracker.http.threads
+ 50
+
+
+
+ mapred.map.tasks.speculative.execution
+ false
+ If true, then multiple instances of some map tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.tasks.speculative.execution
+ false
+ If true, then multiple instances of some reduce tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.slowstart.completed.maps
+ 0.05
+
+
+
+ mapred.inmem.merge.threshold
+ 1000
+ The threshold, in terms of the number of files
+ for the in-memory merge process. When we accumulate threshold number of files
+ we initiate the in-memory merge and spill to disk. A value of 0 or less than
+ 0 indicates that we don't want any threshold and instead depend only on
+ the ramfs's memory consumption to trigger the merge.
+
+
+
+
+ mapred.job.shuffle.merge.percent
+ 0.66
+ The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapred.job.shuffle.input.buffer.percent.
+
+
+
+
+ mapred.job.shuffle.input.buffer.percent
+ 0.7
+ The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+
+
+
+
+ mapred.map.output.compression.codec
+
+ If the map outputs are compressed, how should they be
+ compressed
+
+
+
+
+ mapred.output.compression.type
+ BLOCK
+ If the job outputs are to be compressed as SequenceFiles, how should
+ they be compressed? Should be one of NONE, RECORD or BLOCK.
+
+
+
+
+
+ mapred.jobtracker.completeuserjobs.maximum
+ 0
+
+
+
+ mapred.jobtracker.taskScheduler
+
+
+
+
+ mapred.jobtracker.restart.recover
+ false
+ "true" to enable (job) recovery upon restart,
+ "false" to start afresh
+
+
+
+
+ mapred.job.reduce.input.buffer.percent
+ 0.0
+ The percentage of memory- relative to the maximum heap size- to
+ retain map outputs during the reduce. When the shuffle is concluded, any
+ remaining map outputs in memory must consume less than this threshold before
+ the reduce can begin.
+
+
+
+
+ mapreduce.reduce.input.limit
+ 10737418240
+ The limit on the input size of the reduce. (This value
+ is 10 Gb.) If the estimated input size of the reduce is greater than
+ this value, the job is failed. A value of -1 means that there is no limit
+ set.
+
+
+
+
+
+ mapred.compress.map.output
+
+
+
+
+
+ mapred.task.timeout
+ 600000
+ The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string.
+
+
+
+
+ jetty.connector
+ org.mortbay.jetty.nio.SelectChannelConnector
+ No description
+
+
+
+ mapred.task.tracker.task-controller
+
+
+ TaskController which is used to launch and manage task execution.
+
+
+
+
+ mapred.child.root.logger
+ INFO,TLA
+
+
+
+ mapred.child.java.opts
+
+
+ No description
+
+
+
+ mapred.cluster.map.memory.mb
+
+
+
+
+ mapred.cluster.reduce.memory.mb
+
+
+
+
+ mapred.job.map.memory.mb
+
+
+
+
+ mapred.job.reduce.memory.mb
+
+
+
+
+ mapred.cluster.max.map.memory.mb
+
+
+
+
+ mapred.cluster.max.reduce.memory.mb
+
+
+
+
+ mapred.hosts
+
+
+
+
+ mapred.hosts.exclude
+
+
+
+
+ mapred.max.tracker.blacklists
+ 16
+
+ If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
+
+
+
+
+ mapred.healthChecker.script.path
+
+
+
+
+ mapred.healthChecker.interval
+ 135000
+
+
+
+ mapred.healthChecker.script.timeout
+ 60000
+
+
+
+ mapred.job.tracker.persist.jobstatus.active
+ false
+ Indicates if persistency of job status information is
+ active or not.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.hours
+ 1
+ The number of hours job status information is persisted in DFS.
+ The job status information will be available after it drops off the memory
+ queue and between jobtracker restarts. With a zero value the job status
+ information is not persisted at all in DFS.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.dir
+
+ The directory where the job status information is persisted
+ in a file system to be available after it drops off the memory queue and
+ between jobtracker restarts.
+
+
+
+
+ mapred.jobtracker.retirejob.check
+ 10000
+
+
+
+ mapred.jobtracker.retirejob.interval
+ 0
+
+
+
+ mapred.job.tracker.history.completed.location
+ /mapred/history/done
+ No description
+
+
+
+ mapred.task.maxvmem
+
+ true
+ No description
+
+
+
+ mapred.jobtracker.maxtasks.per.job
+
+ true
+ The maximum number of tasks for a single job.
+ A value of -1 indicates that there is no maximum.
+
+
+
+ mapreduce.fileoutputcommitter.marksuccessfuljobs
+ false
+
+
+
+ mapred.userlog.retain.hours
+
+
+
+
+ mapred.job.reuse.jvm.num.tasks
+ 1
+
+ How many tasks to run per JVM. If set to -1, there is no limit.
+
+ true
+
+
+
+ mapreduce.jobtracker.kerberos.principal
+
+
+ JobTracker user name key.
+
+
+
+
+ mapreduce.tasktracker.kerberos.principal
+
+
+ TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
+
+
+
+
+
+ hadoop.job.history.user.location
+ none
+ true
+
+
+
+
+ mapreduce.jobtracker.keytab.file
+
+
+ The keytab for the jobtracker principal.
+
+
+
+
+
+ mapreduce.tasktracker.keytab.file
+
+ The filename of the keytab for the task tracker
+
+
+
+ mapreduce.jobtracker.staging.root.dir
+ /user
+ The Path prefix for where the staging directories should be placed. The next level is always the user's
+ name. It is a path in the default file system.
+
+
+
+ mapreduce.tasktracker.group
+ hadoop
+ The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.
+
+
+
+
+ mapreduce.jobtracker.split.metainfo.maxsize
+ 50000000
+ true
+ If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+ initialization.
+
+
+
+ mapreduce.history.server.embedded
+ false
+ Should the job history server be embedded within the JobTracker
+process
+ true
+
+
+
+ mapreduce.history.server.http.address
+
+
+ Http address of the history server
+ true
+
+
+
+ mapreduce.jobhistory.kerberos.principal
+
+
+ Job history user name key. (must map to same user as JT
+user)
+
+
+
+ mapreduce.jobhistory.keytab.file
+
+
+ The keytab for the job history server principal.
+
+
+
+ mapred.jobtracker.blacklist.fault-timeout-window
+ 180
+
+ 3-hour sliding window (value is in minutes)
+
+
+
+
+ mapred.jobtracker.blacklist.fault-bucket-width
+ 15
+
+ 15-minute bucket size (value is in minutes)
+
+
+
+
+ mapred.queue.names
+ default
+ Comma separated list of queues configured for this jobtracker.
+
+
+
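
mapred.jobtracker.blacklist.fault-timeout-window (180 minutes) and fault-bucket-width (15 minutes) define a sliding window of fault buckets, and mapred.max.tracker.blacklists (16) is the threshold within that window. An illustrative sketch of the bookkeeping, not the JobTracker implementation:

public class FaultWindowSketch {
  public static void main(String[] args) {
    int windowMinutes = 180; // mapred.jobtracker.blacklist.fault-timeout-window
    int bucketMinutes = 15;  // mapred.jobtracker.blacklist.fault-bucket-width
    int maxBlacklists = 16;  // mapred.max.tracker.blacklists

    // Faults are tracked per bucket; the window holds windowMinutes/bucketMinutes buckets.
    int buckets = windowMinutes / bucketMinutes;
    int[] faultsPerBucket = new int[buckets]; // 12 buckets of 15 minutes each

    // Example: 10 faults in the newest bucket and 7 faults three buckets ago.
    faultsPerBucket[0] = 10;
    faultsPerBucket[3] = 7;

    int total = 0;
    for (int f : faultsPerBucket) {
      total += f;
    }
    // The tracker is graylisted once the windowed total reaches the limit.
    System.out.println("graylisted: " + (total >= maxBlacklists)); // true
  }
}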
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 00000000000..79d219bd19e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,41 @@
+
+
+
+ mapred
+ Apache Hadoop Distributed Processing Framework
+ 1.1.2
+
+
+
+ JOBTRACKER
+ MASTER
+
+
+
+ TASKTRACKER
+ SLAVE
+
+
+
+ MAPREDUCE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
new file mode 100644
index 00000000000..bd7de072f7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Nagios Monitoring and Alerting system
+ 3.2.3
+
+
+
+ NAGIOS_SERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 00000000000..1665ba8a581
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,245 @@
+
+
+
+
+
+
+
+ oozie.base.url
+ http://localhost:11000/oozie
+ Base Oozie URL.
+
+
+
+ oozie.system.id
+ oozie-${user.name}
+
+ The Oozie system ID.
+
+
+
+
+ oozie.systemmode
+ NORMAL
+
+ System mode for Oozie at startup.
+
+
+
+
+ oozie.service.AuthorizationService.security.enabled
+ true
+
+ Specifies whether security (user name/admin role) is enabled or not.
+ If disabled, any user can manage the Oozie system and manage any job.
+
+
+
+
+ oozie.service.PurgeService.older.than
+ 30
+
+ Jobs older than this value, in days, will be purged by the PurgeService.
+
+
+
+
+ oozie.service.PurgeService.purge.interval
+ 3600
+
+ Interval at which the purge service will run, in seconds.
+
+
+
+
+ oozie.service.CallableQueueService.queue.size
+ 1000
+ Max callable queue size
+
+
+
+ oozie.service.CallableQueueService.threads
+ 10
+ Number of threads used for executing callables
+
+
+
+ oozie.service.CallableQueueService.callable.concurrency
+ 3
+
+ Maximum concurrency for a given callable type.
+ Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
+ Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+ All commands that use action executors (action-start, action-end, action-kill and action-check) use
+ the action type as the callable type.
+
+
+
+
+ oozie.service.coord.normal.default.timeout
+ 120
+ Default timeout for a coordinator action input check (in minutes) for a normal job.
+ -1 means infinite timeout
+
+
+
+ oozie.db.schema.name
+ oozie
+
+ Oozie DataBase Name
+
+
+
+
+ oozie.service.HadoopAccessorService.jobTracker.whitelist
+
+
+ Whitelisted job tracker for Oozie service.
+
+
+
+
+ oozie.authentication.type
+ simple
+
+
+
+
+
+ oozie.service.HadoopAccessorService.nameNode.whitelist
+
+
+
+
+
+
+ oozie.service.WorkflowAppService.system.libpath
+ /user/${user.name}/share/lib
+
+ System library path to use for workflow applications.
+ This path is added to workflow applications if their job properties set
+ the property 'oozie.use.system.libpath' to true.
+
+
+
+
+ use.system.libpath.for.mapreduce.and.pig.jobs
+ false
+
+ If set to true, submissions of MapReduce and Pig jobs will automatically
+ include the system library path, thus not requiring users to
+ specify where the Pig JAR files are. Instead, the ones from the system
+ library path are used.
+
+
+
+ oozie.authentication.kerberos.name.rules
+
+ RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+ RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+ RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ DEFAULT
+
+ The mapping from kerberos principal names to local OS user names.
+
+
+ oozie.service.HadoopAccessorService.hadoop.configurations
+ *=/etc/hadoop/conf
+
+ Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+ the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+ used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+ the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+ the Oozie configuration directory; the path can also be absolute (i.e. pointing
+ to Hadoop client conf/ directories in the local filesystem).
+
+
+
+ oozie.service.ActionService.executor.ext.classes
+
+ org.apache.oozie.action.email.EmailActionExecutor,
+ org.apache.oozie.action.hadoop.HiveActionExecutor,
+ org.apache.oozie.action.hadoop.ShellActionExecutor,
+ org.apache.oozie.action.hadoop.SqoopActionExecutor,
+ org.apache.oozie.action.hadoop.DistcpActionExecutor
+
+
+
+
+ oozie.service.SchemaService.wf.ext.schemas
+ shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+
+
+ oozie.service.JPAService.create.db.schema
+ false
+
+ Creates Oozie DB.
+
+ If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
+ If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
+
+
+
+
+ oozie.service.JPAService.jdbc.driver
+ org.apache.derby.jdbc.EmbeddedDriver
+
+ JDBC driver class.
+
+
+
+
+ oozie.service.JPAService.jdbc.url
+ jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true
+
+ JDBC URL.
+
+
+
+
+ oozie.service.JPAService.jdbc.username
+ sa
+
+ DB user name.
+
+
+
+
+ oozie.service.JPAService.jdbc.password
+
+
+ DB user password.
+
+ IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
+ and if it is empty, Configuration assumes it is NULL.
+
+
+
+
+ oozie.service.JPAService.pool.max.active.conn
+ 10
+
+ Max number of connections.
+
+
+
\ No newline at end of file
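
oozie.service.HadoopAccessorService.hadoop.configurations maps a HOST:PORT authority to a Hadoop configuration directory, with '*' as the catch-all. A rough sketch of resolving such a mapping (illustrative only; this is not Oozie's HadoopAccessorService code):

import java.util.LinkedHashMap;
import java.util.Map;

public class AuthorityConfDirSketch {
  // Parse "AUTHORITY=DIR,AUTHORITY=DIR,..." into a lookup map.
  static Map<String, String> parse(String value) {
    Map<String, String> map = new LinkedHashMap<>();
    for (String pair : value.split(",")) {
      String[] kv = pair.trim().split("=", 2);
      if (kv.length == 2) {
        map.put(kv[0].trim(), kv[1].trim());
      }
    }
    return map;
  }

  // Resolve a JobTracker/NameNode authority, falling back to the '*' entry.
  static String confDirFor(Map<String, String> map, String authority) {
    return map.getOrDefault(authority, map.get("*"));
  }

  public static void main(String[] args) {
    Map<String, String> map = parse("*=/etc/hadoop/conf");
    // Any authority resolves to the wildcard entry with the value shipped above.
    System.out.println(confDirFor(map, "jobtracker.example.com:8021"));
  }
}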
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml
new file mode 100644
index 00000000000..83ccb06d5bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ System for workflow coordination and execution of Apache Hadoop jobs
+ 3.2.0
+
+
+
+ OOZIE_SERVER
+ MASTER
+
+
+
+ OOZIE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties
new file mode 100644
index 00000000000..01000b53ab2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
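
pig.exec.reducers.bytes.per.reducer and pig.exec.reducers.max drive Pig's default parallelism estimate: roughly one reducer per configured chunk of input bytes, capped at the maximum. A small sketch of that arithmetic, as an illustration of the heuristic rather than Pig source:

public class PigReducerEstimate {
  static int estimateReducers(long inputBytes, long bytesPerReducer, int maxReducers) {
    // ceil(inputBytes / bytesPerReducer), at least 1, capped at maxReducers.
    long wanted = (inputBytes + bytesPerReducer - 1) / bytesPerReducer;
    return (int) Math.min(maxReducers, Math.max(1L, wanted));
  }

  public static void main(String[] args) {
    long bytesPerReducer = 1000000000L; // pig.exec.reducers.bytes.per.reducer
    int maxReducers = 999;              // pig.exec.reducers.max

    System.out.println(estimateReducers(250000000L, bytesPerReducer, maxReducers));  // 1
    System.out.println(estimateReducers(5500000000L, bytesPerReducer, maxReducers)); // 6
  }
}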
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml
new file mode 100644
index 00000000000..4982fd217ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Scripting platform for analyzing large datasets
+ 0.10.1
+
+
+
+ PIG
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml
new file mode 100644
index 00000000000..ae0e68b9c48
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases
+ 1.4.2
+
+
+
+ SQOOP
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 00000000000..31d0113faa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+
+
+ templeton.port
+ 50111
+ The HTTP port for the main server.
+
+
+
+ templeton.hadoop.conf.dir
+ /etc/hadoop/conf
+ The path to the Hadoop configuration.
+
+
+
+ templeton.jar
+ /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
+ The path to the Templeton jar file.
+
+
+
+ templeton.libjars
+ /usr/lib/zookeeper/zookeeper.jar
+ Jars to add to the classpath.
+
+
+
+
+ templeton.hadoop
+ /usr/bin/hadoop
+ The path to the Hadoop executable.
+
+
+
+ templeton.pig.archive
+ hdfs:///apps/webhcat/pig.tar.gz
+ The path to the Pig archive.
+
+
+
+ templeton.pig.path
+ pig.tar.gz/pig/bin/pig
+ The path to the Pig executable.
+
+
+
+ templeton.hcat
+ /usr/bin/hcat
+ The path to the hcatalog executable.
+
+
+
+ templeton.hive.archive
+ hdfs:///apps/webhcat/hive.tar.gz
+ The path to the Hive archive.
+
+
+
+ templeton.hive.path
+ hive.tar.gz/hive/bin/hive
+ The path to the Hive executable.
+
+
+
+ templeton.hive.properties
+
+ Properties to set when running hive.
+
+
+
+
+ templeton.zookeeper.hosts
+
+ ZooKeeper servers, as comma separated host:port pairs
+
+
+
+ templeton.storage.class
+ org.apache.hcatalog.templeton.tool.ZooKeeperStorage
+ The class to use as storage
+
+
+
+ templeton.override.enabled
+ false
+
+ Enable the override path in templeton.override.jars
+
+
+
+
+ templeton.streaming.jar
+ hdfs:///apps/webhcat/hadoop-streaming.jar
+ The hdfs path to the Hadoop streaming jar file.
+
+
+
+ templeton.exec.timeout
+ 60000
+ Timeout for the Templeton API
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml
new file mode 100644
index 00000000000..e65992f3237
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,31 @@
+
+
+
+ root
+ This is a comment for the WEBHCAT service
+ 0.5.0
+
+
+
+ WEBHCAT_SERVER
+ MASTER
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 00000000000..0e21f4f94f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ This is a comment for the ZOOKEEPER service
+ 3.4.5
+
+
+
+ ZOOKEEPER_SERVER
+ MASTER
+
+
+
+ ZOOKEEPER_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
index 40dd2e96607..d399cc7a1e6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
@@ -18,18 +18,136 @@
package org.apache.ambari.server.metadata;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
import junit.framework.Assert;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.stageplanner.RoleGraphNode;
import org.junit.Test;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.orm.entities.HostStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.StackId;
+import com.google.gson.Gson;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.junit.After;
+import org.junit.Before;
public class RoleGraphTest {
+ private Injector injector;
+
+ public ClusterEntity createDummyData() {
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName("test_cluster1");
+ clusterEntity.setClusterInfo("test_cluster_info1");
+
+ HostEntity host1 = new HostEntity();
+ HostEntity host2 = new HostEntity();
+ HostEntity host3 = new HostEntity();
+
+ host1.setHostName("test_host1");
+ host2.setHostName("test_host2");
+ host3.setHostName("test_host3");
+ host1.setIpv4("192.168.0.1");
+ host2.setIpv4("192.168.0.2");
+ host3.setIpv4("192.168.0.3");
+
+ List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+ hostEntities.add(host1);
+ hostEntities.add(host2);
+
+ clusterEntity.setHostEntities(hostEntities);
+ clusterEntity.setClusterConfigEntities(Collections.EMPTY_LIST);
+ //both sides of relation should be set when modifying in runtime
+ host1.setClusterEntities(Arrays.asList(clusterEntity));
+ host2.setClusterEntities(Arrays.asList(clusterEntity));
+
+ HostStateEntity hostStateEntity1 = new HostStateEntity();
+ hostStateEntity1.setCurrentState(HostState.HEARTBEAT_LOST);
+ hostStateEntity1.setHostEntity(host1);
+ HostStateEntity hostStateEntity2 = new HostStateEntity();
+ hostStateEntity2.setCurrentState(HostState.HEALTHY);
+ hostStateEntity2.setHostEntity(host2);
+ host1.setHostStateEntity(hostStateEntity1);
+ host2.setHostStateEntity(hostStateEntity2);
+
+ ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
+ clusterServiceEntity.setServiceName("HDFS");
+ clusterServiceEntity.setClusterEntity(clusterEntity);
+ clusterServiceEntity.setServiceComponentDesiredStateEntities(
+ Collections.EMPTY_LIST);
+ clusterServiceEntity.setServiceConfigMappings(Collections.EMPTY_LIST);
+ ServiceDesiredStateEntity stateEntity = mock(ServiceDesiredStateEntity.class);
+ Gson gson = new Gson();
+ when(stateEntity.getDesiredStackVersion()).thenReturn(gson.toJson(new StackId("HDP-0.1"),
+ StackId.class));
+ clusterServiceEntity.setServiceDesiredStateEntity(stateEntity);
+ List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
+ clusterServiceEntities.add(clusterServiceEntity);
+ clusterEntity.setClusterServiceEntities(clusterServiceEntities);
+ return clusterEntity;
+ }
+
+ @Before
+ public void setup() throws Exception {
+ injector = Guice.createInjector(new InMemoryDefaultTestModule());
+ injector.getInstance(GuiceJpaInitializer.class);
+ /*
+ clusters = injector.getInstance(Clusters.class);
+ serviceFactory = injector.getInstance(ServiceFactory.class);
+ serviceComponentFactory = injector.getInstance(
+ ServiceComponentFactory.class);
+ serviceComponentHostFactory = injector.getInstance(
+ ServiceComponentHostFactory.class);
+ configFactory = injector.getInstance(ConfigFactory.class);
+ metaInfo = injector.getInstance(AmbariMetaInfo.class);
+ metaInfo.init();
+ clusters.addCluster("c1");
+ c1 = clusters.getCluster("c1");
+ Assert.assertEquals("c1", c1.getClusterName());
+ Assert.assertEquals(1, c1.getClusterId());
+ clusters.addHost("h1");
+ Host host = clusters.getHost("h1");
+ host.setIPv4("ipv4");
+ host.setIPv6("ipv6");
+ host.setOsType("centos5");
+ host.persist();
+ c1.setDesiredStackVersion(new StackId("HDP-0.1"));
+ clusters.mapHostToCluster("h1", "c1");
+ */
+ }
+
+ @After
+ public void teardown() {
+ injector.getInstance(PersistService.class).stop();
+ }
+
@Test
public void testValidateOrder() {
RoleCommandOrder rco = new RoleCommandOrder();
- RoleCommandOrder.initialize();
+ ClusterEntity entity = createDummyData();
+ ClusterImpl cluster = new ClusterImpl(entity, injector);
+ rco.initialize(cluster);
RoleGraphNode datanode_upgrade = new RoleGraphNode(Role.DATANODE, RoleCommand.UPGRADE);
RoleGraphNode hdfs_client_upgrade = new RoleGraphNode(Role.HDFS_CLIENT, RoleCommand.UPGRADE);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestDynamicRCO.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestDynamicRCO.java
new file mode 100644
index 00000000000..360b4255e65
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestDynamicRCO.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.stageplanner;
+
+import static org.junit.Assert.*;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.controller.HostsMap;
+import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
+import org.apache.ambari.server.utils.StageUtils;
+import org.junit.Test;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.orm.entities.HostStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.StackId;
+import com.google.gson.Gson;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class TestDynamicRCO {
+
+ private static Logger LOG = LoggerFactory.getLogger(TestDynamicRCO.class);
+
+ private Injector injector;
+
+
+ public ClusterEntity createDummyData(String stackString, String serviceString) {
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName("test_cluster1");
+ clusterEntity.setClusterInfo("test_cluster_info1");
+
+ HostEntity host1 = new HostEntity();
+ HostEntity host2 = new HostEntity();
+ HostEntity host3 = new HostEntity();
+
+ host1.setHostName("test_host1");
+ host2.setHostName("test_host2");
+ host3.setHostName("test_host3");
+ host1.setIpv4("192.168.0.1");
+ host2.setIpv4("192.168.0.2");
+ host3.setIpv4("192.168.0.3");
+
+ List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+ hostEntities.add(host1);
+ hostEntities.add(host2);
+
+ clusterEntity.setHostEntities(hostEntities);
+ clusterEntity.setClusterConfigEntities(Collections.EMPTY_LIST);
+ //both sides of relation should be set when modifying in runtime
+ host1.setClusterEntities(Arrays.asList(clusterEntity));
+ host2.setClusterEntities(Arrays.asList(clusterEntity));
+
+ HostStateEntity hostStateEntity1 = new HostStateEntity();
+ hostStateEntity1.setCurrentState(HostState.HEARTBEAT_LOST);
+ hostStateEntity1.setHostEntity(host1);
+ HostStateEntity hostStateEntity2 = new HostStateEntity();
+ hostStateEntity2.setCurrentState(HostState.HEALTHY);
+ hostStateEntity2.setHostEntity(host2);
+ host1.setHostStateEntity(hostStateEntity1);
+ host2.setHostStateEntity(hostStateEntity2);
+
+ ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
+ clusterServiceEntity.setServiceName(serviceString);
+ clusterServiceEntity.setClusterEntity(clusterEntity);
+ clusterServiceEntity.setServiceComponentDesiredStateEntities(
+ Collections.EMPTY_LIST);
+ clusterServiceEntity.setServiceConfigMappings(Collections.EMPTY_LIST);
+ ServiceDesiredStateEntity stateEntity = mock(ServiceDesiredStateEntity.class);
+ Gson gson = new Gson();
+ when(stateEntity.getDesiredStackVersion()).thenReturn(gson.toJson(new StackId(stackString),
+ StackId.class));
+ clusterServiceEntity.setServiceDesiredStateEntity(stateEntity);
+ List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
+ clusterServiceEntities.add(clusterServiceEntity);
+ clusterEntity.setClusterServiceEntities(clusterServiceEntities);
+ return clusterEntity;
+ }
+
+ @Before
+ public void setup() throws Exception {
+ injector = Guice.createInjector(new InMemoryDefaultTestModule());
+ injector.getInstance(GuiceJpaInitializer.class);
+ }
+
+ @After
+ public void teardown() {
+ injector.getInstance(PersistService.class).stop();
+ }
+
+ static private String toString(int v) {
+ if (v == 0) {
+ return "True";
+ } else {
+ return "False";
+ }
+ }
+
+ @Test
+ public void testDynamicRCO() {
+
+ RoleCommandOrder rcoHDFS1 = new RoleCommandOrder();
+ RoleCommandOrder rcoHDFS2 = new RoleCommandOrder();
+ RoleCommandOrder rcoHCFS = new RoleCommandOrder();
+
+ ClusterEntity entityHDFS1 = createDummyData("HDP-1.3.0", "HDFS");
+ ClusterEntity entityHDFS2 = createDummyData("HDP-1.3.1", "HDFS");
+ ClusterEntity entityHCFS = createDummyData("HDP-1.3.1", "HCFS");
+
+ ClusterImpl clusterHDFS1 = new ClusterImpl(entityHDFS1, injector);
+ ClusterImpl clusterHDFS2 = new ClusterImpl(entityHDFS2, injector);
+ ClusterImpl clusterHCFS = new ClusterImpl(entityHCFS, injector);
+
+ rcoHDFS1.initialize(clusterHDFS1);
+ rcoHDFS2.initialize(clusterHDFS2);
+ rcoHCFS.initialize(clusterHCFS);
+
+ int c1, c2;
+ c1 = rcoHDFS1.compareDeps(rcoHDFS2);
+ LOG.debug("HDFS deps match for stacks HDP-1.3.0 and HDP-1.3.1 = " + toString(c1) + ", expected True");
+
+ c2 = rcoHDFS2.compareDeps(rcoHCFS);
+ LOG.debug("HDFS deps and HCFS deps match for stack HDP-1.3.1 = " + toString(c2) + ", expected False");
+
+ assertEquals(c1, 0);
+ assertFalse("HCFS deps should differ", c2 == 0);
+
+ }
+
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
index 69cdc175b32..8835bd88922 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
@@ -21,6 +21,11 @@
import java.util.HashMap;
import java.util.List;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
@@ -31,12 +36,125 @@
import org.apache.ambari.server.utils.StageUtils;
import org.junit.Test;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.orm.entities.HostStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.StackId;
+import com.google.gson.Gson;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.junit.After;
+import org.junit.Before;
+
public class TestStagePlanner {
+ private Injector injector;
+
+ public ClusterEntity createDummyData() {
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName("test_cluster1");
+ clusterEntity.setClusterInfo("test_cluster_info1");
+
+ HostEntity host1 = new HostEntity();
+ HostEntity host2 = new HostEntity();
+ HostEntity host3 = new HostEntity();
+
+ host1.setHostName("test_host1");
+ host2.setHostName("test_host2");
+ host3.setHostName("test_host3");
+ host1.setIpv4("192.168.0.1");
+ host2.setIpv4("192.168.0.2");
+ host3.setIpv4("192.168.0.3");
+
+ List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+ hostEntities.add(host1);
+ hostEntities.add(host2);
+
+ clusterEntity.setHostEntities(hostEntities);
+ clusterEntity.setClusterConfigEntities(Collections.EMPTY_LIST);
+ //both sides of relation should be set when modifying in runtime
+ host1.setClusterEntities(Arrays.asList(clusterEntity));
+ host2.setClusterEntities(Arrays.asList(clusterEntity));
+
+ HostStateEntity hostStateEntity1 = new HostStateEntity();
+ hostStateEntity1.setCurrentState(HostState.HEARTBEAT_LOST);
+ hostStateEntity1.setHostEntity(host1);
+ HostStateEntity hostStateEntity2 = new HostStateEntity();
+ hostStateEntity2.setCurrentState(HostState.HEALTHY);
+ hostStateEntity2.setHostEntity(host2);
+ host1.setHostStateEntity(hostStateEntity1);
+ host2.setHostStateEntity(hostStateEntity2);
+
+ ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
+ clusterServiceEntity.setServiceName("HDFS");
+ clusterServiceEntity.setClusterEntity(clusterEntity);
+ clusterServiceEntity.setServiceComponentDesiredStateEntities(
+ Collections.EMPTY_LIST);
+ clusterServiceEntity.setServiceConfigMappings(Collections.EMPTY_LIST);
+ ServiceDesiredStateEntity stateEntity = mock(ServiceDesiredStateEntity.class);
+ Gson gson = new Gson();
+ when(stateEntity.getDesiredStackVersion()).thenReturn(gson.toJson(new StackId("HDP-0.1"),
+ StackId.class));
+ clusterServiceEntity.setServiceDesiredStateEntity(stateEntity);
+ List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
+ clusterServiceEntities.add(clusterServiceEntity);
+ clusterEntity.setClusterServiceEntities(clusterServiceEntities);
+ return clusterEntity;
+ }
+
+ @Before
+ public void setup() throws Exception {
+ injector = Guice.createInjector(new InMemoryDefaultTestModule());
+ injector.getInstance(GuiceJpaInitializer.class);
+ /*
+ clusters = injector.getInstance(Clusters.class);
+ serviceFactory = injector.getInstance(ServiceFactory.class);
+ serviceComponentFactory = injector.getInstance(
+ ServiceComponentFactory.class);
+ serviceComponentHostFactory = injector.getInstance(
+ ServiceComponentHostFactory.class);
+ configFactory = injector.getInstance(ConfigFactory.class);
+ metaInfo = injector.getInstance(AmbariMetaInfo.class);
+ metaInfo.init();
+ clusters.addCluster("c1");
+ c1 = clusters.getCluster("c1");
+ Assert.assertEquals("c1", c1.getClusterName());
+ Assert.assertEquals(1, c1.getClusterId());
+ clusters.addHost("h1");
+ Host host = clusters.getHost("h1");
+ host.setIPv4("ipv4");
+ host.setIPv6("ipv6");
+ host.setOsType("centos5");
+ host.persist();
+ c1.setDesiredStackVersion(new StackId("HDP-0.1"));
+ clusters.mapHostToCluster("h1", "c1");
+ */
+ }
+
+ @After
+ public void teardown() {
+ injector.getInstance(PersistService.class).stop();
+ }
+
@Test
public void testSingleStagePlan() {
- RoleCommandOrder.initialize();
RoleCommandOrder rco = new RoleCommandOrder();
+ ClusterEntity entity = createDummyData();
+ ClusterImpl cluster = new ClusterImpl(entity, injector);
+ rco.initialize(cluster);
+
RoleGraph rg = new RoleGraph(rco);
String hostname = "dummy";
Stage stage = StageUtils.getATestStage(1, 1, hostname);
@@ -52,8 +170,10 @@ public void testSingleStagePlan() {
@Test
public void testMultiStagePlan() {
- RoleCommandOrder.initialize();
RoleCommandOrder rco = new RoleCommandOrder();
+ ClusterEntity entity = createDummyData();
+ ClusterImpl cluster = new ClusterImpl(entity, injector);
+ rco.initialize(cluster);
RoleGraph rg = new RoleGraph(rco);
long now = System.currentTimeMillis();
Stage stage = StageUtils.getATestStage(1, 1, "host1");
@@ -76,8 +196,10 @@ RoleCommand.START, new ServiceComponentHostStartEvent("ZOOKEEPER_SERVER",
@Test
public void testManyStages() {
- RoleCommandOrder.initialize();
RoleCommandOrder rco = new RoleCommandOrder();
+ ClusterEntity entity = createDummyData();
+ ClusterImpl cluster = new ClusterImpl(entity, injector);
+ rco.initialize(cluster);
RoleGraph rg = new RoleGraph(rco);
long now = System.currentTimeMillis();
Stage stage = StageUtils.getATestStage(1, 1, "host1");
diff --git a/ambari-server/src/test/python/TestAmbaryServer.py b/ambari-server/src/test/python/TestAmbaryServer.py
index 57b02c8affc..2eda6a7e59f 100644
--- a/ambari-server/src/test/python/TestAmbaryServer.py
+++ b/ambari-server/src/test/python/TestAmbaryServer.py
@@ -384,6 +384,52 @@ def test_main_test_start(self, OptionParserMock, reset_method, stop_method,
self.assertFalse(False, ambari_server.SILENT)
+
+ @patch.object(ambari_server, 'setup')
+ @patch.object(ambari_server, 'start')
+ @patch.object(ambari_server, 'stop')
+ @patch.object(ambari_server, 'reset')
+ @patch('optparse.OptionParser')
+ def test_main_test_start_debug_short(self, OptionParserMock, reset_method, stop_method,
+ start_method, setup_method):
+ opm = OptionParserMock.return_value
+ options = MagicMock()
+ args = ["start", "-g"]
+ opm.parse_args.return_value = (options, args)
+
+ ambari_server.main()
+
+ self.assertFalse(setup_method.called)
+ self.assertTrue(start_method.called)
+ self.assertFalse(stop_method.called)
+ self.assertFalse(reset_method.called)
+
+ self.assertTrue(ambari_server.SERVER_DEBUG_MODE)
+
+
+
+ @patch.object(ambari_server, 'setup')
+ @patch.object(ambari_server, 'start')
+ @patch.object(ambari_server, 'stop')
+ @patch.object(ambari_server, 'reset')
+ @patch('optparse.OptionParser')
+ def test_main_test_start_debug_long(self, OptionParserMock, reset_method, stop_method,
+ start_method, setup_method):
+ opm = OptionParserMock.return_value
+ options = MagicMock()
+ args = ["start", "--debug"]
+ opm.parse_args.return_value = (options, args)
+
+ ambari_server.main()
+
+ self.assertFalse(setup_method.called)
+ self.assertTrue(start_method.called)
+ self.assertFalse(stop_method.called)
+ self.assertFalse(reset_method.called)
+
+ self.assertTrue(ambari_server.SERVER_DEBUG_MODE)
+
+
@patch.object(ambari_server, 'setup')
@patch.object(ambari_server, 'start')
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/metainfo.xml
new file mode 100644
index 00000000000..5d9cb06e0fe
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/metainfo.xml
@@ -0,0 +1,23 @@
+
+
+
+
+ 1.2.0
+ true
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/repos/repoinfo.xml
new file mode 100644
index 00000000000..cff983a78f4
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/repos/repoinfo.xml
@@ -0,0 +1,111 @@
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos6
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos5
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos6
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos5
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos6
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/centos5
+ HDP-1.3.0
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/suse11
+ HDP-1.3.0
+ HDP
+
+
+
+
+ http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.3.0/repos/suse11
+ HDP-1.3.0
+ HDP
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml
new file mode 100644
index 00000000000..16df0b8fbce
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml
@@ -0,0 +1,55 @@
+
+
+
+
+
+
+ ganglia_conf_dir
+ /etc/ganglia/hdp
+ Config directory for Ganglia
+
+
+ ganglia_runtime_dir
+ /var/run/ganglia/hdp
+ Run directories for Ganglia
+
+
+ ganglia_runtime_dir
+ /var/run/ganglia/hdp
+ Run directories for Ganglia
+
+
+ gmetad_user
+ nobody
+ User
+
+
+ gmond_user
+ nobody
+ User
+
+
+ rrdcached_base_dir
+ /var/lib/ganglia/rrds
+ Default directory for saving the rrd files on ganglia server
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/metainfo.xml
new file mode 100644
index 00000000000..0b21f0f6348
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/GANGLIA/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ root
+ Ganglia Metrics Collection system
+ 3.2.0
+
+
+
+ GANGLIA_SERVER
+ MASTER
+
+
+
+ GANGLIA_MONITOR
+ SLAVE
+
+
+
+ MONITOR_WEBSERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
new file mode 100644
index 00000000000..82996b0d619
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
@@ -0,0 +1,165 @@
+
+
+
+
+
+
+ hbasemaster_host
+
+ HBase Master Host.
+
+
+ regionserver_hosts
+
+ Region Server Hosts
+
+
+ hbase_log_dir
+ /var/log/hbase
+ Log Directories for HBase.
+
+
+ hbase_pid_dir
+ /var/run/hbase
+ PID Directory for HBase.
+
+
+ hbase_log_dir
+ /var/log/hbase
+ Log Directories for HBase.
+
+
+ hbase_regionserver_heapsize
+ 1024
+ HBase RegionServer Heap Size.
+
+
+ hbase_master_heapsize
+ 1024
+ HBase Master Heap Size
+
+
+ hstore_compactionthreshold
+ 3
+ HBase HStore compaction threshold.
+
+
+ hfile_blockcache_size
+ 0.25
+ HFile block cache size.
+
+
+ hstorefile_maxsize
+ 1073741824
+ Maximum HStoreFile Size
+
+
+ regionserver_handlers
+ 30
+ HBase RegionServer Handler
+
+
+ hregion_majorcompaction
+ 86400000
+ HBase Major Compaction.
+
+
+ hregion_blockmultiplier
+ 2
+ HBase Region Block Multiplier
+
+
+ hregion_memstoreflushsize
+
+ HBase Region MemStore Flush Size.
+
+
+ client_scannercaching
+ 100
+ Base Client Scanner Caching
+
+
+ zookeeper_sessiontimeout
+ 60000
+ ZooKeeper Session Timeout
+
+
+ hfile_max_keyvalue_size
+ 10485760
+ HBase Client Maximum key-value Size
+
+
+ hbase_hdfs_root_dir
+ /apps/hbase/data
+ HBase Relative Path to HDFS.
+
+
+ hbase_tmp_dir
+ /var/log/hbase
+ Hbase temp directory
+
+
+ hbase_conf_dir
+ /etc/hbase
+ Config Directory for HBase.
+
+
+ hdfs_enable_shortcircuit_read
+ true
+ HDFS Short Circuit Read
+
+
+ hdfs_support_append
+ true
+ HDFS append support
+
+
+ hstore_blockingstorefiles
+ 7
+ HStore blocking storefiles.
+
+
+ regionserver_memstore_lab
+ true
+ Region Server memstore.
+
+
+ regionserver_memstore_lowerlimit
+ 0.35
+ Region Server memstore lower limit.
+
+
+ regionserver_memstore_upperlimit
+ 0.4
+ Region Server memstore upper limit.
+
+
+ hbase_conf_dir
+ /etc/hbase
+ HBase conf dir.
+
+
+ hbase_user
+ hbase
+ HBase User Name.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 00000000000..e45f23c962c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for HRegionInterface protocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.protocol.acl
+ *
+ ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.masterregion.protocol.acl
+ *
+ ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 00000000000..92181ba171b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,365 @@
+
+
+
+
+
+ hbase.rootdir
+
+ The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration else all data will be lost
+ on machine restart.
+
+
+
+ hbase.cluster.distributed
+ true
+ The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+
+
+
+ hbase.tmp.dir
+
+ Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp' (The '/tmp' directory is often cleared on
+ machine restart).
+
+
+
+ hbase.master.info.bindAddress
+
+ The bind address for the HBase Master web UI
+
+
+
+ hbase.master.info.port
+
+ The port for the HBase Master web UI.
+
+
+ hbase.regionserver.info.port
+
+ The port for the HBase RegionServer web UI.
+
+
+ hbase.regionserver.global.memstore.upperLimit
+
+ Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+
+
+
+ hbase.regionserver.handler.count
+
+ Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+ Default is 10.
+
+
+
+ hbase.hregion.majorcompaction
+
+ The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+
+
+
+ hbase.master.lease.thread.wakefrequency
+ 3000
+ The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+
+
+
+ hbase.regionserver.global.memstore.lowerLimit
+
+ When memstores are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+ This value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+
+
+
+ hbase.hregion.memstore.block.multiplier
+
+ Block updates if memstore has hbase.hregion.memstore.block.multiplier
+ times hbase.hregion.flush.size bytes. Useful for preventing
+ runaway memstore during spikes in update traffic. Without an
+ upper-bound, memstore fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+
+
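As a rough, illustrative calculation of the blocking threshold described above (the multiplier and flush size below are typical HBase defaults, not values taken from this stack definition, which leaves both empty):

# Sketch of the update-blocking threshold implied by the description above.
block_multiplier = 2              # hbase.hregion.memstore.block.multiplier (assumed default)
flush_size_bytes = 134217728      # hbase.hregion.memstore.flush.size, 128 MB (assumed default)

blocking_threshold = block_multiplier * flush_size_bytes
print("updates block once a memstore exceeds %d bytes" % blocking_threshold)  # 268435456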
+
+ hbase.hregion.memstore.flush.size
+
+
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+
+
+
+ hbase.hregion.memstore.mslab.enabled
+
+
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+
+
+
+ hbase.hregion.max.filesize
+
+
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 1G.
+
+
+
+ hbase.client.scanner.caching
+
+ Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+
+
+
+ zookeeper.session.timeout
+ 60000
+ ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+
+
+
+ hbase.client.keyvalue.maxsize
+
+ Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+ storage file. Since they cannot be split it helps avoiding that a region
+ cannot be split any further because the data is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+
+
+
+ hbase.hstore.compactionThreshold
+
+
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+
+
+
+ hbase.hstore.blockingStoreFiles
+
+
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+
+
+
+ hfile.block.cache.size
+
+
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+ used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+ Set to 0 to disable but it's not recommended.
+
+
+
+
+
+ hbase.master.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HMaster server principal.
+
+
+
+ hbase.master.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HMaster process. The principal name should
+ be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+ portion, it will be replaced with the actual hostname of the running
+ instance.
+
+
+
+ hbase.regionserver.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HRegionServer server principal.
+
+
+
+ hbase.regionserver.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HRegionServer process. The principal name
+ should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+ hostname portion, it will be replaced with the actual hostname of the
+ running instance. An entry for this principal must exist in the file
+ specified in hbase.regionserver.keytab.file
+
+
+
+
+
+ hbase.superuser
+
+ List of users or groups (comma-separated), who are allowed
+ full privileges, regardless of stored ACLs, across the cluster.
+ Only used when HBase security is enabled.
+
+
+
+
+ hbase.coprocessor.region.classes
+
+ A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+
+
+
+
+ hbase.coprocessor.master.classes
+
+ A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+
+
+
+
+ hbase.zookeeper.property.clientPort
+ 2181
+ Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+
+
+
+
+
+ hbase.zookeeper.quorum
+
+ Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+
+
+
+
+
+ dfs.support.append
+
+ Does HDFS allow appends to files?
+ This is an hdfs config. set in here so the hdfs client will do append support.
+ You must ensure that this config. is true serverside too when running hbase
+ (You will have to restart your cluster after setting it).
+
+
+
+
+ dfs.client.read.shortcircuit
+
+ Enable/Disable short circuit read for your client.
+ Hadoop servers should be configured to allow short circuit read
+ for the hbase user for this to take effect
+
+
+
+
+ dfs.client.read.shortcircuit.skip.checksum
+
+ Enable/disable skipping the checksum check
+
+
+
+ hbase.regionserver.optionalcacheflushinterval
+ 10000
+
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+
+
+
+ hbase.zookeeper.useMulti
+ true
+ Instructs HBase to make use of ZooKeeper's multi-update functionality.
+ This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+ with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+ IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+ and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
+ not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+
+
+
+ zookeeper.znode.parent
+
+ Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+ files that are configured with a relative path will go under this node.
+ By default, all of HBase's ZooKeeper file path are configured with a
+ relative path, so they will all go under this directory unless changed.
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/metainfo.xml
new file mode 100644
index 00000000000..1bb3d143675
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ mapred
+ Non-relational distributed database and centralized service for configuration management & synchronization
+ 0.94.6.1.3.0.0
+
+
+
+ HBASE_MASTER
+ MASTER
+
+
+
+ HBASE_REGIONSERVER
+ SLAVE
+
+
+
+ HBASE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml
new file mode 100644
index 00000000000..dd89409bf99
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+ hcat_log_dir
+ /var/log/webhcat
+ WebHCat Log Dir.
+
+
+ hcat_pid_dir
+ /etc/run/webhcat
+ WebHCat Pid Dir.
+
+
+ hcat_user
+ hcat
+ HCat User.
+
+
+ webhcat_user
+ hcat
+ WebHCat User.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/metainfo.xml
new file mode 100644
index 00000000000..dd5ff08f4bf
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HCATALOG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ This is comment for HCATALOG service
+ 0.6.0.1.3.0.0
+
+
+
+ HCAT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/core-site.xml
new file mode 100644
index 00000000000..94ffbbb9911
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,253 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
+
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+ base filter substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
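A small worked example of the principal-mapping rules described above; this is a pure-Python approximation for illustration only, since the authoritative mapping is done by Hadoop's KerberosName class, not by code like this:

# Illustrative approximation of the rule "RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//".
import re

def apply_acme_rule(principal):
    short, realm = principal.split("@")
    parts = short.split("/")
    if len(parts) != 1:                       # rule base [1:...] needs one component
        return None
    candidate = "%s@%s" % (parts[0], realm)   # format string "$1@$0"
    if re.match(r".*@ACME\.ORG$", candidate): # filter "(.*@ACME.ORG)"
        return re.sub(r"@.*", "", candidate)  # sed part "s/@.*//"
    return None

print(apply_acme_rule("joe@ACME.ORG"))        # joe
print(apply_acme_rule("omalley@APACHE.ORG"))  # None - falls through to DEFAULT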
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml
new file mode 100644
index 00000000000..dbbe81bb975
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml
@@ -0,0 +1,192 @@
+
+
+
+
+
+
+ namenode_host
+
+ NameNode Host.
+
+
+ dfs_name_dir
+ /hadoop/hdfs/namenode
+ NameNode Directories.
+
+
+ snamenode_host
+
+ Secondary NameNode.
+
+
+ fs_checkpoint_dir
+ /hadoop/hdfs/namesecondary
+ Secondary NameNode checkpoint dir.
+
+
+ datanode_hosts
+
+ List of Datanode Hosts.
+
+
+ dfs_data_dir
+ /hadoop/hdfs/data
+ Data directories for Data Nodes.
+
+
+ hdfs_log_dir_prefix
+ /var/log/hadoop
+ Hadoop Log Dir Prefix
+
+
+ hadoop_pid_dir_prefix
+ /var/run/hadoop
+ Hadoop PID Dir Prefix
+
+
+ dfs_webhdfs_enabled
+ true
+ WebHDFS enabled
+
+
+ hadoop_heapsize
+ 1024
+ Hadoop maximum Java heap size
+
+
+ namenode_heapsize
+ 1024
+ NameNode Java heap size
+
+
+ namenode_opt_newsize
+ 200
+ NameNode new generation size
+
+
+ namenode_opt_maxnewsize
+ 640
+ NameNode maximum new generation size
+
+
+ datanode_du_reserved
+ 1
+ Reserved space for HDFS
+
+
+ dtnode_heapsize
+ 1024
+ DataNode maximum Java heap size
+
+
+ dfs_datanode_failed_volume_tolerated
+ 0
+ DataNode volumes failure toleration
+
+
+ fs_checkpoint_period
+ 21600
+ HDFS Maximum Checkpoint Delay
+
+
+ fs_checkpoint_size
+ 0.5
+ FS Checkpoint Size.
+
+
+ proxyuser_group
+ users
+ Proxy user group.
+
+
+ dfs_exclude
+
+ HDFS Exclude hosts.
+
+
+ dfs_include
+
+ HDFS Include hosts.
+
+
+ dfs_replication
+ 3
+ Default Block Replication.
+
+
+ dfs_block_local_path_access_user
+ hbase
+ User allowed to perform short-circuit reads.
+
+
+ dfs_datanode_address
+ 50010
+ Port for datanode address.
+
+
+ dfs_datanode_http_address
+ 50075
+ Port for datanode HTTP address.
+
+
+ dfs_datanode_data_dir_perm
+ 750
+ Datanode dir perms.
+
+
+
+ security_enabled
+ false
+ Hadoop Security
+
+
+ kerberos_domain
+ EXAMPLE.COM
+ Kerberos realm.
+
+
+ kadmin_pw
+
+ Kerberos realm admin password
+
+
+ keytab_path
+ /etc/security/keytabs
+ Kerberos keytab path.
+
+
+
+ keytab_path
+ /etc/security/keytabs
+ KeyTab Directory.
+
+
+ namenode_formatted_mark_dir
+ /var/run/hadoop/hdfs/namenode/formatted/
+ Formatted Mark Directory.
+
+
+ hdfs_user
+ hdfs
+ User and Groups.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..900da99ef0f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communciate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..2102ddc0bfe
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,425 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+
+ to enable webhdfs
+ true
+
+
+
+ dfs.datanode.socket.write.timeout
+ 0
+ DFS Client write socket timeout
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+
+ #of failed disks dn would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ dfs.hosts
+
+ Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+
+
+
+
+ dfs.datanode.address
+
+
+
+
+ dfs.datanode.http.address
+
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The address and the base port where the dfs namenode
+web UI will listen on.
+true
+
+
+
+dfs.datanode.du.reserved
+
+
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+4096
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+077
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow Queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+
+The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+ The access time for HDFS files is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL for who all can view the default servlets in the HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ Number of failed disks datanode would tolerate
+
+
+
+ dfs.namenode.check.stale.datanode
+ true
+
+ With this setting, the datanodes that have not replied to the heartbeat
+ for more than 30s (i.e. in a stale state) are used for reads only if all
+ other remote replicas have failed.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/metainfo.xml
new file mode 100644
index 00000000000..c29bb61140f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/metainfo.xml
@@ -0,0 +1,46 @@
+
+
+
+ root
+ Apache Hadoop Distributed File System
+ 1.2.0.1.3.0.0
+
+
+
+ NAMENODE
+ MASTER
+
+
+
+ DATANODE
+ SLAVE
+
+
+
+ SECONDARY_NAMENODE
+ MASTER
+
+
+
+ HDFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml
new file mode 100644
index 00000000000..d9adc80fc43
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml
@@ -0,0 +1,125 @@
+
+
+
+
+
+
+ hivemetastore_host
+
+ Hive Metastore host.
+
+
+ hive_database
+
+ Hive database name.
+
+
+ hive_existing_mysql_database
+
+ Hive database name.
+
+
+ hive_existing_mysql_host
+
+
+
+
+ hive_existing_oracle_database
+
+ Hive database name.
+
+
+ hive_existing_oracle_host
+
+
+
+
+ hive_ambari_database
+ MySQL
+ Database type.
+
+
+ hive_ambari_host
+
+ Database hostname.
+
+
+ hive_database_name
+
+ Database name.
+
+
+ hive_metastore_user_name
+ hive
+ Database username to use to connect to the database.
+
+
+ hive_metastore_user_passwd
+
+ Database password to use to connect to the database.
+
+
+ hive_metastore_port
+ 9083
+ Hive Metastore port.
+
+
+ hive_lib
+ /usr/lib/hive/lib/
+ Hive Library.
+
+
+ hive_dbroot
+ /usr/lib/hive/lib/
+ Hive DB Directory.
+
+
+ hive_conf_dir
+ /etc/hive/conf
+ Hive Conf Dir.
+
+
+ hive_log_dir
+ /var/log/hive
+ Directory for Hive Log files.
+
+
+ hive_pid_dir
+ /var/run/hive
+ Hive PID Dir.
+
+
+ mysql_connector_url
+ ${download_url}/mysql-connector-java-5.1.18.zip
+ MySQL connector download URL.
+
+
+ hive_aux_jars_path
+ /usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
+ Hive auxiliary jar path.
+
+
+ hive_user
+ hive
+ Hive User.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 00000000000..40fa0a7a09d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,243 @@
+
+
+
+
+
+
+ hive.metastore.local
+ false
+ controls whether to connect to remote metastore server or
+ open a new metastore server in Hive Client JVM
+
+
+
+ javax.jdo.option.ConnectionURL
+
+ JDBC connect string for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionDriverName
+ com.mysql.jdbc.Driver
+ Driver class name for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionUserName
+
+ username to use against metastore database
+
+
+
+ javax.jdo.option.ConnectionPassword
+
+ password to use against metastore database
+
+
+
+ hive.metastore.warehouse.dir
+ /apps/hive/warehouse
+ location of default database for the warehouse
+
+
+
+ hive.metastore.sasl.enabled
+
+ If true, the metastore thrift interface will be secured with SASL.
+ Clients must authenticate with Kerberos.
+
+
+
+ hive.metastore.kerberos.keytab.file
+
+ The path to the Kerberos Keytab file containing the metastore
+ thrift server's service principal.
+
+
+
+ hive.metastore.kerberos.principal
+
+ The service principal for the metastore thrift server. The special
+ string _HOST will be replaced automatically with the correct host name.
+
+
+
+ hive.metastore.cache.pinobjtypes
+ Table,Database,Type,FieldSchema,Order
+ List of comma separated metastore object types that should be pinned in the cache
+
+
+
+ hive.metastore.uris
+
+ URI for client to contact metastore server
+
+
+
+ hive.semantic.analyzer.factory.impl
+ org.apache.hivealog.cli.HCatSemanticAnalyzerFactory
+ controls which SemanticAnalyzerFactory implementation class is used by CLI
+
+
+
+ hadoop.clientside.fs.operations
+ true
+ FS operations are owned by client
+
+
+
+ hive.metastore.client.socket.timeout
+ 60
+ MetaStore Client socket timeout in seconds
+
+
+
+ hive.metastore.execute.setugi
+ true
+ In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort. If the client sets it to true and the server sets it to false, the client setting will be ignored.
+
+
+
+ hive.security.authorization.enabled
+ true
+ enable or disable the hive client authorization
+
+
+
+ hive.security.authorization.manager
+ org.apache.hcatalog.security.HdfsAuthorizationProvider
+ the hive client authorization manager class name.
+ The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+
+
+
+ hive.server2.enable.doAs
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
+ fs.file.impl.disable.cache
+ true
+
+
+
+ hive.enforce.bucketing
+ true
+ Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.
+
+
+
+ hive.enforce.sorting
+ true
+ Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.
+
+
+
+ hive.map.aggr
+ true
+ Whether to use map-side aggregation in Hive Group By queries.
+
+
+
+ hive.optimize.bucketmapjoin
+ true
+
+
+
+ hive.optimize.bucketmapjoin.sortedmerge
+ true
+
+
+
+ hive.mapred.reduce.tasks.speculative.execution
+ false
+ Whether speculative execution for reducers should be turned on.
+
+
+
+ hive.auto.convert.join
+ true
+ Whether Hive enables the optimization of converting a common
+ join into a map join based on the input file size.
+
+
+
+ hive.auto.convert.sortmerge.join
+ true
+ Will the join be automatically converted to a sort-merge join, if the joined tables pass
+ the criteria for sort-merge join.
+
+
+
+
+ hive.auto.convert.sortmerge.join.noconditionaltask
+ true
+
+
+
+ hive.auto.convert.join.noconditionaltask
+ true
+ Whether Hive enables the optimization of converting a common join into a map join based on the input file
+ size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
+ specified size, the join is directly converted to a mapjoin (there is no conditional task).
+
+
+
+
+ hive.auto.convert.join.noconditionaltask.size
+ 1000000000
+ If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+ is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
+ converted to a mapjoin (there is no conditional task). The default is 10MB.
+
+
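An illustration of the size check described above; the table sizes are made-up numbers, and the real decision is made inside Hive's optimizer rather than by code like this:

# Sketch: an n-way join converts to a map join with no conditional task when the
# combined size of the n-1 smaller inputs is below the configured threshold.
threshold_bytes = 1000000000                          # hive.auto.convert.join.noconditionaltask.size
table_sizes = [50 * 2**20, 120 * 2**20, 80 * 2**30]   # invented: two small tables, one large fact table

small_side = sum(sorted(table_sizes)[:-1])            # everything except the largest input
print(small_side < threshold_bytes)                   # True: ~170 MB < ~1 GB, so convert to map join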
+
+
+ hive.optimize.reducededuplication.min.reducer
+ 1
+ Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
+ That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
+ The optimization will be disabled if number of reducers is less than specified value.
+
+
+
+
+ hive.optimize.mapjoin.mapreduce
+ true
+ If hive.auto.convert.join is off, this parameter does not take
+ effect. If it is on, and if there are map-join jobs followed by a map-reduce
+ job (e.g. a group by), each map-only job is merged with the following
+ map-reduce job.
+
+
+
+
+ hive.mapjoin.bucket.cache.size
+ 10000
+
+ Size per reducer. The default is 1G, i.e. if the input size is 10G, it
+ will use 10 reducers.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/metainfo.xml
new file mode 100644
index 00000000000..520ccec1c8f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/metainfo.xml
@@ -0,0 +1,43 @@
+
+
+
+ root
+ Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service
+ 0.11.0.1.3.0.0
+
+
+
+ HIVE_METASTORE
+ MASTER
+
+
+ HIVE_SERVER
+ MASTER
+
+
+ MYSQL_SERVER
+ MASTER
+
+
+ HIVE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml
new file mode 100644
index 00000000000..c49480f4190
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml
@@ -0,0 +1,35 @@
+
+
+
+
+
+
+ hue_pid_dir
+ /var/run/hue
+ Hue Pid Dir.
+
+
+ hue_log_dir
+ /var/log/hue
+ Hue Log Dir.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/hue-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/hue-site.xml
new file mode 100644
index 00000000000..6eb52a23cc4
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/configuration/hue-site.xml
@@ -0,0 +1,290 @@
+
+
+
+
+
+
+
+ send_debug_messages
+ 1
+
+
+
+
+ database_logging
+ 0
+ To show database transactions, set database_logging to 1.
+ By default, database_logging=0.
+
+
+
+ secret_key
+
+ This is used for secure hashing in the session store.
+
+
+
+ http_host
+ 0.0.0.0
+ Webserver listens on this address and port
+
+
+
+ http_port
+ 8000
+ Webserver listens on this address and port
+
+
+
+ time_zone
+ America/Los_Angeles
+ Time zone name
+
+
+
+ django_debug_mode
+ 1
+ Turn off debug
+
+
+
+ use_cherrypy_server
+ false
+ Set to true to use CherryPy as the webserver, set to false
+ to use Spawning as the webserver. Defaults to Spawning if
+ key is not specified.
+
+
+
+ http_500_debug_mode
+ 1
+ Turn off backtrace for server error
+
+
+
+ server_user
+
+ Webserver runs as this user
+
+
+
+ server_group
+
+ Webserver runs as this group
+
+
+
+ backend_auth_policy
+ desktop.auth.backend.AllowAllBackend
+ Authentication backend.
+
+
+
+
+ db_engine
+ mysql
+ Configuration options for specifying the Desktop Database.
+
+
+
+ db_host
+ localhost
+ Configuration options for specifying the Desktop Database.
+
+
+
+ db_port
+ 3306
+ Configuration options for specifying the Desktop Database.
+
+
+
+ db_user
+ sandbox
+ Configuration options for specifying the Desktop Database.
+
+
+
+ db_password
+ 1111
+ Configuration options for specifying the Desktop Database.
+
+
+
+ db_name
+ sandbox
+ Configuration options for specifying the Desktop Database.
+
+
+
+
+ smtp_host
+ localhost
+ The SMTP server information for email notification delivery.
+
+
+
+ smtp_port
+ 25
+ The SMTP server information for email notification delivery.
+
+
+
+ smtp_user
+
+ The SMTP server information for email notification delivery.
+
+
+
+ smtp_password
+ 25
+ The SMTP server information for email notification delivery.
+
+
+
+ tls
+ no
+ Whether to use a TLS (secure) connection when talking to the SMTP server.
+
+
+
+ default_from_email
+ sandbox@hortonworks.com
+ The SMTP server information for email notification delivery.
+
+
+
+
+ fs_defaultfs
+
+ Enter the filesystem URI,
+ e.g.: hdfs://sandbox:8020
+
+
+
+ webhdfs_url
+
+ Use WebHdfs/HttpFs as the communication mechanism. To fallback to
+ using the Thrift plugin (used in Hue 1.x), this must be uncommented
+ and explicitly set to the empty value.
+ Value e.g.: http://localhost:50070/webhdfs/v1/
+
+
+
+ jobtracker_host
+
+ Enter the host on which you are running the Hadoop JobTracker.
+
+
+
+ jobtracker_port
+ 50030
+ The port where the JobTracker IPC listens on.
+
+
+
+ hadoop_mapred_home
+ /usr/lib/hadoop/lib
+ The MapReduce home directory.
+
+
+
+ resourcemanager_host
+
+ Enter the host on which you are running the ResourceManager.
+
+
+
+ resourcemanager_port
+
+ The port where the ResourceManager IPC listens on.
+
+
+
+
+ hive_home_dir
+
+ Hive home directory.
+
+
+
+ hive_conf_dir
+
+ Hive configuration directory, where hive-site.xml is
+ located.
+
+
+
+ templeton_url
+
+ WebHcat http URL
+
+
+
+
+ pig_nice_name
+
+ Define and configure a new shell type pig
+
+
+
+ pig_shell_command
+ /usr/bin/pig -l /dev/null
+ Define and configure a new shell type pig.
+
+
+
+ pig_java_home
+
+ Define and configure a new shell type pig.
+
+
+
+ hbase_nice_name
+ HBase Shell
+ Define and configure a new shell type hbase
+
+
+
+ hbase_shell_command
+ /usr/bin/hbase shell
+ Define and configure a new shell type hbase.
+
+
+
+ bash_nice_name
+
+ Define and configure a new shell type bash for testing
+ only
+
+
+
+ bash_shell_command
+ /bin/bash
+ Define and configure a new shell type bash for testing only.
+
+
+
+
+ whitelist
+ (localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)
+ proxy settings
+
+
+
\ No newline at end of file
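The whitelist value above is an ordinary regular expression over host:port pairs; a quick, illustrative check of what it admits:

# Illustrative check of the Hue proxy whitelist regex defined above.
import re

whitelist = re.compile(r"(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)")

print(bool(whitelist.match("localhost:50070")))    # True  - NameNode web UI
print(bool(whitelist.match("127.0.0.1:50030")))    # True  - JobTracker web UI
print(bool(whitelist.match("example.com:50070")))  # False - host not in the whitelist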
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/metainfo.xml
new file mode 100644
index 00000000000..c6e384f26f7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HUE/metainfo.xml
@@ -0,0 +1,31 @@
+
+
+
+ root
+ Hue is a graphical user interface to operate and develop
+ applications for Apache Hadoop.
+ 2.2.0
+
+
+
+ HUE_SERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
new file mode 100644
index 00000000000..8034d1911f6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.maximum-system-jobs
+ 3000
+ Maximum number of jobs in the system which can be initialized,
+ concurrently, by the CapacityScheduler.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.capacity
+ 100
+ Percentage of the number of slots in the cluster that are
+ to be available for jobs in this queue.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-capacity
+ -1
+
+ maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+ This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+ The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+ Default value of -1 implies a queue can use complete capacity of the cluster.
+
+ This property could be used to curtail certain jobs which are long running in nature from occupying more than a
+ certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of
+ other queues being affected.
+
+ One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
+ the max capacity would change. So if a large number of nodes or racks get added to the cluster, max capacity in
+ absolute terms would increase accordingly.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.minimum-user-limit-percent
+ 100
+ Each queue enforces a limit on the percentage of resources
+ allocated to a user at any given time, if there is competition for them.
+ This user limit can vary between a minimum and maximum value. The former
+ depends on the number of users who have submitted jobs, and the latter is
+ set to this property value. For example, suppose the value of this
+ property is 25. If two users have submitted jobs to a queue, no single
+ user can use more than 50% of the queue resources. If a third user submits
+ a job, no single user can use more than 33% of the queue resources. With 4
+ or more users, no user can use more than 25% of the queue's resources. A
+ value of 100 implies no user limits are imposed.
+
+
+
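The per-user share described above is simple arithmetic; a small sketch of it (not CapacityScheduler code, and using the 25% figure from the example rather than the 100 configured here):

# Sketch of the user-limit arithmetic from the description above:
# the per-user cap is max(100 / active_users, minimum_user_limit) percent.
def user_limit_percent(active_users, minimum_user_limit=25):
    return max(100.0 / active_users, float(minimum_user_limit))

for users in (1, 2, 3, 4, 5):
    print(users, user_limit_percent(users))  # 100.0, 50.0, 33.3..., 25.0, 25.0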
+
+ mapred.capacity-scheduler.queue.default.user-limit-factor
+ 1
+ The multiple of the queue capacity which can be configured to
+ allow a single user to acquire more slots.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks
+ 200000
+ The maximum number of tasks, across all jobs in the queue,
+ which can be initialized concurrently. Once the queue's jobs exceed this
+ limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user
+ 100000
+ The maximum number of tasks per-user, across all of the
+ user's jobs in the queue, which can be initialized concurrently. Once the
+ user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.init-accept-jobs-factor
+ 10
+ The multiple of (maximum-system-jobs * queue-capacity) used to
+ determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.default-supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions by default in a job queue.
+
+
+
+
+ mapred.capacity-scheduler.default-minimum-user-limit-percent
+ 100
+ The percentage of the resources limited to a particular user
+ for the job queue at any given point of time by default.
+
+
+
+
+
+ mapred.capacity-scheduler.default-user-limit-factor
+ 1
+ The default multiple of queue-capacity which is used to
+ determine the amount of slots a single user can consume concurrently.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-queue
+ 200000
+ The default maximum number of tasks, across all jobs in the
+ queue, which can be initialized concurrently. Once the queue's jobs exceed
+ this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-user
+ 100000
+ The default maximum number of tasks per-user, across all of
+ the user's jobs in the queue, which can be initialized concurrently. Once
+ the user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-init-accept-jobs-factor
+ 10
+ The default multiple of (maximum-system-jobs * queue-capacity)
+ used to determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+ mapred.capacity-scheduler.init-poll-interval
+ 5000
+ The amount of time in milliseconds which is used to poll
+ the job queues for jobs to initialize.
+
+
+
+ mapred.capacity-scheduler.init-worker-threads
+ 5
+ Number of worker threads used by the initialization poller to
+ initialize jobs in a set of queues. If the number specified in this
+ property equals the number of job queues, each thread initializes
+ jobs in a single queue. If it is smaller, each thread is assigned a
+ set of queues. If it is larger, the number of threads used equals
+ the number of job queues.
+
+
+
+
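The thread-to-queue assignment described above amounts to a simple partition of queues across threads; an illustrative sketch with invented queue names (not the scheduler's actual code):

# Effective threads = min(configured threads, number of queues); queues are then
# spread as evenly as possible across the threads.
def assign_queues(queues, worker_threads):
    threads = min(worker_threads, len(queues))
    return [queues[i::threads] for i in range(threads)]

print(assign_queues(["default", "etl", "adhoc"], 5))  # one queue per thread
print(assign_queues(["default", "etl", "adhoc"], 2))  # one thread handles two queues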
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/core-site.xml
new file mode 100644
index 00000000000..3a2af490593
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/core-site.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml
new file mode 100644
index 00000000000..d27972493b1
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml
@@ -0,0 +1,160 @@
+
+
+
+
+
+
+ jobtracker_host
+
+ JobTracker Host.
+
+
+ tasktracker_hosts
+
+ TaskTracker hosts.
+
+
+ mapred_local_dir
+ /hadoop/mapred
+ MapRed Local Directories.
+
+
+ mapred_system_dir
+ /mapred/system
+ MapRed System Directories.
+
+
+ scheduler_name
+ org.apache.hadoop.mapred.CapacityTaskScheduler
+ MapRed Capacity Scheduler.
+
+
+ jtnode_opt_newsize
+ 200
+ Mem New Size.
+
+
+ jtnode_opt_maxnewsize
+ 200
+ Max New size.
+
+
+ hadoop_heapsize
+ 1024
+ Hadoop maximum Java heap size
+
+
+ jtnode_heapsize
+ 1024
+ Maximum Java heap size for JobTracker in MB (Java option -Xmx)
+
+
+ mapred_map_tasks_max
+ 4
+ Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker
+
+
+ mapred_red_tasks_max
+ 2
+ Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker
+
+
+ mapred_cluster_map_mem_mb
+ -1
+ The virtual memory size of a single Map slot in the MapReduce framework
+
+
+ mapred_cluster_red_mem_mb
+ -1
+ The virtual memory size of a single Reduce slot in the MapReduce framework
+
+
+ mapred_job_map_mem_mb
+ -1
+ Virtual memory for single Map task
+
+
+ mapred_child_java_opts_sz
+ 768
+ Java options for the TaskTracker child processes.
+
+
+ io_sort_mb
+ 200
+ The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).
+
+
+ io_sort_spill_percent
+ 0.9
+ Percentage of sort buffer used for record collection (Expert-only configuration).
+
+
+ mapreduce_userlog_retainhours
+ 24
+ The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+
+
+ maxtasks_per_job
+ -1
+ Maximum number of tasks for a single Job
+
+
+ lzo_enabled
+ false
+ LZO compression enabled
+
+
+ snappy_enabled
+ true
+ Snappy compression enabled
+
+
+ rca_enabled
+ true
+ Enable Job Diagnostics.
+
+
+ mapred_hosts_exclude
+
+ Exclude entered hosts
+
+
+ mapred_hosts_include
+
+ Include entered hosts
+
+
+ mapred_jobstatus_dir
+ file:////mapred/jobstatus
+ Job Status directory
+
+
+ task_controller
+ org.apache.hadoop.mapred.DefaultTaskController
+ Task Controller.
+
+
+ mapred_user
+ mapred
+ MapReduce User.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
new file mode 100644
index 00000000000..ce12380767c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.queue.default.acl-submit-job
+ *
+
+
+
+ mapred.queue.default.acl-administer-jobs
+ *
+
+
+
+
+
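The two ACL entries above cover only the default queue. For any additional queue the property names follow the pattern mapred.queue.<queue-name>.acl-submit-job and mapred.queue.<queue-name>.acl-administer-jobs, and the value lists users, then a single blank, then groups. A sketch for a hypothetical queue named analytics:

    <property>
      <name>mapred.queue.analytics.acl-submit-job</name>
      <!-- users first, then a blank, then groups -->
      <value>alice,bob analysts</value>
    </property>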
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml
new file mode 100644
index 00000000000..2c5630f12c5
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml
@@ -0,0 +1,537 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.sort.mb
+
+ No description
+
+
+
+ io.sort.record.percent
+ .2
+ No description
+
+
+
+ io.sort.spill.percent
+
+ No description
+
+
+
+ io.sort.factor
+ 100
+ No description
+
+
+
+
+
+ mapred.tasktracker.tasks.sleeptime-before-sigkill
+ 250
+ Normally, this is the amount of time before killing
+ processes, and the recommended-default is 5.000 seconds - a value of
+ 5000 here. In this case, we are using it solely to blast tasks before
+ killing them, and killing them very quickly (1/4 second) to guarantee
+ that we do not leave VMs around for later jobs.
+
+
+
+
+ mapred.job.tracker.handler.count
+ 50
+
+ The number of server threads for the JobTracker. This should be roughly
+ 4% of the number of tasktracker nodes.
+
+
+
+
+ mapred.system.dir
+ /mapred/system
+ No description
+ true
+
+
+
+ mapred.job.tracker
+
+
+ No description
+ true
+
+
+
+ mapred.job.tracker.http.address
+
+
+ No description
+ true
+
+
+
+
+ mapred.local.dir
+
+ No description
+ true
+
+
+
+ mapreduce.cluster.administrators
+ hadoop
+
+
+
+ mapred.reduce.parallel.copies
+ 30
+ No description
+
+
+
+ mapred.tasktracker.map.tasks.maximum
+
+ No description
+
+
+
+ mapred.tasktracker.reduce.tasks.maximum
+
+ No description
+
+
+
+ tasktracker.http.threads
+ 50
+
+
+
+ mapred.map.tasks.speculative.execution
+ false
+ If true, then multiple instances of some map tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.tasks.speculative.execution
+ false
+ If true, then multiple instances of some reduce tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.slowstart.completed.maps
+ 0.05
+
+
+
+ mapred.inmem.merge.threshold
+ 1000
+ The threshold, in terms of the number of files
+ for the in-memory merge process. When we accumulate threshold number of files
+    we initiate the in-memory merge and spill to disk. A value of 0 or less
+    indicates that we don't want any threshold and instead depend only on
+ the ramfs's memory consumption to trigger the merge.
+
+
+
+
+ mapred.job.shuffle.merge.percent
+ 0.66
+ The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapred.job.shuffle.input.buffer.percent.
+
+
+
+
+ mapred.job.shuffle.input.buffer.percent
+ 0.7
+ The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+
+
+
+
+ mapred.map.output.compression.codec
+ org.apache.hadoop.io.compress.SnappyCodec
+ If the map outputs are compressed, how should they be
+ compressed
+
+
+
+
+ mapred.output.compression.type
+ BLOCK
+    If the job outputs are to be compressed as SequenceFiles, how should
+ they be compressed? Should be one of NONE, RECORD or BLOCK.
+
+
+
+
+
+ mapred.jobtracker.completeuserjobs.maximum
+ 5
+
+
+
+ mapred.jobtracker.taskScheduler
+
+
+
+
+ mapred.jobtracker.restart.recover
+ false
+ "true" to enable (job) recovery upon restart,
+ "false" to start afresh
+
+
+
+
+ mapred.job.reduce.input.buffer.percent
+ 0.0
+ The percentage of memory- relative to the maximum heap size- to
+ retain map outputs during the reduce. When the shuffle is concluded, any
+ remaining map outputs in memory must consume less than this threshold before
+ the reduce can begin.
+
+
+
+
+ mapreduce.reduce.input.limit
+ 10737418240
+ The limit on the input size of the reduce. (This value
+ is 10 Gb.) If the estimated input size of the reduce is greater than
+ this value, job is failed. A value of -1 means that there is no limit
+ set.
+
+
+
+
+
+ mapred.compress.map.output
+
+
+
+
+
+ mapred.task.timeout
+ 600000
+ The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string.
+
+
+
+
+ jetty.connector
+ org.mortbay.jetty.nio.SelectChannelConnector
+ No description
+
+
+
+ mapred.task.tracker.task-controller
+
+
+ TaskController which is used to launch and manage task execution.
+
+
+
+
+ mapred.child.root.logger
+ INFO,TLA
+
+
+
+ mapred.child.java.opts
+
+
+ No description
+
+
+
+ mapred.cluster.map.memory.mb
+
+
+
+
+ mapred.cluster.reduce.memory.mb
+
+
+
+
+ mapred.job.map.memory.mb
+
+
+
+
+ mapred.job.reduce.memory.mb
+
+
+
+
+ mapred.cluster.max.map.memory.mb
+
+
+
+
+ mapred.cluster.max.reduce.memory.mb
+
+
+
+
+ mapred.hosts
+
+
+
+
+ mapred.hosts.exclude
+
+
+
+
+ mapred.max.tracker.blacklists
+ 16
+
+      if a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+
+
+
+
+ mapred.healthChecker.script.path
+
+
+
+
+ mapred.healthChecker.interval
+ 135000
+
+
+
+ mapred.healthChecker.script.timeout
+ 60000
+
+
+
+ mapred.job.tracker.persist.jobstatus.active
+ false
+ Indicates if persistency of job status information is
+ active or not.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.hours
+ 1
+ The number of hours job status information is persisted in DFS.
+    The job status information will be available after it drops out of the memory
+ queue and between jobtracker restarts. With a zero value the job status
+ information is not persisted at all in DFS.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.dir
+
+ The directory where the job status information is persisted
+    in a file system to be available after it drops out of the memory queue and
+ between jobtracker restarts.
+
+
+
+
+ mapred.jobtracker.retirejob.check
+ 10000
+
+
+
+ mapred.jobtracker.retirejob.interval
+ 21600000
+
+
+
+ mapred.job.tracker.history.completed.location
+ /mapred/history/done
+ No description
+
+
+
+ mapred.task.maxvmem
+
+ true
+ No description
+
+
+
+ mapred.jobtracker.maxtasks.per.job
+
+ true
+ The maximum number of tasks for a single job.
+ A value of -1 indicates that there is no maximum.
+
+
+
+ mapreduce.fileoutputcommitter.marksuccessfuljobs
+ false
+
+
+
+ mapred.userlog.retain.hours
+
+
+
+
+ mapred.job.reuse.jvm.num.tasks
+ 1
+
+ How many tasks to run per jvm. If set to -1, there is no limit
+
+ true
+
+
+
+ mapreduce.jobtracker.kerberos.principal
+
+
+ JT user name key.
+
+
+
+
+ mapreduce.tasktracker.kerberos.principal
+
+
+ tt user name key. "_HOST" is replaced by the host name of the task tracker.
+
+
+
+
+
+ hadoop.job.history.user.location
+ none
+ true
+
+
+
+
+ mapreduce.jobtracker.keytab.file
+
+
+ The keytab for the jobtracker principal.
+
+
+
+
+
+ mapreduce.tasktracker.keytab.file
+
+ The filename of the keytab for the task tracker
+
+
+
+ mapred.task.tracker.http.address
+
+ Http address for task tracker.
+
+
+
+ mapreduce.jobtracker.staging.root.dir
+ /user
+ The Path prefix for where the staging directories should be placed. The next level is always the user's
+ name. It is a path in the default file system.
+
+
+
+ mapreduce.tasktracker.group
+ hadoop
+ The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.
+
+
+
+
+ mapreduce.jobtracker.split.metainfo.maxsize
+ 50000000
+ true
+ If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+    initialization.
+
+
+
+ mapreduce.history.server.embedded
+ false
+    Should the job history server be embedded within the JobTracker process
+ true
+
+
+
+ mapreduce.history.server.http.address
+
+
+ Http address of the history server
+ true
+
+
+
+ mapreduce.jobhistory.kerberos.principal
+
+
+    Job history user name key (must map to the same user as the JT user).
+
+
+
+ mapreduce.jobhistory.keytab.file
+
+
+ The keytab for the job history server principal.
+
+
+
+ mapred.jobtracker.blacklist.fault-timeout-window
+ 180
+
+ 3-hour sliding window (value is in minutes)
+
+
+
+
+ mapred.jobtracker.blacklist.fault-bucket-width
+ 15
+
+ 15-minute bucket size (value is in minutes)
+
+
+
+
+ mapred.queue.names
+ default
+ Comma separated list of queues configured for this jobtracker.
+
+
+
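Several of the memory properties above only make sense together: mapred.cluster.map.memory.mb is the size of one map slot, mapred.job.map.memory.mb is what a job requests per map task, and mapred.cluster.max.map.memory.mb caps that request. A sketch with purely illustrative values (the matching global.xml defaults above are -1, i.e. the feature is disabled):

    <property><name>mapred.cluster.map.memory.mb</name><value>1536</value></property>
    <property><name>mapred.cluster.max.map.memory.mb</name><value>6144</value></property>
    <!-- a job may then request up to four slots' worth of memory per map task -->
    <property><name>mapred.job.map.memory.mb</name><value>3072</value></property>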
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 00000000000..b4a95a04a0c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,41 @@
+
+
+
+ mapred
+ Apache Hadoop Distributed Processing Framework
+ 1.2.0.1.3.0.0
+
+
+
+ JOBTRACKER
+ MASTER
+
+
+
+ TASKTRACKER
+ SLAVE
+
+
+
+ MAPREDUCE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
new file mode 100644
index 00000000000..61a2b9025cd
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
@@ -0,0 +1,50 @@
+
+
+
+
+
+
+ nagios_user
+ nagios
+ Nagios Username.
+
+
+ nagios_group
+ nagios
+ Nagios Group.
+
+
+ nagios_web_login
+ nagiosadmin
+ Nagios web user.
+
+
+ nagios_web_password
+
+ Nagios Admin Password.
+
+
+ nagios_contact
+
+ Hadoop Admin Email.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml
new file mode 100644
index 00000000000..bd7de072f7d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Nagios Monitoring and Alerting system
+ 3.2.3
+
+
+
+ NAGIOS_SERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml
new file mode 100644
index 00000000000..ddbf7804ed7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml
@@ -0,0 +1,105 @@
+
+
+
+
+
+
+ oozie_user
+ oozie
+ Oozie User.
+
+
+ oozieserver_host
+
+ Oozie Server Host.
+
+
+ oozie_database
+
+ Oozie Server Database.
+
+
+ oozie_derby_database
+ Derby
+ Oozie Derby Database.
+
+
+ oozie_existing_mysql_database
+ MySQL
+ Oozie MySQL Database.
+
+
+ oozie_existing_mysql_host
+
+ Existing MySQL Host.
+
+
+ oozie_existing_oracle_database
+ Oracle
+ Oracle Database
+
+
+ oozie_existing_oracle_host
+
+ Database Host.
+
+
+ oozie_ambari_database
+ MySQL
+ Database default.
+
+
+ oozie_ambari_host
+
+    Host on which the database will be created.
+
+
+ oozie_database_name
+ oozie
+    Database name used for Oozie.
+
+
+ oozie_metastore_user_name
+ oozie
+ Database user name to use to connect to the database
+
+
+ oozie_metastore_user_passwd
+
+ Database password to use to connect to the database
+
+
+ oozie_data_dir
+ /hadoop/oozie/data
+ Data directory in which the Oozie DB exists
+
+
+ oozie_log_dir
+ /var/log/oozie
+ Directory for oozie logs
+
+
+ oozie_pid_dir
+ /var/run/oozie
+ Directory in which the pid files for oozie reside.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 00000000000..5c7fd1c18d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,245 @@
+
+
+
+
+
+
+
+ oozie.base.url
+ http://localhost:11000/oozie
+ Base Oozie URL.
+
+
+
+ oozie.system.id
+ oozie-${user.name}
+
+ The Oozie system ID.
+
+
+
+
+ oozie.systemmode
+ NORMAL
+
+ System mode for Oozie at startup.
+
+
+
+
+ oozie.service.AuthorizationService.authorization.enabled
+ true
+
+ Specifies whether security (user name/admin role) is enabled or not.
+      If disabled, any user can manage the Oozie system and manage any job.
+
+
+
+
+ oozie.service.PurgeService.older.than
+ 30
+
+ Jobs older than this value, in days, will be purged by the PurgeService.
+
+
+
+
+ oozie.service.PurgeService.purge.interval
+ 3600
+
+ Interval at which the purge service will run, in seconds.
+
+
+
+
+ oozie.service.CallableQueueService.queue.size
+ 1000
+ Max callable queue size
+
+
+
+ oozie.service.CallableQueueService.threads
+ 10
+ Number of threads used for executing callables
+
+
+
+ oozie.service.CallableQueueService.callable.concurrency
+ 3
+
+ Maximum concurrency for a given callable type.
+ Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+ Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+ All commands that use action executors (action-start, action-end, action-kill and action-check) use
+ the action type as the callable type.
+
+
+
+
+ oozie.service.coord.normal.default.timeout
+ 120
+      Default timeout for a coordinator action input check (in minutes) for a normal job.
+ -1 means infinite timeout
+
+
+
+ oozie.db.schema.name
+ oozie
+
+ Oozie DataBase Name
+
+
+
+
+ oozie.service.HadoopAccessorService.jobTracker.whitelist
+
+
+ Whitelisted job tracker for Oozie service.
+
+
+
+
+ oozie.authentication.type
+ simple
+
+
+
+
+
+ oozie.service.HadoopAccessorService.nameNode.whitelist
+
+
+
+
+
+
+ oozie.service.WorkflowAppService.system.libpath
+ /user/${user.name}/share/lib
+
+ System library path to use for workflow applications.
+      This path is added to a workflow application if its job properties set
+ the property 'oozie.use.system.libpath' to true.
+
+
+
+
+ use.system.libpath.for.mapreduce.and.pig.jobs
+ false
+
+      If set to true, submissions of MapReduce and Pig jobs will automatically
+      include the system library path, thus not requiring users to
+ specify where the Pig JAR files are. Instead, the ones from the system
+ library path are used.
+
+
+
+ oozie.authentication.kerberos.name.rules
+
+ RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+ RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+ RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ DEFAULT
+
+ The mapping from kerberos principal names to local OS user names.
+
+
+ oozie.service.HadoopAccessorService.hadoop.configurations
+ *=/etc/hadoop/conf
+
+ Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+ the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+ used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory, though the path can also be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+
+
+
+ oozie.service.ActionService.executor.ext.classes
+
+ org.apache.oozie.action.email.EmailActionExecutor,
+ org.apache.oozie.action.hadoop.HiveActionExecutor,
+ org.apache.oozie.action.hadoop.ShellActionExecutor,
+ org.apache.oozie.action.hadoop.SqoopActionExecutor,
+ org.apache.oozie.action.hadoop.DistcpActionExecutor
+
+
+
+
+ oozie.service.SchemaService.wf.ext.schemas
+ shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+
+
+ oozie.service.JPAService.create.db.schema
+ false
+
+ Creates Oozie DB.
+
+      If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a no-op.
+      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
+
+
+
+
+ oozie.service.JPAService.jdbc.driver
+ org.apache.derby.jdbc.EmbeddedDriver
+
+ JDBC driver class.
+
+
+
+
+ oozie.service.JPAService.jdbc.url
+ jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true
+
+ JDBC URL.
+
+
+
+
+ oozie.service.JPAService.jdbc.username
+ sa
+
+ DB user name.
+
+
+
+
+ oozie.service.JPAService.jdbc.password
+
+
+ DB user password.
+
+      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
+      and if it is empty, Configuration assumes it is NULL.
+
+
+
+
+ oozie.service.JPAService.pool.max.active.conn
+ 10
+
+ Max number of connections.
+
+
+
\ No newline at end of file
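The JPAService settings above wire Oozie to the embedded Derby database. Pointing it at the "existing MySQL" option from global.xml would, under the usual Oozie configuration keys (the host name db.example.com is purely hypothetical), look roughly like:

    <property>
      <name>oozie.service.JPAService.jdbc.driver</name>
      <value>com.mysql.jdbc.Driver</value>
    </property>
    <property>
      <name>oozie.service.JPAService.jdbc.url</name>
      <value>jdbc:mysql://db.example.com:3306/${oozie.db.schema.name}</value>
    </property>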
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/metainfo.xml
new file mode 100644
index 00000000000..46460b42d97
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ System for workflow coordination and execution of Apache Hadoop jobs
+ 3.3.2.1.3.0.0
+
+
+
+ OOZIE_SERVER
+ MASTER
+
+
+
+ OOZIE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/configuration/pig.properties b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/configuration/pig.properties
new file mode 100644
index 00000000000..01000b53ab2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/configuration/pig.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
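The two reducer-estimation properties above work together: Pig plans roughly ceil(input bytes / pig.exec.reducers.bytes.per.reducer) reducers, capped at pig.exec.reducers.max. With the values above, a job reading about 250 GB of input would be planned with ceil(250,000,000,000 / 1,000,000,000) = 250 reducers, and anything beyond roughly 999 GB is capped at 999.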
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/metainfo.xml
new file mode 100644
index 00000000000..6806c549f80
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/PIG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Scripting platform for analyzing large datasets
+ 0.11.1.1.3.0.0
+
+
+
+ PIG
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/SQOOP/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/SQOOP/metainfo.xml
new file mode 100644
index 00000000000..1924c546444
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/SQOOP/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases
+ 1.4.3.1.3.0.0
+
+
+
+ SQOOP
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 00000000000..31d0113faa2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+
+
+ templeton.port
+ 50111
+ The HTTP port for the main server.
+
+
+
+ templeton.hadoop.conf.dir
+ /etc/hadoop/conf
+ The path to the Hadoop configuration.
+
+
+
+ templeton.jar
+ /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
+ The path to the Templeton jar file.
+
+
+
+ templeton.libjars
+ /usr/lib/zookeeper/zookeeper.jar
+    Jars to add to the classpath.
+
+
+
+
+ templeton.hadoop
+ /usr/bin/hadoop
+ The path to the Hadoop executable.
+
+
+
+ templeton.pig.archive
+ hdfs:///apps/webhcat/pig.tar.gz
+ The path to the Pig archive.
+
+
+
+ templeton.pig.path
+ pig.tar.gz/pig/bin/pig
+ The path to the Pig executable.
+
+
+
+ templeton.hcat
+ /usr/bin/hcat
+ The path to the hcatalog executable.
+
+
+
+ templeton.hive.archive
+ hdfs:///apps/webhcat/hive.tar.gz
+ The path to the Hive archive.
+
+
+
+ templeton.hive.path
+ hive.tar.gz/hive/bin/hive
+ The path to the Hive executable.
+
+
+
+ templeton.hive.properties
+
+ Properties to set when running hive.
+
+
+
+
+ templeton.zookeeper.hosts
+
+ ZooKeeper servers, as comma separated host:port pairs
+
+
+
+ templeton.storage.class
+ org.apache.hcatalog.templeton.tool.ZooKeeperStorage
+ The class to use as storage
+
+
+
+ templeton.override.enabled
+ false
+
+ Enable the override path in templeton.override.jars
+
+
+
+
+ templeton.streaming.jar
+ hdfs:///apps/webhcat/hadoop-streaming.jar
+ The hdfs path to the Hadoop streaming jar file.
+
+
+
+ templeton.exec.timeout
+ 60000
+    Timeout for the Templeton API
+
+
+
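templeton.zookeeper.hosts is left empty in this test stack; when populated it takes the usual comma-separated host:port form, e.g. zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181 (host names hypothetical), and the ZooKeeperStorage class named in templeton.storage.class then keeps WebHCat job state on those servers.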
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/metainfo.xml
new file mode 100644
index 00000000000..15c8daadf5e
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,31 @@
+
+
+
+ root
+    This is a comment for the WEBHCAT service
+ 0.11.0.1.3.0.0
+
+
+
+ WEBHCAT_SERVER
+ MASTER
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml
new file mode 100644
index 00000000000..f78df89d131
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+ zk_user
+ zookeeper
+ ZooKeeper User.
+
+
+ zookeeperserver_host
+
+ ZooKeeper Server Hosts.
+
+
+ zk_data_dir
+ /hadoop/zookeeper
+ Data directory for ZooKeeper.
+
+
+ zk_log_dir
+ /var/log/zookeeper
+ ZooKeeper Log Dir
+
+
+ zk_pid_dir
+ /var/run/zookeeper
+ ZooKeeper Pid Dir
+
+
+ zk_pid_file
+ /var/run/zookeeper/zookeeper_server.pid
+ ZooKeeper Pid File
+
+
+ tickTime
+ 2000
+ The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper
+
+
+ initLimit
+ 10
+ Ticks to allow for sync at Init.
+
+
+ syncLimit
+ 5
+ Ticks to allow for sync at Runtime.
+
+
+ clientPort
+ 2181
+ Port for running ZK Server.
+
+
+
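The three timing properties combine multiplicatively: with tickTime = 2000 ms, initLimit = 10 gives followers 10 × 2000 ms = 20 s to connect to and sync with the leader at startup, and syncLimit = 5 allows 5 × 2000 ms = 10 s of drift at runtime before a follower is dropped from the quorum.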
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 00000000000..3dc129b2d3c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ Centralized service which provides highly reliable distributed coordination
+ 3.4.5.1.3.0.0
+
+
+
+ ZOOKEEPER_SERVER
+ MASTER
+
+
+
+ ZOOKEEPER_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/metainfo.xml
new file mode 100644
index 00000000000..5d9cb06e0fe
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/metainfo.xml
@@ -0,0 +1,23 @@
+
+
+
+
+ 1.2.0
+ true
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/repos/repoinfo.xml
new file mode 100644
index 00000000000..f5c0fee5c3f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/repos/repoinfo.xml
@@ -0,0 +1,97 @@
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+ http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11
+ HDP-UTILS-1.1.0.15
+ HDP-UTILS
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1
+ HDP-1.2.1
+ HDP
+
+
+ http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11
+ HDP-UTILS-1.1.0.15
+ HDP-UTILS
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml
new file mode 100644
index 00000000000..0b21f0f6348
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/GANGLIA/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ root
+ Ganglia Metrics Collection system
+ 3.2.0
+
+
+
+ GANGLIA_SERVER
+ MASTER
+
+
+
+ GANGLIA_MONITOR
+ SLAVE
+
+
+
+ MONITOR_WEBSERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 00000000000..e45f23c962c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for HRegionInterface protocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.protocol.acl
+ *
+ ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.masterregion.protocol.acl
+ *
+ ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 00000000000..149751e8718
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,345 @@
+
+
+
+
+
+ hbase.rootdir
+
+ The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration else all data will be lost
+ on machine restart.
+
+
+
+ hbase.cluster.distributed
+ true
+ The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+
+
+
+ hbase.tmp.dir
+
+ Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp' (The '/tmp' directory is often cleared on
+ machine restart).
+
+
+
+ hbase.master.info.bindAddress
+
+ The bind address for the HBase Master web UI
+
+
+
+ hbase.regionserver.global.memstore.upperLimit
+
+ Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+
+
+
+ hbase.regionserver.handler.count
+
+ Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+ Default is 10.
+
+
+
+ hbase.hregion.majorcompaction
+
+    The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+
+
+
+ hbase.master.lease.thread.wakefrequency
+ 3000
+ The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+
+
+
+ hbase.regionserver.global.memstore.lowerLimit
+
+ When memstores are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+ This value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+
+
+
+ hbase.hregion.memstore.block.multiplier
+
+ Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes. Useful for preventing
+    runaway memstore during spikes in update traffic. Without an
+ upper-bound, memstore fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+
+
+
+ hbase.hregion.memstore.flush.size
+
+
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+
+
+
+ hbase.hregion.memstore.mslab.enabled
+
+
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+
+
+
+ hbase.hregion.max.filesize
+
+
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 1G.
+
+
+
+ hbase.client.scanner.caching
+
+ Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+
+
+
+ zookeeper.session.timeout
+
+ ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+
+
+
+ hbase.client.keyvalue.maxsize
+
+ Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since a single entry cannot be split, this helps avoid a region
+    becoming unsplittable because the data is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+
+
+
+ hbase.hstore.compactionThreshold
+
+
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+
+
+
+ hbase.hstore.blockingStoreFiles
+
+
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+
+
+
+ hfile.block.cache.size
+
+
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+ used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+ Set to 0 to disable but it's not recommended.
+
+
+
+
+
+ hbase.master.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HMaster server principal.
+
+
+
+ hbase.master.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HMaster process. The principal name should
+ be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+ portion, it will be replaced with the actual hostname of the running
+ instance.
+
+
+
+ hbase.regionserver.keytab.file
+
+ Full path to the kerberos keytab file to use for logging in
+ the configured HRegionServer server principal.
+
+
+
+ hbase.regionserver.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HRegionServer process. The principal name
+ should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+ hostname portion, it will be replaced with the actual hostname of the
+ running instance. An entry for this principal must exist in the file
+ specified in hbase.regionserver.keytab.file
+
+
+
+
+
+ hbase.superuser
+ hbase
+ List of users or groups (comma-separated), who are allowed
+ full privileges, regardless of stored ACLs, across the cluster.
+ Only used when HBase security is enabled.
+
+
+
+
+ hbase.coprocessor.region.classes
+
+ A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+
+
+
+
+ hbase.coprocessor.master.classes
+
+ A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+
+
+
+
+ hbase.zookeeper.property.clientPort
+ 2181
+ Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+
+
+
+
+
+ hbase.zookeeper.quorum
+
+ Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+
+
+
+
+
+ dfs.support.append
+
+ Does HDFS allow appends to files?
+    This is an HDFS setting, placed here so the HDFS client enables append support.
+    You must ensure that this setting is also true server-side when running HBase
+ (You will have to restart your cluster after setting it).
+
+
+
+
+ dfs.client.read.shortcircuit
+
+ Enable/Disable short circuit read for your client.
+ Hadoop servers should be configured to allow short circuit read
+ for the hbase user for this to take effect
+
+
+
+
+ dfs.client.read.shortcircuit.skip.checksum
+
+    Enable/disable skipping the checksum check
+
+
+
+ hbase.regionserver.optionalcacheflushinterval
+ 10000
+
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+
+
+
+ hbase.zookeeper.useMulti
+ true
+ Instructs HBase to make use of ZooKeeper's multi-update functionality.
+ This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+ IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+ and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
+ not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+
+
+
+
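The memstore and block-cache fractions above (left blank in this test stack) are budgeted against the same RegionServer heap. With, say, hbase.regionserver.global.memstore.upperLimit = 0.4 and hfile.block.cache.size = 0.25 (illustrative values), 65% of the heap is already committed, and HBase of this vintage is generally understood to refuse startup if the two together exceed roughly 0.8 of the heap.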
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml
new file mode 100644
index 00000000000..553fa2b0eb2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HBASE/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ mapred
+ Non-relational distributed database and centralized service for configuration management & synchronization
+ 0.94.5
+
+
+
+ HBASE_MASTER
+ MASTER
+
+
+
+ HBASE_REGIONSERVER
+ SLAVE
+
+
+
+ HBASE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml
new file mode 100644
index 00000000000..1951a5dcd1d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCATALOG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+    This is a comment for the HCATALOG service
+ 0.5.0
+
+
+
+ HCAT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml
new file mode 100644
index 00000000000..a312e68fe62
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/core-site.xml
@@ -0,0 +1,251 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+    If this is a comma-delimited list of directories then the edits are
+    replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
+
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+    not be exposed to the public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+ base filter substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..900da99ef0f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communciate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..db92d4bd2c9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/configuration/hdfs-site.xml
@@ -0,0 +1,415 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+
+ to enable webhdfs
+ true
+
+
+
+ dfs.datanode.socket.write.timeout
+ 0
+ DFS Client write socket timeout
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+
+ #of failed disks dn would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ dfs.hosts
+
+ Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+
+
+
+
+ dfs.datanode.address
+
+
+
+
+ dfs.datanode.http.address
+
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The name of the default file system. Either the
+literal string "local" or a host:port for NDFS.
+true
+
+
+
+dfs.datanode.du.reserved
+
+
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+4096
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+077
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow the queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+    The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+    HTTP SPNEGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+
+The permissions that should be set on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+    The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL for who all can view the default servlets in the HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ Number of failed disks datanode would tolerate
+
+
+
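Two of the raw byte values above read more easily once converted: dfs.block.size = 134217728 bytes is a 128 MB block size, and dfs.balance.bandwidthPerSec = 6250000 bytes per second limits the balancer to 50 Mbit/s per DataNode.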
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml
new file mode 100644
index 00000000000..2f42b7f88b8
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HCFS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Hadoop Compatible File System
+ 1.0.0
+
+
+ HCFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 00000000000..a312e68fe62
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,251 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+    If this is a comma-delimited list of directories then the edits are
+    replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
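For reference, 536870912 bytes is 512 MB, so with the 21600-second fs.checkpoint.period above a checkpoint is triggered roughly every six hours or every half gigabyte of edits, whichever comes first.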
+
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+ base filter substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
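The rule grammar above can be made concrete with a small sketch. The following illustrative Python approximation shows how a single RULE line rewrites a principal; it is only a sketch of the documented behaviour (the real logic lives in Hadoop's KerberosName class) and the function name is made up for this example.

import re

def apply_rule(principal, components, fmt, flt, sed_find, sed_repl):
    # e.g. RULE:[1:$1@$0](.*@ACME.ORG)s/@.*// applied to "joe@ACME.ORG"
    name, _, realm = principal.partition("@")
    parts = name.split("/")
    if len(parts) != components:
        return None                       # base did not match; rule is skipped
    candidate = fmt.replace("$0", realm)  # expand $0 (realm), $1, $2 ...
    for i, part in enumerate(parts, 1):
        candidate = candidate.replace("$%d" % i, part)
    if not re.fullmatch(flt, candidate):
        return None                       # filter did not match
    return re.sub(sed_find, sed_repl, candidate, count=1)

print(apply_rule("joe@ACME.ORG", 1, "$1@$0", r".*@ACME\.ORG", r"@.*", ""))  # joe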
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..900da99ef0f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communciate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
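Every ACL value in this file follows the shape described above: a comma-separated user list, one blank, then a comma-separated group list, with "*" meaning all users. A minimal, hypothetical Python sketch of that format (for illustration only, not Hadoop's actual parser):

def parse_acl(value):
    # "alice,bob users,wheel" -> (["alice", "bob"], ["users", "wheel"])
    value = value.strip()
    if value == "*":
        return None                       # wildcard: all users allowed
    users_part, _, groups_part = value.partition(" ")
    users = [u for u in users_part.split(",") if u]
    groups = [g for g in groups_part.split(",") if g]
    return users, groups

print(parse_acl("alice,bob users,wheel"))
print(parse_acl("*"))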
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..db92d4bd2c9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,415 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+
+ to enable webhdfs
+ true
+
+
+
+ dfs.datanode.socket.write.timeout
+ 0
+ DFS Client write socket timeout
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+
+ Number of failed disks a DataNode would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ dfs.hosts
+
+ Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+
+
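For reference, the default of 6250000 bytes per second works out to about 50 megabits per second per datanode.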
+
+
+ dfs.datanode.address
+
+
+
+
+ dfs.datanode.http.address
+
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The address and the base port where the dfs NameNode web UI will listen.
+true
+
+
+
+dfs.datanode.du.reserved
+
+
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+4096
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+077
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow Queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPENGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+
+The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+ The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL for who can view the default servlets in HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ Number of failed disks datanode would tolerate
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml
new file mode 100644
index 00000000000..1b185e1579c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/metainfo.xml
@@ -0,0 +1,46 @@
+
+
+
+ root
+ Apache Hadoop Distributed File System
+ 1.1.2
+
+
+
+ NAMENODE
+ MASTER
+
+
+
+ DATANODE
+ SLAVE
+
+
+
+ SECONDARY_NAMENODE
+ MASTER
+
+
+
+ HDFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 00000000000..7d35558b8c6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,138 @@
+
+
+
+
+
+
+ hive.metastore.local
+ false
+ controls whether to connect to a remote metastore server or
+ open a new metastore server in Hive Client JVM
+
+
+
+ javax.jdo.option.ConnectionURL
+
+ JDBC connect string for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionDriverName
+ com.mysql.jdbc.Driver
+ Driver class name for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionUserName
+
+ username to use against metastore database
+
+
+
+ javax.jdo.option.ConnectionPassword
+
+ password to use against metastore database
+
+
+
+ hive.metastore.warehouse.dir
+ /apps/hive/warehouse
+ location of default database for the warehouse
+
+
+
+ hive.metastore.sasl.enabled
+
+ If true, the metastore thrift interface will be secured with SASL.
+ Clients must authenticate with Kerberos.
+
+
+
+ hive.metastore.kerberos.keytab.file
+
+ The path to the Kerberos Keytab file containing the metastore
+ thrift server's service principal.
+
+
+
+ hive.metastore.kerberos.principal
+
+ The service principal for the metastore thrift server. The special
+ string _HOST will be replaced automatically with the correct host name.
+
+
+
+ hive.metastore.cache.pinobjtypes
+ Table,Database,Type,FieldSchema,Order
+ List of comma separated metastore object types that should be pinned in the cache
+
+
+
+ hive.metastore.uris
+
+ URI for client to contact metastore server
+
+
+
+ hive.semantic.analyzer.factory.impl
+ org.apache.hivealog.cli.HCatSemanticAnalyzerFactory
+ controls which SemanticAnalyzerFactory implementation class is used by CLI
+
+
+
+ hadoop.clientside.fs.operations
+ true
+ FS operations are owned by client
+
+
+
+ hive.metastore.client.socket.timeout
+ 60
+ MetaStore Client socket timeout in seconds
+
+
+
+ hive.metastore.execute.setugi
+ true
+ In insecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.
+
+
+
+ hive.security.authorization.enabled
+ true
+ enable or disable the hive client authorization
+
+
+
+ hive.security.authorization.manager
+ org.apache.hcatalog.security.HdfsAuthorizationProvider
+ the hive client authorization manager class name.
+ The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+
+
+
+ hive.server2.enable.doAs
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml
new file mode 100644
index 00000000000..6a52064cb6e
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HIVE/metainfo.xml
@@ -0,0 +1,43 @@
+
+
+
+ root
+ Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service
+ 0.10.0
+
+
+
+ HIVE_METASTORE
+ MASTER
+
+
+ HIVE_SERVER
+ MASTER
+
+
+ MYSQL_SERVER
+ MASTER
+
+
+ HIVE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
new file mode 100644
index 00000000000..8034d1911f6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.maximum-system-jobs
+ 3000
+ Maximum number of jobs in the system which can be initialized,
+ concurrently, by the CapacityScheduler.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.capacity
+ 100
+ Percentage of the number of slots in the cluster that are
+ to be available for jobs in this queue.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-capacity
+ -1
+
+ maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+ This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+ The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+ Default value of -1 implies a queue can use complete capacity of the cluster.
+
+ This property could be used to curtail certain jobs which are long running in nature from occupying more than a
+ certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of
+ other queues being affected.
+
+ One important thing to note is that maximum-capacity is a percentage, so it changes with the cluster's capacity:
+ if a large number of nodes or racks is added to the cluster, the maximum capacity in
+ absolute terms increases accordingly.
+
+
+
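As a hypothetical illustration: on a cluster with 200 map slots, a queue with capacity 30 and maximum-capacity 50 is guaranteed 60 slots and can expand to at most 100 slots when the rest of the cluster is idle.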
+
+ mapred.capacity-scheduler.queue.default.supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.minimum-user-limit-percent
+ 100
+ Each queue enforces a limit on the percentage of resources
+ allocated to a user at any given time, if there is competition for them.
+ This user limit can vary between a minimum and maximum value. The former
+ depends on the number of users who have submitted jobs, and the latter is
+ set to this property value. For example, suppose the value of this
+ property is 25. If two users have submitted jobs to a queue, no single
+ user can use more than 50% of the queue resources. If a third user submits
+ a job, no single user can use more than 33% of the queue resources. With 4
+ or more users, no user can use more than 25% of the queue's resources. A
+ value of 100 implies no user limits are imposed.
+
+
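The arithmetic in the example above amounts to max(limit, 100 / active-users). A tiny illustrative Python sketch of that rule (not scheduler code):

def user_share_percent(min_user_limit_percent, active_users):
    # Effective per-user cap on queue resources, per the description above.
    return max(min_user_limit_percent, 100.0 / active_users)

# With a limit of 25: 1 user -> 100%, 2 -> 50%, 3 -> ~33.3%, 4 or more -> 25%.
for n in (1, 2, 3, 4, 5):
    print(n, round(user_share_percent(25, n), 1))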
+
+
+ mapred.capacity-scheduler.queue.default.user-limit-factor
+ 1
+ The multiple of the queue capacity which can be configured to
+ allow a single user to acquire more slots.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks
+ 200000
+ The maximum number of tasks, across all jobs in the queue,
+ which can be initialized concurrently. Once the queue's jobs exceed this
+ limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user
+ 100000
+ The maximum number of tasks per-user, across all of the
+ user's jobs in the queue, which can be initialized concurrently. Once the
+ user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.queue.default.init-accept-jobs-factor
+ 10
+ The multiple of (maximum-system-jobs * queue-capacity) used to
+ determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+
+
+ mapred.capacity-scheduler.default-supports-priority
+ false
+ If true, priorities of jobs will be taken into
+ account in scheduling decisions by default in a job queue.
+
+
+
+
+ mapred.capacity-scheduler.default-minimum-user-limit-percent
+ 100
+ The percentage of the resources limited to a particular user
+ for the job queue at any given point of time by default.
+
+
+
+
+
+ mapred.capacity-scheduler.default-user-limit-factor
+ 1
+ The default multiple of queue-capacity which is used to
+ determine the amount of slots a single user can consume concurrently.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-queue
+ 200000
+ The default maximum number of tasks, across all jobs in the
+ queue, which can be initialized concurrently. Once the queue's jobs exceed
+ this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-maximum-active-tasks-per-user
+ 100000
+ The default maximum number of tasks per-user, across all of
+ the user's jobs in the queue, which can be initialized concurrently. Once
+ the user's jobs exceed this limit they will be queued on disk.
+
+
+
+
+ mapred.capacity-scheduler.default-init-accept-jobs-factor
+ 10
+ The default multiple of (maximum-system-jobs * queue-capacity)
+ used to determine the number of jobs which are accepted by the scheduler.
+
+
+
+
+
+ mapred.capacity-scheduler.init-poll-interval
+ 5000
+ The amount of time in milliseconds which is used to poll
+ the job queues for jobs to initialize.
+
+
+
+ mapred.capacity-scheduler.init-worker-threads
+ 5
+ Number of worker threads used by the initialization poller to
+ initialize jobs in a set of queues. If the number mentioned in this
+ property is equal to the number of job queues, a single thread
+ initializes the jobs in one queue. If it is lower, each thread gets a
+ set of queues assigned. If it is higher, the number of threads used is
+ capped at the number of job queues.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml
new file mode 100644
index 00000000000..3a2af490593
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/core-site.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
new file mode 100644
index 00000000000..ce12380767c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.queue.default.acl-submit-job
+ *
+
+
+
+ mapred.queue.default.acl-administer-jobs
+ *
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml
new file mode 100644
index 00000000000..11a72b1446a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -0,0 +1,531 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.sort.mb
+
+ No description
+
+
+
+ io.sort.record.percent
+ .2
+ No description
+
+
+
+ io.sort.spill.percent
+
+ No description
+
+
+
+ io.sort.factor
+ 100
+ No description
+
+
+
+
+
+ mapred.tasktracker.tasks.sleeptime-before-sigkill
+ 250
+ Normally, this is the amount of time before killing
+ processes, and the recommended-default is 5.000 seconds - a value of
+ 5000 here. In this case, we are using it solely to blast tasks before
+ killing them, and killing them very quickly (1/4 second) to guarantee
+ that we do not leave VMs around for later jobs.
+
+
+
+
+ mapred.job.tracker.handler.count
+ 50
+
+ The number of server threads for the JobTracker. This should be roughly
+ 4% of the number of tasktracker nodes.
+
+
+
+
+ mapred.system.dir
+ /mapred/system
+ No description
+ true
+
+
+
+ mapred.job.tracker
+
+
+ No description
+ true
+
+
+
+ mapred.job.tracker.http.address
+
+
+ No description
+ true
+
+
+
+
+ mapred.local.dir
+
+ No description
+ true
+
+
+
+ mapreduce.cluster.administrators
+ hadoop
+
+
+
+ mapred.reduce.parallel.copies
+ 30
+ No description
+
+
+
+ mapred.tasktracker.map.tasks.maximum
+
+ No description
+
+
+
+ mapred.tasktracker.reduce.tasks.maximum
+
+ No description
+
+
+
+ tasktracker.http.threads
+ 50
+
+
+
+ mapred.map.tasks.speculative.execution
+ false
+ If true, then multiple instances of some map tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.tasks.speculative.execution
+ false
+ If true, then multiple instances of some reduce tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.slowstart.completed.maps
+ 0.05
+
+
+
+ mapred.inmem.merge.threshold
+ 1000
+ The threshold, in terms of the number of files
+ for the in-memory merge process. When we accumulate threshold number of files
+ we initiate the in-memory merge and spill to disk. A value of 0 or less
+ indicates that we don't want any threshold and instead depend only on
+ the ramfs's memory consumption to trigger the merge.
+
+
+
+
+ mapred.job.shuffle.merge.percent
+ 0.66
+ The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapred.job.shuffle.input.buffer.percent.
+
+
+
+
+ mapred.job.shuffle.input.buffer.percent
+ 0.7
+ The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+
+
+
+
+ mapred.map.output.compression.codec
+
+ If the map outputs are compressed, how should they be
+ compressed
+
+
+
+
+ mapred.output.compression.type
+ BLOCK
+ If the job outputs are to be compressed as SequenceFiles, how should
+ they be compressed? Should be one of NONE, RECORD or BLOCK.
+
+
+
+
+
+ mapred.jobtracker.completeuserjobs.maximum
+ 0
+
+
+
+ mapred.jobtracker.taskScheduler
+
+
+
+
+ mapred.jobtracker.restart.recover
+ false
+ "true" to enable (job) recovery upon restart,
+ "false" to start afresh
+
+
+
+
+ mapred.job.reduce.input.buffer.percent
+ 0.0
+ The percentage of memory- relative to the maximum heap size- to
+ retain map outputs during the reduce. When the shuffle is concluded, any
+ remaining map outputs in memory must consume less than this threshold before
+ the reduce can begin.
+
+
+
+
+ mapreduce.reduce.input.limit
+ 10737418240
+ The limit on the input size of the reduce. (This value
+ is 10 Gb.) If the estimated input size of the reduce is greater than
+ this value, job is failed. A value of -1 means that there is no limit
+ set.
+
+
+
+
+
+ mapred.compress.map.output
+
+
+
+
+
+ mapred.task.timeout
+ 600000
+ The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string.
+
+
+
+
+ jetty.connector
+ org.mortbay.jetty.nio.SelectChannelConnector
+ No description
+
+
+
+ mapred.task.tracker.task-controller
+
+
+ TaskController which is used to launch and manage task execution.
+
+
+
+
+ mapred.child.root.logger
+ INFO,TLA
+
+
+
+ mapred.child.java.opts
+
+
+ No description
+
+
+
+ mapred.cluster.map.memory.mb
+
+
+
+
+ mapred.cluster.reduce.memory.mb
+
+
+
+
+ mapred.job.map.memory.mb
+
+
+
+
+ mapred.job.reduce.memory.mb
+
+
+
+
+ mapred.cluster.max.map.memory.mb
+
+
+
+
+ mapred.cluster.max.reduce.memory.mb
+
+
+
+
+ mapred.hosts
+
+
+
+
+ mapred.hosts.exclude
+
+
+
+
+ mapred.max.tracker.blacklists
+ 16
+
+ If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+
+
+
+
+ mapred.healthChecker.script.path
+
+
+
+
+ mapred.healthChecker.interval
+ 135000
+
+
+
+ mapred.healthChecker.script.timeout
+ 60000
+
+
+
+ mapred.job.tracker.persist.jobstatus.active
+ false
+ Indicates if persistency of job status information is
+ active or not.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.hours
+ 1
+ The number of hours job status information is persisted in DFS.
+ The job status information will be available after it drops out of the memory
+ queue and between jobtracker restarts. With a zero value the job status
+ information is not persisted at all in DFS.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.dir
+
+ The directory where the job status information is persisted
+ in a file system to be available after it drops out of the memory queue and
+ between jobtracker restarts.
+
+
+
+
+ mapred.jobtracker.retirejob.check
+ 10000
+
+
+
+ mapred.jobtracker.retirejob.interval
+ 0
+
+
+
+ mapred.job.tracker.history.completed.location
+ /mapred/history/done
+ No description
+
+
+
+ mapred.task.maxvmem
+
+ true
+ No description
+
+
+
+ mapred.jobtracker.maxtasks.per.job
+
+ true
+ The maximum number of tasks for a single job.
+ A value of -1 indicates that there is no maximum.
+
+
+
+ mapreduce.fileoutputcommitter.marksuccessfuljobs
+ false
+
+
+
+ mapred.userlog.retain.hours
+
+
+
+
+ mapred.job.reuse.jvm.num.tasks
+ 1
+
+ How many tasks to run per jvm. If set to -1, there is no limit
+
+ true
+
+
+
+ mapreduce.jobtracker.kerberos.principal
+
+
+ JT user name key.
+
+
+
+
+ mapreduce.tasktracker.kerberos.principal
+
+
+ tt user name key. "_HOST" is replaced by the host name of the task tracker.
+
+
+
+
+
+ hadoop.job.history.user.location
+ none
+ true
+
+
+
+
+ mapreduce.jobtracker.keytab.file
+
+
+ The keytab for the jobtracker principal.
+
+
+
+
+
+ mapreduce.tasktracker.keytab.file
+
+ The filename of the keytab for the task tracker
+
+
+
+ mapreduce.jobtracker.staging.root.dir
+ /user
+ The Path prefix for where the staging directories should be placed. The next level is always the user's
+ name. It is a path in the default file system.
+
+
+
+ mapreduce.tasktracker.group
+ hadoop
+ The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.
+
+
+
+
+ mapreduce.jobtracker.split.metainfo.maxsize
+ 50000000
+ true
+ If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+ initialize.
+
+
+
+ mapreduce.history.server.embedded
+ false
+ Should job history server be embedded within Job tracker
+process
+ true
+
+
+
+ mapreduce.history.server.http.address
+
+
+ Http address of the history server
+ true
+
+
+
+ mapreduce.jobhistory.kerberos.principal
+
+
+ Job history user name key. (must map to same user as JT
+user)
+
+
+
+ mapreduce.jobhistory.keytab.file
+
+
+ The keytab for the job history server principal.
+
+
+
+ mapred.jobtracker.blacklist.fault-timeout-window
+ 180
+
+ 3-hour sliding window (value is in minutes)
+
+
+
+
+ mapred.jobtracker.blacklist.fault-bucket-width
+ 15
+
+ 15-minute bucket size (value is in minutes)
+
+
+
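Taken together, these two defaults track tracker faults over a 180-minute window split into 15-minute buckets, i.e. 12 buckets.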
+
+ mapred.queue.names
+ default
+ Comma separated list of queues configured for this jobtracker.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 00000000000..79d219bd19e
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,41 @@
+
+
+
+ mapred
+ Apache Hadoop Distributed Processing Framework
+ 1.1.2
+
+
+
+ JOBTRACKER
+ MASTER
+
+
+
+ TASKTRACKER
+ SLAVE
+
+
+
+ MAPREDUCE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
new file mode 100644
index 00000000000..bd7de072f7d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Nagios Monitoring and Alerting system
+ 3.2.3
+
+
+
+ NAGIOS_SERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 00000000000..1665ba8a581
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,245 @@
+
+
+
+
+
+
+
+ oozie.base.url
+ http://localhost:11000/oozie
+ Base Oozie URL.
+
+
+
+ oozie.system.id
+ oozie-${user.name}
+
+ The Oozie system ID.
+
+
+
+
+ oozie.systemmode
+ NORMAL
+
+ System mode for Oozie at startup.
+
+
+
+
+ oozie.service.AuthorizationService.security.enabled
+ true
+
+ Specifies whether security (user name/admin role) is enabled or not.
+ If disabled any user can manage Oozie system and manage any job.
+
+
+
+
+ oozie.service.PurgeService.older.than
+ 30
+
+ Jobs older than this value, in days, will be purged by the PurgeService.
+
+
+
+
+ oozie.service.PurgeService.purge.interval
+ 3600
+
+ Interval at which the purge service will run, in seconds.
+
+
+
+
+ oozie.service.CallableQueueService.queue.size
+ 1000
+ Max callable queue size
+
+
+
+ oozie.service.CallableQueueService.threads
+ 10
+ Number of threads used for executing callables
+
+
+
+ oozie.service.CallableQueueService.callable.concurrency
+ 3
+
+ Maximum concurrency for a given callable type.
+ Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+ Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+ All commands that use action executors (action-start, action-end, action-kill and action-check) use
+ the action type as the callable type.
+
+
+
+
+ oozie.service.coord.normal.default.timeout
+ 120
+ Default timeout for a coordinator action input check (in minutes) for normal job.
+ -1 means infinite timeout
+
+
+
+ oozie.db.schema.name
+ oozie
+
+ Oozie DataBase Name
+
+
+
+
+ oozie.service.HadoopAccessorService.jobTracker.whitelist
+
+
+ Whitelisted job tracker for Oozie service.
+
+
+
+
+ oozie.authentication.type
+ simple
+
+
+
+
+
+ oozie.service.HadoopAccessorService.nameNode.whitelist
+
+
+
+
+
+
+ oozie.service.WorkflowAppService.system.libpath
+ /user/${user.name}/share/lib
+
+ System library path to use for workflow applications.
+ This path is added to workflow application if their job properties sets
+ the property 'oozie.use.system.libpath' to true.
+
+
+
+
+ use.system.libpath.for.mapreduce.and.pig.jobs
+ false
+
+ If set to true, submissions of MapReduce and Pig jobs will include
+ automatically the system library path, thus not requiring users to
+ specify where the Pig JAR files are. Instead, the ones from the system
+ library path are used.
+
+
+
+ oozie.authentication.kerberos.name.rules
+
+ RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+ RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+ RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ DEFAULT
+
+ The mapping from kerberos principal names to local OS user names.
+
+
+ oozie.service.HadoopAccessorService.hadoop.configurations
+ *=/etc/hadoop/conf
+
+ Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+ the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+ used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+ the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+ the Oozie configuration directory; the path can also be absolute (i.e. pointing
+ to Hadoop client conf/ directories in the local filesystem).
+
+
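For illustration, a value of this form might look like *=/etc/hadoop/conf,jt.example.com:8021=/etc/hadoop/conf.jt (the host name and port here are hypothetical).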
+
+ oozie.service.ActionService.executor.ext.classes
+
+ org.apache.oozie.action.email.EmailActionExecutor,
+ org.apache.oozie.action.hadoop.HiveActionExecutor,
+ org.apache.oozie.action.hadoop.ShellActionExecutor,
+ org.apache.oozie.action.hadoop.SqoopActionExecutor,
+ org.apache.oozie.action.hadoop.DistcpActionExecutor
+
+
+
+
+ oozie.service.SchemaService.wf.ext.schemas
+ shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+
+
+ oozie.service.JPAService.create.db.schema
+ false
+
+ Creates Oozie DB.
+
+ If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
+ If set to false, it does not create the DB schema; if the DB schema does not exist, start up fails.
+
+
+
+
+ oozie.service.JPAService.jdbc.driver
+ org.apache.derby.jdbc.EmbeddedDriver
+
+ JDBC driver class.
+
+
+
+
+ oozie.service.JPAService.jdbc.url
+ jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true
+
+ JDBC URL.
+
+
+
+
+ oozie.service.JPAService.jdbc.username
+ sa
+
+ DB user name.
+
+
+
+
+ oozie.service.JPAService.jdbc.password
+
+
+ DB user password.
+
+ IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
+ and if it is empty, Configuration assumes it is NULL.
+
+
+
+
+ oozie.service.JPAService.pool.max.active.conn
+ 10
+
+ Max number of connections.
+
+
+
\ No newline at end of file
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml
new file mode 100644
index 00000000000..83ccb06d5bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ System for workflow coordination and execution of Apache Hadoop jobs
+ 3.2.0
+
+
+
+ OOZIE_SERVER
+ MASTER
+
+
+
+ OOZIE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties
new file mode 100644
index 00000000000..01000b53ab2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/configuration/pig.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml
new file mode 100644
index 00000000000..4982fd217ba
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/PIG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Scripting platform for analyzing large datasets
+ 0.10.1
+
+
+
+ PIG
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml
new file mode 100644
index 00000000000..ae0e68b9c48
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/SQOOP/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases
+ 1.4.2
+
+
+
+ SQOOP
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 00000000000..31d0113faa2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+
+
+ templeton.port
+ 50111
+ The HTTP port for the main server.
+
+
+
+ templeton.hadoop.conf.dir
+ /etc/hadoop/conf
+ The path to the Hadoop configuration.
+
+
+
+ templeton.jar
+ /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
+ The path to the Templeton jar file.
+
+
+
+ templeton.libjars
+ /usr/lib/zookeeper/zookeeper.jar
+ Jars to add to the classpath.
+
+
+
+
+ templeton.hadoop
+ /usr/bin/hadoop
+ The path to the Hadoop executable.
+
+
+
+ templeton.pig.archive
+ hdfs:///apps/webhcat/pig.tar.gz
+ The path to the Pig archive.
+
+
+
+ templeton.pig.path
+ pig.tar.gz/pig/bin/pig
+ The path to the Pig executable.
+
+
+
+ templeton.hcat
+ /usr/bin/hcat
+ The path to the hcatalog executable.
+
+
+
+ templeton.hive.archive
+ hdfs:///apps/webhcat/hive.tar.gz
+ The path to the Hive archive.
+
+
+
+ templeton.hive.path
+ hive.tar.gz/hive/bin/hive
+ The path to the Hive executable.
+
+
+
+ templeton.hive.properties
+
+ Properties to set when running hive.
+
+
+
+
+ templeton.zookeeper.hosts
+
+ ZooKeeper servers, as comma separated host:port pairs
+
+
+
+ templeton.storage.class
+ org.apache.hcatalog.templeton.tool.ZooKeeperStorage
+ The class to use as storage
+
+
+
+ templeton.override.enabled
+ false
+
+ Enable the override path in templeton.override.jars
+
+
+
+
+ templeton.streaming.jar
+ hdfs:///apps/webhcat/hadoop-streaming.jar
+ The hdfs path to the Hadoop streaming jar file.
+
+
+
+ templeton.exec.timeout
+ 60000
+ Timeout for the Templeton API
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml
new file mode 100644
index 00000000000..e65992f3237
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,31 @@
+
+
+
+ root
+ This is comment for WEBHCAT service
+ 0.5.0
+
+
+
+ WEBHCAT_SERVER
+ MASTER
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 00000000000..0e21f4f94f6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ This is comment for ZOOKEEPER service
+ 3.4.5
+
+
+
+ ZOOKEEPER_SERVER
+ MASTER
+
+
+
+ ZOOKEEPER_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml
new file mode 100644
index 00000000000..ca458226042
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml
@@ -0,0 +1,22 @@
+
+
+
+
+ true
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml
new file mode 100644
index 00000000000..5bb20d8522c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml
@@ -0,0 +1,99 @@
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos6
+ HDP-2.0.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos5
+ HDP-2.0.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos6
+ HDP-2.0.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
+ http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos5
+ HDP-2.0.1
+ HDP
+
+
+
+ HDP-epel
+ HDP-epel
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml
new file mode 100644
index 00000000000..395bb4fa70f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml
@@ -0,0 +1,36 @@
+
+
+
+ root
+ Ganglia Metrics Collection system
+ 3.2.0
+
+
+
+ GANGLIA_SERVER
+ MASTER
+
+
+
+ GANGLIA_MONITOR
+ SLAVE
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 00000000000..e45f23c962c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for HRegionInterface protocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.protocol.acl
+ *
+ ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.masterregion.protocol.acl
+ *
+ ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 00000000000..ba47e760b65
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,334 @@
+
+
+
+
+
+ hbase.rootdir
+
+ The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration, or else all data will be lost
+ on machine restart.
+
+
+
+ hbase.cluster.distributed
+ true
+ The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+
+
+
+ hbase.tmp.dir
+ /var/log/hbase
+ Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp' (The '/tmp' directory is often cleared on
+ machine restart).
+
+
+
+ hbase.master.info.bindAddress
+
+ The bind address for the HBase Master web UI
+
+
+
+ hbase.regionserver.global.memstore.upperLimit
+ 0.4
+ Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+
+
+
+ hbase.regionserver.handler.count
+
+ Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+ Default is 10.
+
+
+
+ hbase.hregion.majorcompaction
+
+ The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+
+
+
+ hbase.master.lease.thread.wakefrequency
+ 3000
+ The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+
+
+
+ hbase.regionserver.global.memstore.lowerLimit
+ 0.35
+ When memstores are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+ This value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+
+
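As a hypothetical illustration of the two memstore limits above: with an 8 GB region server heap, updates are blocked once all memstores together exceed roughly 3.2 GB (0.4 of heap), and forced flushing continues down to about 2.8 GB (0.35 of heap).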
+
+ hbase.hregion.memstore.block.multiplier
+
+ Block updates if memstore has hbase.hregion.memstore.block.multiplier
+ times hbase.hregion.flush.size bytes. Useful for preventing
+ runaway memstore growth during spikes in update traffic. Without an
+ upper bound, the memstore fills such that when it flushes, the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+
+
+
+ hbase.hregion.memstore.flush.size
+
+
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+
+
+
+ hbase.hregion.memstore.mslab.enabled
+ true
+
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+
+
+
+ hbase.hregion.max.filesize
+ 268435456
+
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 1G.
+
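For reference, 268435456 bytes is 256 MB, which differs from the "Default: 1G" mentioned in the description.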
+
+
+ hbase.client.scanner.caching
+
+ Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+
+
+
+ zookeeper.session.timeout
+
+ ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+
+
+
+ hbase.client.keyvalue.maxsize
+
+ Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+ storage file. Since such entries cannot be split, this helps avoid a region
+ becoming unsplittable because a single entry is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+
+
+
+ hbase.hstore.compactionThreshold
+ 3
+
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+
+
+
+ hbase.hstore.blockingStoreFiles
+ 7
+
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+
+
+
+ hfile.block.cache.size
+ 0.25
+
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+ used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+ Set to 0 to disable but it's not recommended.
+
+
+
+
+
+ hbase.master.keytab.file
+ /etc/security/keytabs/hm.service.keytab
+ Full path to the kerberos keytab file to use for logging in
+ the configured HMaster server principal.
+
+
+
+ hbase.master.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HMaster process. The principal name should
+ be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+ portion, it will be replaced with the actual hostname of the running
+ instance.
+
+
+
+ hbase.regionserver.keytab.file
+ /etc/security/keytabs/rs.service.keytab
+ Full path to the kerberos keytab file to use for logging in
+ the configured HRegionServer server principal.
+
+
+
+ hbase.regionserver.kerberos.principal
+
+ Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HRegionServer process. The principal name
+ should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+ hostname portion, it will be replaced with the actual hostname of the
+ running instance. An entry for this principal must exist in the file
+ specified in hbase.regionserver.keytab.file
+
+
+
+
+
+ hbase.superuser
+ hbase
+ List of users or groups (comma-separated), who are allowed
+ full privileges, regardless of stored ACLs, across the cluster.
+ Only used when HBase security is enabled.
+
+
+
+
+ hbase.coprocessor.region.classes
+
+ A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+
+
+
+
+ hbase.coprocessor.master.classes
+
+ A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+
+
+
+
+ hbase.zookeeper.property.clientPort
+ 2181
+ Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+
+
+
+
+
+ hbase.zookeeper.quorum
+
+ Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+
+
+
+
+
+ dfs.support.append
+ true
+ Does HDFS allow appends to files?
+ This is an HDFS config, set here so the HDFS client will do append support.
+ You must ensure that this config is also true on the server side when running HBase
+ (you will have to restart your cluster after setting it).
+
+
+
+
+ dfs.client.read.shortcircuit
+ true
+ Enable/Disable short circuit read for your client.
+ Hadoop servers should be configured to allow short circuit read
+ for the hbase user for this to take effect
+
+
+
+
+ dfs.client.read.shortcircuit.skip.checksum
+ false
+ Enable/disable skipping the checksum check
+
+
+
+ hbase.regionserver.optionalcacheflushinterval
+ 10000
+
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml
new file mode 100644
index 00000000000..645f7cefb4c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml
@@ -0,0 +1,40 @@
+
+
+
+ mapred
+ Non-relational distributed database and centralized service for configuration management & synchronization
+ 0.94.5.22-1
+
+
+
+ HBASE_MASTER
+ MASTER
+
+
+
+ HBASE_REGIONSERVER
+ SLAVE
+
+
+
+ HBASE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml
new file mode 100644
index 00000000000..45f3342ee75
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ This is comment for HCATALOG service
+ 0.5.0.22-1
+
+
+
+ HCAT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 00000000000..e646d5b0165
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,257 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.file.buffer.size
+ 131072
+ The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec
+ A list of the compression codec classes that can be used
+ for compression/decompression.
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+ The implementation for lzo codec.
+
+
+
+
+
+ fs.default.name
+
+
+ The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.
+ true
+
+
+
+ fs.trash.interval
+ 360
+ Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+
+
+
+
+ fs.checkpoint.dir
+
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+
+
+
+
+ fs.checkpoint.edits.dir
+ ${fs.checkpoint.dir}
+ Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+
+
+
+
+ fs.checkpoint.period
+ 21600
+ The number of seconds between two periodic checkpoints.
+
+
+
+
+ fs.checkpoint.size
+ 536870912
+ The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+
+
+
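
As an illustrative aside (not part of this patch), fs.checkpoint.period and fs.checkpoint.size act as a "whichever limit is hit first" pair. A minimal Python sketch of that rule, using the values above:

# Illustrative only: the SecondaryNameNode checkpoints when either limit is reached.
fs_checkpoint_period_s = 21600          # 6 hours, from fs.checkpoint.period
fs_checkpoint_size_bytes = 536870912    # 512 MB of edit log, from fs.checkpoint.size

def should_checkpoint(seconds_since_last, edit_log_bytes):
    return (seconds_since_last >= fs_checkpoint_period_s
            or edit_log_bytes >= fs_checkpoint_size_bytes)

print(should_checkpoint(3600, 600000000))   # True: edit log already past 512 MB
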
+
+
+ ipc.client.idlethreshold
+ 8000
+ Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+ The maximum time after which a client will bring down the
+ connection to the server.
+
+
+
+
+ ipc.client.connect.max.retries
+ 50
+ Defines the maximum number of retries for IPC connections.
+
+
+
+
+ webinterface.private.actions
+ false
+ If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to the public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+
+
+
+
+ hadoop.security.authentication
+ simple
+
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+
+
+
+ hadoop.security.authorization
+ false
+
+ Enable authorization for different protocols.
+
+
+
+
+ hadoop.security.auth_to_local
+
+ RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+ RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+ RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+ RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+ DEFAULT
+
+The mapping from kerberos principal names to local OS user names.
+ So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+ "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+    base, filter, and substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+
+
+
+
+
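
As an illustrative aside (not part of this patch, and simplified relative to Hadoop's actual KerberosName implementation), the following Python sketch shows how one of the example rules above, RULE:[2:$1@$0](.*@ACME.COM)s/@.*//, maps a two-component principal to a short name:

# Illustrative only: apply RULE:[2:$1@$0](.*@ACME.COM)s/@.*// to a principal.
import re

def apply_rule(principal):
    name, realm = principal.split("@", 1)
    components = name.split("/")
    if len(components) != 2:                 # base [2:...] only matches two-component names
        return None
    base = "%s@%s" % (components[0], realm)  # [2:$1@$0] builds "first-component@realm"
    if not re.match(r".*@ACME\.COM$", base): # filter (.*@ACME.COM)
        return None
    return re.sub(r"@.*", "", base)          # substitution s/@.*//

print(apply_rule("joe/admin@ACME.COM"))      # -> joe
print(apply_rule("omalley@APACHE.ORG"))      # -> None, falls through to DEFAULT
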
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
new file mode 100644
index 00000000000..95fd56576dd
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
@@ -0,0 +1,207 @@
+
+
+
+
+
+
+ namenode_host
+
+ NameNode Host.
+
+
+ dfs_name_dir
+ /hadoop/hdfs/namenode
+ NameNode Directories.
+
+
+ snamenode_host
+
+ Secondary NameNode.
+
+
+ rm_host
+
+ Resource Manager.
+
+
+ nm_hosts
+
+ List of Node Manager Hosts.
+
+
+ hs_host
+
+ History Server.
+
+
+ fs_checkpoint_dir
+ /hadoop/hdfs/namesecondary
+ Secondary NameNode checkpoint dir.
+
+
+ datanode_hosts
+
+ List of Datanode Hosts.
+
+
+ dfs_data_dir
+ /hadoop/hdfs/data
+ Data directories for Data Nodes.
+
+
+ hdfs_log_dir_prefix
+ /var/log/hadoop
+ Hadoop Log Dir Prefix
+
+
+ hadoop_pid_dir_prefix
+ /var/run/hadoop
+ Hadoop PID Dir Prefix
+
+
+ dfs_webhdfs_enabled
+ true
+ WebHDFS enabled
+
+
+ hadoop_heapsize
+ 1024
+ Hadoop maximum Java heap size
+
+
+ namenode_heapsize
+ 1024
+ NameNode Java heap size
+
+
+ namenode_opt_newsize
+ 200
+ NameNode new generation size
+
+
+ namenode_opt_maxnewsize
+ 640
+ NameNode maximum new generation size
+
+
+ datanode_du_reserved
+ 1
+ Reserved space for HDFS
+
+
+ dtnode_heapsize
+ 1024
+ DataNode maximum Java heap size
+
+
+ dfs_datanode_failed_volume_tolerated
+ 0
+ DataNode volumes failure toleration
+
+
+ fs_checkpoint_period
+ 21600
+ HDFS Maximum Checkpoint Delay
+
+
+ fs_checkpoint_size
+ 0.5
+ FS Checkpoint Size.
+
+
+ proxyuser_group
+ users
+ Proxy user group.
+
+
+ dfs_exclude
+
+ HDFS Exclude hosts.
+
+
+ dfs_include
+
+ HDFS Include hosts.
+
+
+ dfs_replication
+ 3
+ Default Block Replication.
+
+
+ dfs_block_local_path_access_user
+ hbase
+ User allowed block local path access (short-circuit reads).
+
+
+ dfs_datanode_address
+ 50010
+ Port for datanode address.
+
+
+ dfs_datanode_http_address
+ 50075
+ Port for datanode HTTP address.
+
+
+ dfs_datanode_data_dir_perm
+ 750
+ Datanode dir perms.
+
+
+
+ security_enabled
+ false
+ Hadoop Security
+
+
+ kerberos_domain
+ EXAMPLE.COM
+ Kerberos realm.
+
+
+ kadmin_pw
+
+ Kerberos realm admin password
+
+
+ keytab_path
+ /etc/security/keytabs
+ Kerberos keytab path.
+
+
+
+ keytab_path
+ /etc/security/keytabs
+ KeyTab Directory.
+
+
+ namenode_formatted_mark_dir
+ /var/run/hadoop/hdfs/namenode/formatted/
+ Formatted Mark Directory.
+
+
+ hdfs_user
+ hdfs
+ User and Groups.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 00000000000..6ec304d9972
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+
+
+
+
+ security.client.protocol.acl
+ *
+ ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.client.datanode.protocol.acl
+ *
+ ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.datanode.protocol.acl
+ *
+ ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.datanode.protocol.acl
+ *
+ ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.namenode.protocol.acl
+ *
+ ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.inter.tracker.protocol.acl
+ *
+ ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.job.submission.protocol.acl
+ *
+ ACL for JobSubmissionProtocol, used by job clients to
+ communicate with the jobtracker for job submission, querying job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.task.umbilical.protocol.acl
+ *
+ ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.admin.operations.protocol.acl
+ hadoop
+ ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+ security.refresh.usertogroups.mappings.protocol.acl
+ hadoop
+ ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.
+
+
+
+ security.refresh.policy.protocol.acl
+ hadoop
+ ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 00000000000..e1a244a3d33
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,438 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ dfs.name.dir
+
+
+ Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy.
+ true
+
+
+
+ dfs.support.append
+ true
+ to enable dfs append
+ true
+
+
+
+ dfs.webhdfs.enabled
+ true
+ to enable webhdfs
+ true
+
+
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+ #of failed disks dn would tolerate
+ true
+
+
+
+ dfs.block.local-path-access.user
+ hbase
+ the user who is allowed to perform short
+ circuit reads.
+
+ true
+
+
+
+ dfs.data.dir
+
+ Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+
+ true
+
+
+
+ dfs.hosts.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+
+
+ dfs.checksum.type
+ CRC32
+ The checksum method to be used by default. To maintain
+ compatibility, it is being set to CRC32. Once all migration steps
+ are complete, we can change it to CRC32C and take advantage of the
+ additional performance benefit.
+
+
+
+ dfs.replication.max
+ 50
+ Maximal block replication.
+
+
+
+
+ dfs.replication
+ 3
+ Default block replication.
+
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.heartbeat.interval
+ 3
+ Determines datanode heartbeat interval in seconds.
+
+
+
+ dfs.safemode.threshold.pct
+ 1.0f
+
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+
+
+
+
+ dfs.balance.bandwidthPerSec
+ 6250000
+
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in terms of
+ the number of bytes per second.
+
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:50010
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:50075
+
+
+
+ dfs.block.size
+ 134217728
+ The default block size for new files.
+
+
+
+ dfs.http.address
+
+The address and the base port where the dfs namenode web UI will listen on.
+If the port is 0 then the server will start on a free port.
+true
+
+
+
+dfs.datanode.du.reserved
+
+1073741824
+Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+
+
+
+
+dfs.datanode.ipc.address
+0.0.0.0:8010
+
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+
+
+
+
+dfs.blockreport.initialDelay
+120
+Delay for first block report in seconds.
+
+
+
+dfs.datanode.du.pct
+0.85f
+When calculating remaining space, only use this percentage of the real available space
+
+
+
+
+dfs.namenode.handler.count
+40
+The number of server threads for the namenode.
+
+
+
+dfs.datanode.max.xcievers
+1024
+PRIVATE CONFIG VARIABLE
+
+
+
+
+
+dfs.umaskmode
+022
+
+The octal umask used when creating files and directories.
+
+
+
+
+dfs.web.ugi
+
+gopher,gopher
+The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+
+
+
+
+dfs.permissions
+true
+
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+
+
+
+
+dfs.permissions.supergroup
+hdfs
+The name of the group of super-users.
+
+
+
+dfs.namenode.handler.count
+100
+Added to grow the queue size so that more client connections are allowed
+
+
+
+ipc.server.max.response.size
+5242880
+
+
+dfs.block.access.token.enable
+true
+
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+
+
+
+
+dfs.namenode.kerberos.principal
+
+
+Kerberos principal name for the NameNode
+
+
+
+
+dfs.secondary.namenode.kerberos.principal
+
+
+ Kerberos principal name for the secondary NameNode.
+
+
+
+
+
+
+ dfs.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the NameNode runs on.
+
+
+
+
+ dfs.secondary.namenode.kerberos.https.principal
+
+ The Kerberos principal for the host that the secondary NameNode runs on.
+
+
+
+
+
+ dfs.secondary.http.address
+
+ Address of secondary namenode web server
+
+
+
+ dfs.secondary.https.port
+ 50490
+ The https port where secondary-namenode binds
+
+
+
+ dfs.web.authentication.kerberos.principal
+
+
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+
+
+
+
+ dfs.web.authentication.kerberos.keytab
+
+
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+
+
+
+ dfs.datanode.kerberos.principal
+
+
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+
+
+
+
+ dfs.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.secondary.namenode.keytab.file
+
+
+ Combined keytab file containing the namenode service and host principals.
+
+
+
+
+ dfs.datanode.keytab.file
+
+
+ The filename of the keytab file for the DataNode.
+
+
+
+
+ dfs.https.port
+ 50470
+ The https port where namenode binds
+
+
+
+
+ dfs.https.address
+
+ The https address where namenode binds
+
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.
+
+
+
+ dfs.access.time.precision
+ 0
+ The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+
+
+
+
+ dfs.cluster.administrators
+ hdfs
+ ACL for who all can view the default servlets in the HDFS
+
+
+
+ ipc.server.read.threadpool.size
+ 5
+
+
+
+
+ dfs.namenode.check.stale.datanode
+ true
+
+ With this setting, the datanodes that have not replied to the heartbeat
+ for more than 30s (i.e. in a stale state) are used for reads only if all
+ other remote replicas have failed.
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml
new file mode 100644
index 00000000000..1fbfbe6da5d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml
@@ -0,0 +1,46 @@
+
+
+
+ root
+ Apache Hadoop Distributed File System
+ 2.0.3.22-1
+
+
+
+ NAMENODE
+ MASTER
+
+
+
+ DATANODE
+ SLAVE
+
+
+
+ SECONDARY_NAMENODE
+ MASTER
+
+
+
+ HDFS_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 00000000000..7d35558b8c6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,138 @@
+
+
+
+
+
+
+ hive.metastore.local
+ false
+ controls whether to connect to a remote metastore server or
+ open a new metastore server in the Hive Client JVM
+
+
+
+ javax.jdo.option.ConnectionURL
+
+ JDBC connect string for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionDriverName
+ com.mysql.jdbc.Driver
+ Driver class name for a JDBC metastore
+
+
+
+ javax.jdo.option.ConnectionUserName
+
+ username to use against metastore database
+
+
+
+ javax.jdo.option.ConnectionPassword
+
+ password to use against metastore database
+
+
+
+ hive.metastore.warehouse.dir
+ /apps/hive/warehouse
+ location of default database for the warehouse
+
+
+
+ hive.metastore.sasl.enabled
+
+ If true, the metastore thrift interface will be secured with SASL.
+ Clients must authenticate with Kerberos.
+
+
+
+ hive.metastore.kerberos.keytab.file
+
+ The path to the Kerberos Keytab file containing the metastore
+ thrift server's service principal.
+
+
+
+ hive.metastore.kerberos.principal
+
+ The service principal for the metastore thrift server. The special
+ string _HOST will be replaced automatically with the correct host name.
+
+
+
+ hive.metastore.cache.pinobjtypes
+ Table,Database,Type,FieldSchema,Order
+ List of comma separated metastore object types that should be pinned in the cache
+
+
+
+ hive.metastore.uris
+
+ URI for client to contact metastore server
+
+
+
+ hive.semantic.analyzer.factory.impl
+ org.apache.hivealog.cli.HCatSemanticAnalyzerFactory
+ controls which SemanticAnalyzerFactory implementation class is used by the CLI
+
+
+
+ hadoop.clientside.fs.operations
+ true
+ FS operations are owned by client
+
+
+
+ hive.metastore.client.socket.timeout
+ 60
+ MetaStore Client socket timeout in seconds
+
+
+
+ hive.metastore.execute.setugi
+ true
+ In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.
+
+
+
+ hive.security.authorization.enabled
+ true
+ enable or disable the hive client authorization
+
+
+
+ hive.security.authorization.manager
+ org.apache.hcatalog.security.HdfsAuthorizationProvider
+ the hive client authorization manager class name.
+ The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+
+
+
+ hive.server2.enable.doAs
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml
new file mode 100644
index 00000000000..c87b49483c1
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml
@@ -0,0 +1,43 @@
+
+
+
+ root
+ Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service
+ 0.10.0.22-1
+
+
+
+ HIVE_METASTORE
+ MASTER
+
+
+ HIVE_SERVER
+ MASTER
+
+
+ MYSQL_SERVER
+ MASTER
+
+
+ HIVE_CLIENT
+ CLIENT
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg
new file mode 100644
index 00000000000..502ddaae81b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
+yarn.nodemanager.linux-container-executor.group=hadoop
+yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
+banned.users=hdfs,bin,0
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml
new file mode 100644
index 00000000000..3a2af490593
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
new file mode 100644
index 00000000000..ce12380767c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ mapred.queue.default.acl-submit-job
+ *
+
+
+
+ mapred.queue.default.acl-administer-jobs
+ *
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
new file mode 100644
index 00000000000..29fcfebc301
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -0,0 +1,549 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ io.sort.mb
+
+ No description
+
+
+
+ io.sort.record.percent
+ .2
+ No description
+
+
+
+ io.sort.spill.percent
+
+ No description
+
+
+
+ io.sort.factor
+ 100
+ No description
+
+
+
+
+
+ mapred.tasktracker.tasks.sleeptime-before-sigkill
+ 250
+ Normally, this is the amount of time before killing
+ processes, and the recommended-default is 5.000 seconds - a value of
+ 5000 here. In this case, we are using it solely to blast tasks before
+ killing them, and killing them very quickly (1/4 second) to guarantee
+ that we do not leave VMs around for later jobs.
+
+
+
+
+ mapred.job.tracker.handler.count
+ 50
+
+ The number of server threads for the JobTracker. This should be roughly
+ 4% of the number of tasktracker nodes.
+
+
+
+
+ mapred.system.dir
+ /mapred/system
+ No description
+ true
+
+
+
+ mapred.job.tracker
+
+
+ No description
+ true
+
+
+
+ mapred.job.tracker.http.address
+
+
+ No description
+ true
+
+
+
+
+ mapred.local.dir
+
+ No description
+ true
+
+
+
+ mapreduce.cluster.administrators
+ hadoop
+
+
+
+ mapred.reduce.parallel.copies
+ 30
+ No description
+
+
+
+ mapred.tasktracker.map.tasks.maximum
+
+ No description
+
+
+
+ mapred.tasktracker.reduce.tasks.maximum
+
+ No description
+
+
+
+ tasktracker.http.threads
+ 50
+
+
+
+ mapred.map.tasks.speculative.execution
+ false
+ If true, then multiple instances of some map tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.tasks.speculative.execution
+ false
+ If true, then multiple instances of some reduce tasks
+ may be executed in parallel.
+
+
+
+ mapred.reduce.slowstart.completed.maps
+ 0.05
+
+
+
+ mapred.inmem.merge.threshold
+ 1000
+ The threshold, in terms of the number of files
+ for the in-memory merge process. When we accumulate threshold number of files
+ we initiate the in-memory merge and spill to disk. A value of 0 or less
+ indicates that we don't want any threshold and instead depend only on
+ the ramfs's memory consumption to trigger the merge.
+
+
+
+
+ mapred.job.shuffle.merge.percent
+ 0.66
+ The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapred.job.shuffle.input.buffer.percent.
+
+
+
+
+ mapred.job.shuffle.input.buffer.percent
+ 0.7
+ The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+
+
+
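
As an illustrative aside (not part of this patch), the two shuffle properties above combine with the reduce task heap to determine when in-memory merging starts. A minimal Python sketch, assuming a hypothetical 1 GB reduce task heap:

# Illustrative only: memory available for shuffled map outputs and the merge trigger.
reduce_heap_mb = 1024                         # assumed reduce task -Xmx
shuffle_input_buffer_percent = 0.7            # mapred.job.shuffle.input.buffer.percent
shuffle_merge_percent = 0.66                  # mapred.job.shuffle.merge.percent
shuffle_buffer_mb = reduce_heap_mb * shuffle_input_buffer_percent
merge_trigger_mb = shuffle_buffer_mb * shuffle_merge_percent
print(shuffle_buffer_mb, merge_trigger_mb)    # ~716.8 MB buffer; merge starts near 473 MB
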
+
+ mapred.map.output.compression.codec
+
+ If the map outputs are compressed, how should they be
+ compressed
+
+
+
+
+ mapred.output.compression.type
+ BLOCK
+ If the job outputs are to be compressed as SequenceFiles, how should
+ they be compressed? Should be one of NONE, RECORD or BLOCK.
+
+
+
+
+
+ mapred.jobtracker.completeuserjobs.maximum
+ 5
+
+
+
+ mapred.jobtracker.taskScheduler
+
+
+
+
+ mapred.jobtracker.restart.recover
+ false
+ "true" to enable (job) recovery upon restart,
+ "false" to start afresh
+
+
+
+
+ mapred.job.reduce.input.buffer.percent
+ 0.0
+ The percentage of memory- relative to the maximum heap size- to
+ retain map outputs during the reduce. When the shuffle is concluded, any
+ remaining map outputs in memory must consume less than this threshold before
+ the reduce can begin.
+
+
+
+
+ mapreduce.reduce.input.limit
+ 10737418240
+ The limit on the input size of the reduce. (This value
+ is 10 Gb.) If the estimated input size of the reduce is greater than
+ this value, job is failed. A value of -1 means that there is no limit
+ set.
+
+
+
+
+
+ mapred.compress.map.output
+
+
+
+
+
+ mapred.task.timeout
+ 600000
+ The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string.
+
+
+
+
+ jetty.connector
+ org.mortbay.jetty.nio.SelectChannelConnector
+ No description
+
+
+
+ mapred.task.tracker.task-controller
+
+
+ TaskController which is used to launch and manage task execution.
+
+
+
+
+ mapred.child.root.logger
+ INFO,TLA
+
+
+
+ mapred.child.java.opts
+
+
+ No description
+
+
+
+ mapred.cluster.map.memory.mb
+
+
+
+
+ mapred.cluster.reduce.memory.mb
+
+
+
+
+ mapred.job.map.memory.mb
+
+
+
+
+ mapred.job.reduce.memory.mb
+
+
+
+
+ mapred.cluster.max.map.memory.mb
+
+
+
+
+ mapred.cluster.max.reduce.memory.mb
+
+
+
+
+ mapred.hosts
+
+
+
+
+ mapred.hosts.exclude
+
+
+
+
+ mapred.max.tracker.blacklists
+ 16
+
+ If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+
+
+
+
+ mapred.healthChecker.script.path
+
+
+
+
+ mapred.healthChecker.interval
+ 135000
+
+
+
+ mapred.healthChecker.script.timeout
+ 60000
+
+
+
+ mapred.job.tracker.persist.jobstatus.active
+ false
+ Indicates if persistency of job status information is
+ active or not.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.hours
+ 1
+ The number of hours job status information is persisted in DFS.
+ The job status information will be available after it drops off the memory
+ queue and between jobtracker restarts. With a zero value the job status
+ information is not persisted at all in DFS.
+
+
+
+
+ mapred.job.tracker.persist.jobstatus.dir
+
+ The directory where the job status information is persisted
+ in a file system to be available after it drops off the memory queue and
+ between jobtracker restarts.
+
+
+
+
+ mapred.jobtracker.retirejob.check
+ 10000
+
+
+
+ mapred.jobtracker.retirejob.interval
+ 21600000
+
+
+
+ mapred.job.tracker.history.completed.location
+ /mapred/history/done
+ No description
+
+
+
+ mapred.task.maxvmem
+
+ true
+ No description
+
+
+
+ mapred.jobtracker.maxtasks.per.job
+
+ true
+ The maximum number of tasks for a single job.
+ A value of -1 indicates that there is no maximum.
+
+
+
+ mapreduce.fileoutputcommitter.marksuccessfuljobs
+ false
+
+
+
+ mapred.userlog.retain.hours
+
+
+
+
+ mapred.job.reuse.jvm.num.tasks
+ 1
+
+ How many tasks to run per jvm. If set to -1, there is no limit
+
+ true
+
+
+
+ mapreduce.jobtracker.kerberos.principal
+
+
+ JT user name key.
+
+
+
+
+ mapreduce.tasktracker.kerberos.principal
+
+
+ tt user name key. "_HOST" is replaced by the host name of the task tracker.
+
+
+
+
+
+ hadoop.job.history.user.location
+ none
+ true
+
+
+
+
+ mapreduce.jobtracker.keytab.file
+
+
+ The keytab for the jobtracker principal.
+
+
+
+
+
+ mapreduce.tasktracker.keytab.file
+
+ The filename of the keytab for the task tracker
+
+
+
+ mapreduce.jobtracker.staging.root.dir
+ /user
+ The Path prefix for where the staging directories should be placed. The next level is always the user's
+ name. It is a path in the default file system.
+
+
+
+ mapreduce.tasktracker.group
+ hadoop
+ The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.
+
+
+
+
+ mapreduce.jobtracker.split.metainfo.maxsize
+ 50000000
+ true
+ If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+ initialization.
+
+
+
+ mapreduce.history.server.embedded
+ false
+ Should job history server be embedded within Job tracker
+process
+ true
+
+
+
+ mapreduce.history.server.http.address
+
+
+ Http address of the history server
+ true
+
+
+
+ mapreduce.jobhistory.kerberos.principal
+
+
+ Job history user name key. (must map to same user as JT
+user)
+
+
+
+ mapreduce.jobhistory.keytab.file
+
+
+ The keytab for the job history server principal.
+
+
+
+ mapred.jobtracker.blacklist.fault-timeout-window
+ 180
+
+ 3-hour sliding window (value is in minutes)
+
+
+
+
+ mapred.jobtracker.blacklist.fault-bucket-width
+ 15
+
+ 15-minute bucket size (value is in minutes)
+
+
+
+
+ mapred.queue.names
+ default
+ Comma separated list of queues configured for this jobtracker.
+
+
+
+ mapreduce.shuffle.port
+ 8081
+ Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.
+
+
+
+ mapreduce.jobhistory.intermediate-done-dir
+ /mr-history/tmp
+ Directory where history files are written by MapReduce jobs.
+
+
+
+ mapreduce.jobhistory.done-dir
+ /mr-history/done
+ Directory where history files are managed by the MR JobHistory Server.
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml
new file mode 100644
index 00000000000..7c4d1f403dc
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml
@@ -0,0 +1,32 @@
+
+
+
+ mapred
+ Apache Hadoop NextGen MapReduce (client libraries)
+ 2.0.3.22-1
+
+
+ HISTORYSERVER
+ MASTER
+
+
+ MAPREDUCE2_CLIENT
+ CLIENT
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
new file mode 100644
index 00000000000..bd7de072f7d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Nagios Monitoring and Alerting system
+ 3.2.3
+
+
+
+ NAGIOS_SERVER
+ MASTER
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 00000000000..1665ba8a581
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,245 @@
+
+
+
+
+
+
+
+ oozie.base.url
+ http://localhost:11000/oozie
+ Base Oozie URL.
+
+
+
+ oozie.system.id
+ oozie-${user.name}
+
+ The Oozie system ID.
+
+
+
+
+ oozie.systemmode
+ NORMAL
+
+ System mode for Oozie at startup.
+
+
+
+
+ oozie.service.AuthorizationService.security.enabled
+ true
+
+ Specifies whether security (user name/admin role) is enabled or not.
+ If disabled any user can manage Oozie system and manage any job.
+
+
+
+
+ oozie.service.PurgeService.older.than
+ 30
+
+ Jobs older than this value, in days, will be purged by the PurgeService.
+
+
+
+
+ oozie.service.PurgeService.purge.interval
+ 3600
+
+ Interval at which the purge service will run, in seconds.
+
+
+
+
+ oozie.service.CallableQueueService.queue.size
+ 1000
+ Max callable queue size
+
+
+
+ oozie.service.CallableQueueService.threads
+ 10
+ Number of threads used for executing callables
+
+
+
+ oozie.service.CallableQueueService.callable.concurrency
+ 3
+
+ Maximum concurrency for a given callable type.
+ Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+ Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+ All commands that use action executors (action-start, action-end, action-kill and action-check) use
+ the action type as the callable type.
+
+
+
+
+ oozie.service.coord.normal.default.timeout
+ 120
+ Default timeout for a coordinator action input check (in minutes) for normal job.
+ -1 means infinite timeout
+
+
+
+ oozie.db.schema.name
+ oozie
+
+ Oozie DataBase Name
+
+
+
+
+ oozie.service.HadoopAccessorService.jobTracker.whitelist
+
+
+ Whitelisted job tracker for Oozie service.
+
+
+
+
+ oozie.authentication.type
+ simple
+
+
+
+
+
+ oozie.service.HadoopAccessorService.nameNode.whitelist
+
+
+
+
+
+
+ oozie.service.WorkflowAppService.system.libpath
+ /user/${user.name}/share/lib
+
+ System library path to use for workflow applications.
+ This path is added to a workflow application if its job properties set
+ the property 'oozie.use.system.libpath' to true.
+
+
+
+
+ use.system.libpath.for.mapreduce.and.pig.jobs
+ false
+
+ If set to true, submissions of MapReduce and Pig jobs will automatically
+ include the system library path, thus not requiring users to
+ specify where the Pig JAR files are. Instead, the ones from the system
+ library path are used.
+
+
+
+ oozie.authentication.kerberos.name.rules
+
+ RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+ RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+ RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+ DEFAULT
+
+ The mapping from kerberos principal names to local OS user names.
+
+
+ oozie.service.HadoopAccessorService.hadoop.configurations
+ *=/etc/hadoop/conf
+
+ Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+ the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+ used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+ the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+ the Oozie configuration directory; the path can also be absolute (i.e. pointing
+ to Hadoop client conf/ directories in the local filesystem).
+
+
+
+ oozie.service.ActionService.executor.ext.classes
+
+ org.apache.oozie.action.email.EmailActionExecutor,
+ org.apache.oozie.action.hadoop.HiveActionExecutor,
+ org.apache.oozie.action.hadoop.ShellActionExecutor,
+ org.apache.oozie.action.hadoop.SqoopActionExecutor,
+ org.apache.oozie.action.hadoop.DistcpActionExecutor
+
+
+
+
+ oozie.service.SchemaService.wf.ext.schemas
+ shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+
+
+ oozie.service.JPAService.create.db.schema
+ false
+
+ Creates Oozie DB.
+
+ If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
+ If set to false, it does not create the DB schema; if the DB schema does not exist, start up fails.
+
+
+
+
+ oozie.service.JPAService.jdbc.driver
+ org.apache.derby.jdbc.EmbeddedDriver
+
+ JDBC driver class.
+
+
+
+
+ oozie.service.JPAService.jdbc.url
+ jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true
+
+ JDBC URL.
+
+
+
+
+ oozie.service.JPAService.jdbc.username
+ sa
+
+ DB user name.
+
+
+
+
+ oozie.service.JPAService.jdbc.password
+
+
+ DB user password.
+
+ IMPORTANT: if the password is empty leave a 1 space string, the service trims the value,
+ if empty Configuration assumes it is NULL.
+
+
+
+
+ oozie.service.JPAService.pool.max.active.conn
+ 10
+
+ Max number of connections.
+
+
+
\ No newline at end of file
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml
new file mode 100644
index 00000000000..a65b547bd1f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ System for workflow coordination and execution of Apache Hadoop jobs
+ 3.3.1
+
+
+
+ OOZIE_SERVER
+ MASTER
+
+
+
+ OOZIE_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties
new file mode 100644
index 00000000000..01000b53ab2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml
new file mode 100644
index 00000000000..731d7b0f63c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Scripting platform for analyzing large datasets
+ 0.10.1.22-1
+
+
+
+ PIG
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml
new file mode 100644
index 00000000000..30aa43ee6be
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml
@@ -0,0 +1,30 @@
+
+
+
+ root
+ Tez is the next generation Hadoop Query Processing framework written on top of YARN
+ 0.1.0.22-1
+
+
+
+ TEZ_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 00000000000..31d0113faa2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+
+
+ templeton.port
+ 50111
+ The HTTP port for the main server.
+
+
+
+ templeton.hadoop.conf.dir
+ /etc/hadoop/conf
+ The path to the Hadoop configuration.
+
+
+
+ templeton.jar
+ /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
+ The path to the Templeton jar file.
+
+
+
+ templeton.libjars
+ /usr/lib/zookeeper/zookeeper.jar
+ Jars to add to the classpath.
+
+
+
+
+ templeton.hadoop
+ /usr/bin/hadoop
+ The path to the Hadoop executable.
+
+
+
+ templeton.pig.archive
+ hdfs:///apps/webhcat/pig.tar.gz
+ The path to the Pig archive.
+
+
+
+ templeton.pig.path
+ pig.tar.gz/pig/bin/pig
+ The path to the Pig executable.
+
+
+
+ templeton.hcat
+ /usr/bin/hcat
+ The path to the hcatalog executable.
+
+
+
+ templeton.hive.archive
+ hdfs:///apps/webhcat/hive.tar.gz
+ The path to the Hive archive.
+
+
+
+ templeton.hive.path
+ hive.tar.gz/hive/bin/hive
+ The path to the Hive executable.
+
+
+
+ templeton.hive.properties
+
+ Properties to set when running hive.
+
+
+
+
+ templeton.zookeeper.hosts
+
+ ZooKeeper servers, as comma separated host:port pairs
+
+
+
+ templeton.storage.class
+ org.apache.hcatalog.templeton.tool.ZooKeeperStorage
+ The class to use as storage
+
+
+
+ templeton.override.enabled
+ false
+
+ Enable the override path in templeton.override.jars
+
+
+
+
+ templeton.streaming.jar
+ hdfs:///apps/webhcat/hadoop-streaming.jar
+ The hdfs path to the Hadoop streaming jar file.
+
+
+
+ templeton.exec.timeout
+ 60000
+ Timeout for the Templeton API
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml
new file mode 100644
index 00000000000..e65992f3237
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,31 @@
+
+
+
+ root
+ This is comment for WEBHCAT service
+ 0.5.0
+
+
+
+ WEBHCAT_SERVER
+ MASTER
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 00000000000..3f782923b54
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,120 @@
+
+
+
+
+
+ yarn.scheduler.capacity.maximum-applications
+ 10000
+
+ Maximum number of applications that can be pending and running.
+
+
+
+
+ yarn.scheduler.capacity.maximum-am-resource-percent
+ 0.1
+
+ Maximum percent of resources in the cluster which can be used to run
+ application masters, i.e. it controls the number of concurrently running
+ applications.
+
+
+
+
+ yarn.scheduler.capacity.root.queues
+ default
+
+ The queues at this level (root is the root queue).
+
+
+
+
+ yarn.scheduler.capacity.root.capacity
+ 100
+
+ The total capacity as a percentage out of 100 for this queue.
+ If it has child queues then this includes their capacity as well.
+ The child queues capacity should add up to their parent queue's capacity
+ or less.
+
+
+
+
+ yarn.scheduler.capacity.root.default.capacity
+ 100
+ Default queue target capacity.
+
+
+
+ yarn.scheduler.capacity.root.default.user-limit-factor
+ 1
+
+ Default queue user limit a percentage from 0.0 to 1.0.
+
+
+
+
+ yarn.scheduler.capacity.root.default.maximum-capacity
+ 100
+
+ The maximum capacity of the default queue.
+
+
+
+
+ yarn.scheduler.capacity.root.default.state
+ RUNNING
+
+ The state of the default queue. State can be one of RUNNING or STOPPED.
+
+
+
+
+ yarn.scheduler.capacity.root.default.acl_submit_jobs
+ *
+
+ The ACL of who can submit jobs to the default queue.
+
+
+
+
+ yarn.scheduler.capacity.root.default.acl_administer_jobs
+ *
+
+ The ACL of who can administer jobs on the default queue.
+
+
+
+
+ yarn.scheduler.capacity.root.acl_administer_queues
+ *
+
+ The ACL for who can administer this queue i.e. change sub-queue
+ allocations.
+
+
+
+
+ yarn.scheduler.capacity.root.unfunded.capacity
+ 50
+
+ No description
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg
new file mode 100644
index 00000000000..502ddaae81b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
+yarn.nodemanager.linux-container-executor.group=hadoop
+yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
+banned.users=hdfs,bin,0
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml
new file mode 100644
index 00000000000..3a2af490593
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 00000000000..e6c02bd0a74
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,172 @@
+
+
+
+
+
+
+
+
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ TODO-RMNODE-HOSTNAME:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ TODO-RMNODE-HOSTNAME:8030
+
+
+
+ yarn.resourcemanager.address
+ TODO-RMNODE-HOSTNAME:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ TODO-RMNODE-HOSTNAME:8141
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 1024
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 8192
+
+
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.local-dirs
+ TODO-YARN-LOCAL-DIR
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 8192
+ Amount of physical memory, in MB, that can be allocated
+ for containers.
+
+
+
+ yarn.application.classpath
+ /etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
+ Classpath for typical applications.
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+ Ratio between virtual memory to physical memory when
+ setting memory limits for containers. Container allocations are
+ expressed in terms of physical memory, and virtual memory usage
+ is allowed to exceed this allocation by this ratio.
+
+
+
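
As an illustrative aside (not part of this patch), the ratio above caps a container's virtual memory relative to its physical allocation. A minimal Python sketch, assuming a hypothetical 2048 MB container request:

# Illustrative only: virtual-memory ceiling implied by yarn.nodemanager.vmem-pmem-ratio.
container_pmem_mb = 2048        # assumed physical memory requested by a container
vmem_pmem_ratio = 2.1           # value from the property above
vmem_limit_mb = container_pmem_mb * vmem_pmem_ratio
print(vmem_limit_mb)            # 4300.8 MB of virtual memory before the NodeManager kills the container
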
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
+ ContainerExecutor for launching containers
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce.shuffle
+ Auxiliary services of NodeManager
+
+
+
+ yarn.nodemanager.aux-services.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.log-dirs
+ TODO-YARN-LOG-DIR
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+ The interval, in milliseconds, for which the node manager
+ waits between two cycles of monitoring its containers' memory usage.
+
+
+
+
+ yarn.nodemanager.health-checker.script.path
+ /etc/hadoop/conf/health_check
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.log.retain-second
+ 604800
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 36000
+
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml
new file mode 100644
index 00000000000..743c40d2b33
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml
@@ -0,0 +1,36 @@
+
+
+
+ mapred
+ Apache Hadoop NextGen MapReduce (YARN)
+ 2.0.3.22-1
+
+
+ RESOURCEMANAGER
+ MASTER
+
+
+ NODEMANAGER
+ SLAVE
+
+
+ YARN_CLIENT
+ CLIENT
+
+
+
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 00000000000..e72fd2ae1c6
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,35 @@
+
+
+
+ root
+ Centralized service which provides highly reliable distributed coordination
+ 3.4.5.22-1
+
+
+
+ ZOOKEEPER_SERVER
+ MASTER
+
+
+
+ ZOOKEEPER_CLIENT
+ CLIENT
+
+
+
+
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version/1.3.1.json b/ambari-web/app/assets/data/wizard/stack/hdp/version/1.3.1.json
new file mode 100644
index 00000000000..84b8980750f
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version/1.3.1.json
@@ -0,0 +1,148 @@
+{
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices?fields=StackServices",
+ "items" : [
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE",
+ "StackServices" : {
+ "user_name" : "mapred",
+ "stack_version" : "1.2.1",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP",
+ "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+ "service_version" : "0.94.5"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCFS",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.1",
+ "service_name" : "HCFS",
+ "stack_name" : "HDP",
+ "comments" : "Hadoop Compatable File System",
+ "service_version" : "1.0.0"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/NAGIOS",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "NAGIOS",
+ "stack_name" : "HDP",
+ "comments" : "Nagios Monitoring and Alerting system",
+ "service_version" : "3.2.3"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/SQOOP",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "SQOOP",
+ "stack_name" : "HDP",
+ "comments" : "Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases",
+ "service_version" : "1.4.2"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP",
+ "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
+ "service_version" : "0.10.0"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/PIG",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "PIG",
+ "stack_name" : "HDP",
+ "comments" : "Scripting platform for analyzing large datasets",
+ "service_version" : "0.10.1"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE",
+ "StackServices" : {
+ "user_name" : "mapred",
+ "stack_version" : "1.2.1",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP",
+ "comments" : "Apache Hadoop Distributed Processing Framework",
+ "service_version" : "1.1.2"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP",
+ "comments" : "This is comment for WEBHCAT service",
+ "service_version" : "0.5.0"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP",
+ "comments" : "Apache Hadoop Distributed File System",
+ "service_version" : "1.1.2"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP",
+ "comments" : "System for workflow coordination and execution of Apache Hadoop jobs",
+ "service_version" : "3.2.0"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/ZOOKEEPER",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "ZOOKEEPER",
+ "stack_name" : "HDP",
+ "comments" : "This is comment for ZOOKEEPER service",
+ "service_version" : "3.4.5"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCATALOG",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "HCATALOG",
+ "stack_name" : "HDP",
+ "comments" : "This is comment for HCATALOG service",
+ "service_version" : "0.5.0"
+ }
+ },
+ {
+ "href" : "http://ec2-23-20-124-167.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/GANGLIA",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.2.1",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP",
+ "comments" : "Ganglia Metrics Collection system",
+ "service_version" : "3.2.0"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HBASE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HBASE.json
new file mode 100644
index 00000000000..6aa748bfd0b
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HBASE.json
@@ -0,0 +1,281 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/zookeeper_sessiontimeout",
+ "StackConfigurations" : {
+ "property_description" : "ZooKeeper Session Timeout",
+ "property_value" : "60000",
+ "stack_version" : "1.3.1",
+ "property_name" : "zookeeper_sessiontimeout",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_master_heapsize",
+ "StackConfigurations" : {
+ "property_description" : "HBase Master Heap Size",
+ "property_value" : "1024",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_master_heapsize",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstore_compactionthreshold",
+ "StackConfigurations" : {
+ "property_description" : "HBase HStore compaction threshold.",
+ "property_value" : "3",
+ "stack_version" : "1.3.1",
+ "property_name" : "hstore_compactionthreshold",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_blockcache_size",
+ "StackConfigurations" : {
+ "property_description" : "HFile block cache size.",
+ "property_value" : "0.25",
+ "stack_version" : "1.3.1",
+ "property_name" : "hfile_blockcache_size",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.client.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for HRegionInterface protocol implementations (ie. \n clients talking to HRegionServers)\n The ACL is a comma-separated list of user and group names. The user and \n group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.protocol.acl",
+ "service_name" : "HBASE",
+ "type" : "hbase-policy.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_blockmultiplier",
+ "StackConfigurations" : {
+ "property_description" : "HBase Region Block Multiplier",
+ "property_value" : "2",
+ "stack_version" : "1.3.1",
+ "property_name" : "hregion_blockmultiplier",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
+ "StackConfigurations" : {
+ "property_description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).В·\n IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will\n not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n ",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.zookeeper.useMulti",
+ "service_name" : "HBASE",
+ "type" : "hbase-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_conf_dir",
+ "StackConfigurations" : {
+ "property_description" : "Config Directory for HBase.",
+ "property_value" : "/etc/hbase",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_conf_dir",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
+ "StackConfigurations" : {
+ "property_description" : "Property from ZooKeeper's config zoo.cfg.\n The port at which the clients will connect.\n ",
+ "property_value" : "2181",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.zookeeper.property.clientPort",
+ "service_name" : "HBASE",
+ "type" : "hbase-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_log_dir",
+ "StackConfigurations" : {
+ "property_description" : "Log Directories for HBase.",
+ "property_value" : "/var/log/hbase",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_log_dir",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_tmp_dir",
+ "StackConfigurations" : {
+ "property_description" : "Hbase temp directory",
+ "property_value" : "/var/log/hbase",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_tmp_dir",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_max_keyvalue_size",
+ "StackConfigurations" : {
+ "property_description" : "HBase Client Maximum key-value Size",
+ "property_value" : "10485760",
+ "stack_version" : "1.3.1",
+ "property_name" : "hfile_max_keyvalue_size",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_regionserver_heapsize",
+ "StackConfigurations" : {
+ "property_description" : "Log Directories for HBase.",
+ "property_value" : "1024",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_regionserver_heapsize",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_majorcompaction",
+ "StackConfigurations" : {
+ "property_description" : "HBase Major Compaction.",
+ "property_value" : "86400000",
+ "stack_version" : "1.3.1",
+ "property_name" : "hregion_majorcompaction",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/client_scannercaching",
+ "StackConfigurations" : {
+ "property_description" : "Base Client Scanner Caching",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "client_scannercaching",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for HMasterRegionInterface protocol implementations\n (for HRegionServers communicating with HMaster)\n The ACL is a comma-separated list of user and group names. The user and \n group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.masterregion.protocol.acl",
+ "service_name" : "HBASE",
+ "type" : "hbase-policy.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.cluster.distributed",
+ "StackConfigurations" : {
+ "property_description" : "The mode the cluster will be in. Possible values are\n false for standalone mode and true for distributed mode. If\n false, startup will run all HBase and ZooKeeper daemons together\n in the one JVM.\n ",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.cluster.distributed",
+ "service_name" : "HBASE",
+ "type" : "hbase-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
+ "StackConfigurations" : {
+ "property_description" : "The interval between checks for expired region server leases.\n This value has been reduced due to the other reduced values above so that\n the master will notice a dead region server sooner. The default is 15 seconds.\n ",
+ "property_value" : "3000",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.master.lease.thread.wakefrequency",
+ "service_name" : "HBASE",
+ "type" : "hbase-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/regionserver_handlers",
+ "StackConfigurations" : {
+ "property_description" : "HBase RegionServer Handler",
+ "property_value" : "30",
+ "stack_version" : "1.3.1",
+ "property_name" : "regionserver_handlers",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
+ "StackConfigurations" : {
+ "property_description" : "\n Amount of time to wait since the last time a region was flushed before\n invoking an optional cache flush. Default 60,000.\n ",
+ "property_value" : "10000",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.regionserver.optionalcacheflushinterval",
+ "service_name" : "HBASE",
+ "type" : "hbase-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_pid_dir",
+ "StackConfigurations" : {
+ "property_description" : "Log Directories for HBase.",
+ "property_value" : "/var/run/hbase",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_pid_dir",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstorefile_maxsize",
+ "StackConfigurations" : {
+ "property_description" : "Maximum HStoreFile Size",
+ "property_value" : "1073741824",
+ "stack_version" : "1.3.1",
+ "property_name" : "hstorefile_maxsize",
+ "service_name" : "HBASE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.admin.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for HMasterInterface protocol implementation (ie. \n clients talking to HMaster for admin operations).\n The ACL is a comma-separated list of user and group names. The user and \n group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.admin.protocol.acl",
+ "service_name" : "HBASE",
+ "type" : "hbase-policy.xml",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCATALOG.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCATALOG.json
new file mode 100644
index 00000000000..1180bad2e7d
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCATALOG.json
@@ -0,0 +1,4 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG/configurations?fields=*",
+ "items" : [ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCFS.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCFS.json
new file mode 100644
index 00000000000..63fcf51449b
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HCFS.json
@@ -0,0 +1,33 @@
+{
+ "name" : "HCFS",
+ "version" : "9.9.9.9-9",
+ "user" : "root",
+ "comment" : "This is comment for HCFS service",
+ "properties" : [ {
+ "name" : "dfs.replication.max",
+ "description" : "Maximal block replication.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.heartbeat.interval",
+ "description" : "Determines datanode heartbeat interval in seconds.",
+ "filename" : "hdfs-site.xml"
+ } ],
+ "components" : [ {
+ "name" : "DATANODE",
+ "category" : "SLAVE",
+ "client" : false,
+ "master" : false
+ }, {
+ "name" : "HCFS_CLIENT",
+ "category" : "CLIENT",
+ "client" : true,
+ "master" : false
+ } ],
+ "clientOnlyService" : false,
+ "clientComponent" : {
+ "name" : "HCFS_CLIENT",
+ "category" : "CLIENT",
+ "client" : true,
+ "master" : false
+ }
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HDFS.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HDFS.json
new file mode 100644
index 00000000000..2b05dce85c5
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HDFS.json
@@ -0,0 +1,737 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+ "StackConfigurations" : {
+ "property_description" : "Delay for first block report in seconds.",
+ "property_value" : "120",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.blockreport.initialDelay",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+ "StackConfigurations" : {
+ "property_description" : "\n Specifies the percentage of blocks that should satisfy\n the minimal replication requirement defined by dfs.replication.min.\n Values less than or equal to 0 mean not to start in safe mode.\n Values greater than 1 will make safe mode permanent.\n ",
+ "property_value" : "1.0f",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.safemode.threshold.pct",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
+ "StackConfigurations" : {
+ "property_description" : "Secondary NameNode checkpoint dir.",
+ "property_value" : "/hadoop/hdfs/namesecondary",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
+ "StackConfigurations" : {
+ "property_description" : "\nThe octal umask used when creating files and directories.\n",
+ "property_value" : "077",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.umaskmode",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+ "StackConfigurations" : {
+ "property_description" : "The implementation for lzo codec.",
+ "property_value" : "com.hadoop.compression.lzo.LzoCodec",
+ "stack_version" : "1.3.1",
+ "property_name" : "io.compression.codec.lzo.class",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+ "StackConfigurations" : {
+ "property_description" : "Determines datanode heartbeat interval in seconds.",
+ "property_value" : "3",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.heartbeat.interval",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
+ "StackConfigurations" : {
+ "property_description" : "DataNode volumes failure toleration",
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_datanode_failed_volume_tolerated",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
+ "StackConfigurations" : {
+ "property_description" : "NameNode Directories.",
+ "property_value" : "/hadoop/hdfs/namenode",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_name_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
+ "StackConfigurations" : {
+ "property_description" : "FS Checkpoint Size.",
+ "property_value" : "0.5",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_size",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+ "StackConfigurations" : {
+ "property_description" : "\n Specifies the maximum amount of bandwidth that each datanode\n can utilize for the balancing purpose in term of\n the number of bytes per second.\n ",
+ "property_value" : "6250000",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.balance.bandwidthPerSec",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
+ "StackConfigurations" : {
+ "property_description" : "NameNode maximum new generation size",
+ "property_value" : "640",
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_opt_maxnewsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+ "StackConfigurations" : {
+ "property_description" : "Determines where on the local filesystem the DFS secondary\n name node should store the temporary edits to merge.\n If this is a comma-delimited list of directoires then teh edits is\n replicated in all of the directoires for redundancy.\n Default value is same as fs.checkpoint.dir\n ",
+ "property_value" : "${fs.checkpoint.dir}",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.edits.dir",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
+ "StackConfigurations" : {
+ "property_description" : "KeyTab Directory.",
+ "property_value" : "/etc/security/keytabs",
+ "stack_version" : "1.3.1",
+ "property_name" : "keytab_path",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
+ "StackConfigurations" : {
+ "property_description" : "The default block size for new files.",
+ "property_value" : "134217728",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.block.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
+ "StackConfigurations" : {
+ "property_description" : "Hadoop Security",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "security_enabled",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+ "stack_version" : "1.3.1",
+ "property_name" : "io.serializations",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n tasks to communicate with the parent tasktracker.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.task.umbilical.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
+ "StackConfigurations" : {
+ "property_description" : "Kerberos realm.",
+ "property_value" : "EXAMPLE.COM",
+ "stack_version" : "1.3.1",
+ "property_name" : "kerberos_domain",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
+ "StackConfigurations" : {
+ "property_description" : "Data directories for Data Nodes.",
+ "property_value" : "/hadoop/hdfs/data",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_data_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
+ "StackConfigurations" : {
+ "property_description" : "Hadoop Log Dir Prefix",
+ "property_value" : "/var/log/hadoop",
+ "stack_version" : "1.3.1",
+ "property_name" : "hdfs_log_dir_prefix",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n communicate with the namenode.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+ "StackConfigurations" : {
+ "property_description" : "Defines the maximum number of retries for IPC connections.",
+ "property_value" : "50",
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.connect.max.retries",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
+ "StackConfigurations" : {
+ "property_description" : "HDFS Maximum Checkpoint Delay",
+ "property_value" : "21600",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_period",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+ "StackConfigurations" : {
+ "property_description" : "The number of server threads for the namenode.",
+ "property_value" : "40",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.namenode.handler.count",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+ "StackConfigurations" : {
+ "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+ "property_value" : "0.0.0.0:8010",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.ipc.address",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
+ "StackConfigurations" : {
+ "property_description" : "WebHDFS enabled",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_webhdfs_enabled",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
+ "StackConfigurations" : {
+ "property_description" : "Number of minutes between trash checkpoints.\n If zero, the trash feature is disabled.\n ",
+ "property_value" : "360",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.trash.interval",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
+ "StackConfigurations" : {
+ "property_description" : "The https port where secondary-namenode binds",
+ "property_value" : "50490",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.secondary.https.port",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
+ "StackConfigurations" : {
+ "property_description" : "Reserved space for HDFS",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "datanode_du_reserved",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
+ "StackConfigurations" : {
+ "property_description" : "The size of buffer for use in sequence files.\n The size of this buffer should probably be a multiple of hardware\n page size (4096 on Intel x86), and it determines how much data is\n buffered during read and write operations.",
+ "property_value" : "131072",
+ "stack_version" : "1.3.1",
+ "property_name" : "io.file.buffer.size",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
+ "StackConfigurations" : {
+ "property_description" : "Maximal block replication.\n ",
+ "property_value" : "50",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.replication.max",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
+ "StackConfigurations" : {
+ "property_description" : "Hadoop PID Dir Prefix",
+ "property_value" : "/var/run/hadoop",
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_pid_dir_prefix",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n for updating generation timestamp.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.inter.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+ "StackConfigurations" : {
+ "property_description" : "DFS Client write socket timeout",
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.socket.write.timeout",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+ "StackConfigurations" : {
+ "property_description" : "PRIVATE CONFIG VARIABLE",
+ "property_value" : "4096",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.max.xcievers",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "5242880",
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.server.max.response.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
+ "StackConfigurations" : {
+ "property_description" : "The size of the current edit log (in bytes) that triggers\n a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n ",
+ "property_value" : "536870912",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.size",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n namenode to communicate with the namenode.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.namenode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
+ "StackConfigurations" : {
+ "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.permissions",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
+ "StackConfigurations" : {
+ "property_description" : "The https port where namenode binds",
+ "property_value" : "50470",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.https.port",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
+ "StackConfigurations" : {
+ "property_description" : "NameNode Java heap size",
+ "property_value" : "1024",
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+ "StackConfigurations" : {
+ "property_description" : "Added to grow Queue size so that more client connections are allowed",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.namenode.handler.count",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
+ "StackConfigurations" : {
+ "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+ "property_value" : "gopher,gopher",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.web.ugi",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+ "StackConfigurations" : {
+ "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
+ "property_value" : "0.85f",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.du.pct",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
+ "StackConfigurations" : {
+ "property_description" : "The number of seconds between two periodic checkpoints.\n ",
+ "property_value" : "21600",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.period",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+ "StackConfigurations" : {
+ "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.block.access.token.enable",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
+ "StackConfigurations" : {
+ "property_description" : "ACL for who all can view the default servlets in the HDFS",
+ "property_value" : " hdfs",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.cluster.administrators",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
+ "StackConfigurations" : {
+ "property_description" : "DataNode maximum Java heap size",
+ "property_value" : "1024",
+ "stack_version" : "1.3.1",
+ "property_name" : "dtnode_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n communciate with the jobtracker for job submission, querying job status etc.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.job.submission.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
+ "StackConfigurations" : {
+ "property_description" : " If set to true, the web interfaces of JT and NN may contain\n actions, such as kill job, delete file, etc., that should\n not be exposed to public. Enable this option if the interfaces\n are only reachable by those who have the right authorization.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "webinterface.private.actions",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+ "StackConfigurations" : {
+ "property_description" : "The maximum time after which a client will bring down the\n connection to the server.\n ",
+ "property_value" : "30000",
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.connection.maxidletime",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+ "StackConfigurations" : {
+ "property_description" : "The name of the group of super-users.",
+ "property_value" : "hdfs",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.permissions.supergroup",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
+ "StackConfigurations" : {
+ "property_description" : "Hadoop maximum Java heap size",
+ "property_value" : "1024",
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+ "StackConfigurations" : {
+ "property_description" : "Defines the threshold number of connections after which\n connections will be inspected for idleness.\n ",
+ "property_value" : "8000",
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.idlethreshold",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n communicate with the jobtracker.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.inter.tracker.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+ "StackConfigurations" : {
+ "property_description" : "Number of failed disks datanode would tolerate",
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.failed.volumes.tolerated",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
+ "StackConfigurations" : {
+ "property_description" : "NameNode new generation size",
+ "property_value" : "200",
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_opt_newsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n for block recovery.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
+ "StackConfigurations" : {
+ "property_description" : "ACL for ClientProtocol, which is used by user code\n via the DistributedFileSystem.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.",
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "5",
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.server.read.threadpool.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
+ "StackConfigurations" : {
+ "property_description" : "The access time for HDFS file is precise upto this value.\n The default value is 1 hour. Setting a value of 0 disables\n access times for HDFS.\n ",
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.access.time.precision",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HIVE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HIVE.json
new file mode 100644
index 00000000000..bdc77b12330
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HIVE.json
@@ -0,0 +1,209 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
+ "StackConfigurations" : {
+ "property_description" : "Driver class name for a JDBC metastore",
+ "property_value" : "com.mysql.jdbc.Driver",
+ "stack_version" : "1.3.1",
+ "property_name" : "javax.jdo.option.ConnectionDriverName",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
+ "StackConfigurations" : {
+ "property_description" : "Hive PID Dir.",
+ "property_value" : "/var/run/hive",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_pid_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
+ "StackConfigurations" : {
+ "property_description" : "enable or disable the hive client authorization",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.security.authorization.enabled",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
+ "StackConfigurations" : {
+ "property_description" : "Hive Conf Dir.",
+ "property_value" : "/etc/hive/conf",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_conf_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
+ "StackConfigurations" : {
+ "property_description" : "FS operations are owned by client",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop.clientside.fs.operations",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
+ "StackConfigurations" : {
+ "property_description" : "the hive client authorization manager class name.\n The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. ",
+ "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.security.authorization.manager",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.hdfs.impl.disable.cache",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
+ "StackConfigurations" : {
+ "property_description" : "location of default database for the warehouse",
+ "property_value" : "/apps/hive/warehouse",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.warehouse.dir",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
+ "StackConfigurations" : {
+ "property_description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
+ "property_value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.semantic.analyzer.factory.impl",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
+ "StackConfigurations" : {
+ "property_description" : "Hive auxiliary jar path.",
+ "property_value" : "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_aux_jars_path",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
+ "StackConfigurations" : {
+ "property_description" : "MetaStore Client socket timeout in seconds",
+ "property_value" : "60",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.client.socket.timeout",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.server2.enable.doAs",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
+ "StackConfigurations" : {
+ "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
+ "property_value" : "Table,Database,Type,FieldSchema,Order",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.cache.pinobjtypes",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
+ "StackConfigurations" : {
+ "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.execute.setugi",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
+ "StackConfigurations" : {
+ "property_description" : "Hive PID Dir.",
+ "property_value" : "${download_url}/mysql-connector-java-5.1.18.zip",
+ "stack_version" : "1.3.1",
+ "property_name" : "mysql_connector_url",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.local",
+ "StackConfigurations" : {
+      "property_description" : "controls whether to connect to remote metastore server or\n open a new metastore server in Hive Client JVM",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.local",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
+ "StackConfigurations" : {
+ "property_description" : "Directory for Hive Log files.",
+ "property_value" : "/var/log/hive",
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_log_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/HUE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HUE.json
new file mode 100644
index 00000000000..895aa35213e
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/HUE.json
@@ -0,0 +1,353 @@
+{
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/pig_shell_command",
+ "StackConfigurations" : {
+ "property_description" : "Define and configure a new shell type pig.",
+ "property_value" : "/usr/bin/pig -l /dev/null",
+ "stack_version" : "1.3.1",
+ "property_name" : "pig_shell_command",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_name",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "sandbox",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_name",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_user",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "sandbox",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_user",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_host",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "localhost",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_host",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_password",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "1111",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_password",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/time_zone",
+ "StackConfigurations" : {
+ "property_description" : "Time zone name",
+ "property_value" : "America/Los_Angeles",
+ "stack_version" : "1.3.1",
+ "property_name" : "time_zone",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_host",
+ "StackConfigurations" : {
+ "property_description" : "Webserver listens on this address and port",
+ "property_value" : "0.0.0.0",
+ "stack_version" : "1.3.1",
+ "property_name" : "http_host",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_pid_dir",
+ "StackConfigurations" : {
+ "property_description" : "Hue Pid Dir.",
+ "property_value" : "/var/run/hue",
+ "stack_version" : "1.3.1",
+ "property_name" : "hue_pid_dir",
+ "service_name" : "HUE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/tls",
+ "StackConfigurations" : {
+ "property_description" : "Whether to use a TLS (secure) connection when talking to the SMTP server.",
+ "property_value" : "no",
+ "stack_version" : "1.3.1",
+ "property_name" : "tls",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hadoop_mapred_home",
+ "StackConfigurations" : {
+      "property_description" : "Hadoop MapReduce home directory.",
+ "property_value" : "/usr/lib/hadoop/lib",
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_mapred_home",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/default_from_email",
+ "StackConfigurations" : {
+ "property_description" : "The SMTP server information for email notification delivery.",
+ "property_value" : "sandbox@hortonworks.com",
+ "stack_version" : "1.3.1",
+ "property_name" : "default_from_email",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/backend_auth_policy",
+ "StackConfigurations" : {
+ "property_description" : "Authentication backend.",
+ "property_value" : "desktop.auth.backend.AllowAllBackend",
+ "stack_version" : "1.3.1",
+ "property_name" : "backend_auth_policy",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_log_dir",
+ "StackConfigurations" : {
+ "property_description" : "Hue Log Dir.",
+ "property_value" : "/var/log/hue",
+ "stack_version" : "1.3.1",
+ "property_name" : "hue_log_dir",
+ "service_name" : "HUE",
+ "type" : "global.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/whitelist",
+ "StackConfigurations" : {
+ "property_description" : "proxy settings",
+ "property_value" : "(localhost|127\\.0\\.0\\.1):(50030|50070|50060|50075|50111)",
+ "stack_version" : "1.3.1",
+ "property_name" : "whitelist",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/jobtracker_port",
+ "StackConfigurations" : {
+ "property_description" : "The port where the JobTracker IPC listens on.",
+ "property_value" : "50030",
+ "stack_version" : "1.3.1",
+ "property_name" : "jobtracker_port",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_port",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "3306",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_port",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_port",
+ "StackConfigurations" : {
+ "property_description" : "The SMTP server information for email notification delivery.",
+ "property_value" : "25",
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_port",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/database_logging",
+ "StackConfigurations" : {
+      "property_description" : "To show database transactions, set database_logging to 1.\n By default, database_logging=0",
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "database_logging",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/send_debug_messages",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "send_debug_messages",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_password",
+ "StackConfigurations" : {
+ "property_description" : "The SMTP server information for email notification delivery.",
+ "property_value" : "25",
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_password",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/django_debug_mode",
+ "StackConfigurations" : {
+ "property_description" : "Turn off debug",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "django_debug_mode",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/use_cherrypy_server",
+ "StackConfigurations" : {
+ "property_description" : "Set to true to use CherryPy as the webserver, set to false\n to use Spawning as the webserver. Defaults to Spawning if\n key is not specified.",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "use_cherrypy_server",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_shell_command",
+ "StackConfigurations" : {
+ "property_description" : "Define and configure a new shell type hbase.",
+ "property_value" : "/usr/bin/hbase shell",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_shell_command",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/bash_shell_command",
+ "StackConfigurations" : {
+ "property_description" : "Define and configure a new shell type bash for testing only\n .",
+ "property_value" : "/bin/bash",
+ "stack_version" : "1.3.1",
+ "property_name" : "bash_shell_command",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_nice_name",
+ "StackConfigurations" : {
+ "property_description" : "Define and configure a new shell type hbase",
+ "property_value" : "HBase Shell",
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_nice_name",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_port",
+ "StackConfigurations" : {
+ "property_description" : "Webserver listens on this address and port",
+ "property_value" : "8000",
+ "stack_version" : "1.3.1",
+ "property_name" : "http_port",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_host",
+ "StackConfigurations" : {
+ "property_description" : "The SMTP server information for email notification delivery.",
+ "property_value" : "localhost",
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_host",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_engine",
+ "StackConfigurations" : {
+ "property_description" : "Configuration options for specifying the Desktop Database.",
+ "property_value" : "mysql",
+ "stack_version" : "1.3.1",
+ "property_name" : "db_engine",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_500_debug_mode",
+ "StackConfigurations" : {
+ "property_description" : "Turn off backtrace for server error",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "http_500_debug_mode",
+ "service_name" : "HUE",
+ "type" : "hue-site.xml",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/MAPREDUCE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/MAPREDUCE.json
new file mode 100644
index 00000000000..1ea5a244e67
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/MAPREDUCE.json
@@ -0,0 +1,773 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.tasks.speculative.execution",
+ "StackConfigurations" : {
+ "property_description" : "If true, then multiple instances of some reduce tasks\n may be executed in parallel.",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.tasks.speculative.execution",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+ "StackConfigurations" : {
+      "property_description" : "The default maximum number of tasks per-user, across all of \n the user's jobs in the queue, which can be initialized concurrently. Once \n the user's jobs exceed this limit they will be queued on disk. \n ",
+ "property_value" : "100000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+ "StackConfigurations" : {
+      "property_description" : "The multiple of (maximum-system-jobs * queue-capacity) used to \n determine the number of jobs which are accepted by the scheduler. \n ",
+ "property_value" : "10",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-poll-interval",
+ "StackConfigurations" : {
+      "property_description" : "The amount of time in milliseconds which is used to poll \n the job queues for jobs to initialize.\n ",
+ "property_value" : "5000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.init-poll-interval",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.history.server.embedded",
+ "StackConfigurations" : {
+ "property_description" : "Should job history server be embedded within Job tracker\nprocess",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.history.server.embedded",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+ "StackConfigurations" : {
+ "property_description" : "\n 15-minute bucket size (value is in minutes)\n ",
+ "property_value" : "15",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.map.tasks.speculative.execution",
+ "StackConfigurations" : {
+ "property_description" : "If true, then multiple instances of some map tasks\n may be executed in parallel.",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.map.tasks.speculative.execution",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.supports-priority",
+ "StackConfigurations" : {
+ "property_description" : "If true, priorities of jobs will be taken into \n account in scheduling decisions.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.supports-priority",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+ "StackConfigurations" : {
+ "property_description" : "\n 3-hour sliding window (value is in minutes)\n ",
+ "property_value" : "180",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.active",
+ "StackConfigurations" : {
+ "property_description" : "Indicates if persistency of job status information is\n active or not.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-supports-priority",
+ "StackConfigurations" : {
+ "property_description" : "If true, priorities of jobs will be taken into \n account in scheduling decisions by default in a job queue.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-supports-priority",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-user-limit-factor",
+ "StackConfigurations" : {
+ "property_description" : "The default multiple of queue-capacity which is used to \n determine the amount of slots a single user can consume concurrently.\n ",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-user-limit-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.record.percent",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : ".2",
+ "stack_version" : "1.3.1",
+ "property_name" : "io.sort.record.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.check",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "10000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.retirejob.check",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.system.dir",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : "/mapred/system",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.system.dir",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-capacity",
+ "StackConfigurations" : {
+      "property_description" : "\n\tmaximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.\n\tThis provides a means to limit how much excess capacity a queue can use. By default, there is no limit.\n\tThe maximum-capacity of a queue can only be greater than or equal to its minimum capacity.\n Default value of -1 implies a queue can use complete capacity of the cluster.\n\n This property could be used to curtail certain jobs which are long running in nature from occupying more than a \n certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of \n other queues being affected.\n \n One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity\n the max capacity would change. So if a large number of nodes or racks get added to the cluster, max Capacity in \n absolute terms would increase accordingly.\n ",
+ "property_value" : "-1",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-capacity",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jetty.connector",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
+ "stack_version" : "1.3.1",
+ "property_name" : "jetty.connector",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+ "StackConfigurations" : {
+ "property_description" : "The default maximum number of tasks, across all jobs in the \n queue, which can be initialized concurrently. Once the queue's jobs exceed \n this limit they will be queued on disk. \n ",
+ "property_value" : "200000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/scheduler_name",
+ "StackConfigurations" : {
+ "property_description" : "MapRed Capacity Scheduler.",
+ "property_value" : "org.apache.hadoop.mapred.CapacityTaskScheduler",
+ "stack_version" : "1.3.1",
+ "property_name" : "scheduler_name",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reuse.jvm.num.tasks",
+ "StackConfigurations" : {
+ "property_description" : "\n How many tasks to run per jvm. If set to -1, there is no limit\n ",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.reuse.jvm.num.tasks",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.max.tracker.blacklists",
+ "StackConfigurations" : {
+ "property_description" : "\n if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n ",
+ "property_value" : "16",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.max.tracker.blacklists",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-submit-job",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.default.acl-submit-job",
+ "filename" : "mapred-queue-acls.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.names",
+ "StackConfigurations" : {
+ "property_description" : " Comma separated list of queues configured for this jobtracker.",
+ "property_value" : "default",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.names",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reduce.input.buffer.percent",
+ "StackConfigurations" : {
+ "property_description" : "The percentage of memory- relative to the maximum heap size- to\n retain map outputs during the reduce. When the shuffle is concluded, any\n remaining map outputs in memory must consume less than this threshold before\n the reduce can begin.\n ",
+ "property_value" : "0.0",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.reduce.input.buffer.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+ "StackConfigurations" : {
+ "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n initialize.\n ",
+ "property_value" : "50000000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/hadoop.job.history.user.location",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "none",
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop.job.history.user.location",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.completeuserjobs.maximum",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.output.compression.type",
+ "StackConfigurations" : {
+      "property_description" : "If the job outputs are to be compressed as SequenceFiles, how should\n they be compressed? Should be one of NONE, RECORD or BLOCK.\n ",
+ "property_value" : "BLOCK",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.output.compression.type",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.interval",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "0",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.retirejob.interval",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.interval",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "135000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.healthChecker.interval",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_newsize",
+ "StackConfigurations" : {
+ "property_description" : "MapRed Capacity Scheduler.",
+ "property_value" : "200",
+ "stack_version" : "1.3.1",
+ "property_name" : "jtnode_opt_newsize",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.task.timeout",
+ "StackConfigurations" : {
+ "property_description" : "The number of milliseconds before a task will be\n terminated if it neither reads an input, writes an output, nor\n updates its status string.\n ",
+ "property_value" : "600000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.task.timeout",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.hours",
+ "StackConfigurations" : {
+      "property_description" : "The number of hours job status information is persisted in DFS.\n The job status information will be available after it drops off the memory\n queue and between jobtracker restarts. With a zero value the job status\n information is not persisted at all in DFS.\n ",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.merge.percent",
+ "StackConfigurations" : {
+ "property_description" : "The usage threshold at which an in-memory merge will be\n initiated, expressed as a percentage of the total memory allocated to\n storing in-memory map outputs, as defined by\n mapred.job.shuffle.input.buffer.percent.\n ",
+ "property_value" : "0.66",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.shuffle.merge.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.restart.recover",
+ "StackConfigurations" : {
+ "property_description" : "\"true\" to enable (job) recovery upon restart,\n \"false\" to start afresh\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.restart.recover",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.handler.count",
+ "StackConfigurations" : {
+ "property_description" : "\n The number of server threads for the JobTracker. This should be roughly\n 4% of the number of tasktracker nodes.\n ",
+ "property_value" : "50",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.handler.count",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+ "StackConfigurations" : {
+      "property_description" : "The maximum number of tasks per-user, across all of the \n user's jobs in the queue, which can be initialized concurrently. Once the \n user's jobs exceed this limit they will be queued on disk. \n ",
+ "property_value" : "100000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.tasktracker.group",
+ "StackConfigurations" : {
+ "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+ "property_value" : "hadoop",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.tasktracker.group",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+ "StackConfigurations" : {
+ "property_description" : "The maximum number of tasks, across all jobs in the queue, \n which can be initialized concurrently. Once the queue's jobs exceed this \n limit they will be queued on disk. \n ",
+ "property_value" : "200000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_local_dir",
+ "StackConfigurations" : {
+ "property_description" : "MapRed Local Directories.",
+ "property_value" : "/hadoop/mapred",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred_local_dir",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.inmem.merge.threshold",
+ "StackConfigurations" : {
+      "property_description" : "The threshold, in terms of the number of files\n for the in-memory merge process. When we accumulate threshold number of files\n we initiate the in-memory merge and spill to disk. A value of 0 or less than\n 0 indicates we don't want to have any threshold and instead depend only on\n the ramfs's memory consumption to trigger the merge.\n ",
+ "property_value" : "1000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.inmem.merge.threshold",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-administer-jobs",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "*",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.default.acl-administer-jobs",
+ "filename" : "mapred-queue-acls.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.reduce.input.limit",
+ "StackConfigurations" : {
+ "property_description" : "The limit on the input size of the reduce. (This value\n is 10 Gb.) If the estimated input size of the reduce is greater than\n this value, job is failed. A value of -1 means that there is no limit\n set. ",
+ "property_value" : "10737418240",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.reduce.input.limit",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.cluster.administrators",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : " hadoop",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.cluster.administrators",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.script.timeout",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "60000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.healthChecker.script.timeout",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.parallel.copies",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : "30",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.parallel.copies",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.history.completed.location",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : "/mapred/history/done",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.history.completed.location",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.slowstart.completed.maps",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "0.05",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.slowstart.completed.maps",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+ "StackConfigurations" : {
+ "property_description" : "Normally, this is the amount of time before killing\n processes, and the recommended-default is 5.000 seconds - a value of\n 5000 here. In this case, we are using it solely to blast tasks before\n killing them, and killing them very quickly (1/4 second) to guarantee\n that we do not leave VMs around for later jobs.\n ",
+ "property_value" : "250",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.capacity",
+ "StackConfigurations" : {
+ "property_description" : "Percentage of the number of slots in the cluster that are\n to be available for jobs in this queue.\n ",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.capacity",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.input.buffer.percent",
+ "StackConfigurations" : {
+ "property_description" : "The percentage of memory to be allocated from the maximum heap\n size to storing map outputs during the shuffle.\n ",
+ "property_value" : "0.7",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.shuffle.input.buffer.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.child.root.logger",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "INFO,TLA",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.child.root.logger",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-init-accept-jobs-factor",
+ "StackConfigurations" : {
+      "property_description" : "The default multiple of (maximum-system-jobs * queue-capacity) \n used to determine the number of jobs which are accepted by the scheduler. \n ",
+ "property_value" : "10",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-init-accept-jobs-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_system_dir",
+ "StackConfigurations" : {
+ "property_description" : "MapRed System Directories.",
+ "property_value" : "/mapred/system",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred_system_dir",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.factor",
+ "StackConfigurations" : {
+ "property_description" : "No description",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "io.sort.factor",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_maxnewsize",
+ "StackConfigurations" : {
+ "property_description" : "MapRed Capacity Scheduler.",
+ "property_value" : "200",
+ "stack_version" : "1.3.1",
+ "property_name" : "jtnode_opt_maxnewsize",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.user-limit-factor",
+ "StackConfigurations" : {
+ "property_description" : "The multiple of the queue capacity which can be configured to \n allow a single user to acquire more slots. \n ",
+ "property_value" : "1",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.user-limit-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.maximum-system-jobs",
+ "StackConfigurations" : {
+ "property_description" : "Maximum number of jobs in the system which can be initialized,\n concurrently, by the CapacityScheduler.\n ",
+ "property_value" : "3000",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.maximum-system-jobs",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-minimum-user-limit-percent",
+ "StackConfigurations" : {
+ "property_description" : "The percentage of the resources limited to a particular user\n for the job queue at any given point of time by default.\n ",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-minimum-user-limit-percent",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+ "StackConfigurations" : {
+ "property_description" : " Each queue enforces a limit on the percentage of resources \n allocated to a user at any given time, if there is competition for them. \n This user limit can vary between a minimum and maximum value. The former\n depends on the number of users who have submitted jobs, and the latter is\n set to this property value. For example, suppose the value of this \n property is 25. If two users have submitted jobs to a queue, no single \n user can use more than 50% of the queue resources. If a third user submits\n a job, no single user can use more than 33% of the queue resources. With 4 \n or more users, no user can use more than 25% of the queue's resources. A \n value of 100 implies no user limits are imposed. \n ",
+ "property_value" : "100",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.staging.root.dir",
+ "StackConfigurations" : {
+ "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n name. It is a path in the default file system.",
+ "property_value" : "/user",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.jobtracker.staging.root.dir",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-worker-threads",
+ "StackConfigurations" : {
+ "property_description" : "Number of worker threads which would be used by\n Initialization poller to initialize jobs in a set of queue.\n If number mentioned in property is equal to number of job queues\n then a single thread would initialize jobs in a queue. If lesser\n then a thread would get a set of queues assigned. If the number\n is greater then number of threads would be equal to number of \n job queues.\n ",
+ "property_value" : "5",
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.init-worker-threads",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/tasktracker.http.threads",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "50",
+ "stack_version" : "1.3.1",
+ "property_name" : "tasktracker.http.threads",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/OOZIE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/OOZIE.json
new file mode 100644
index 00000000000..2f0d0badc53
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/OOZIE.json
@@ -0,0 +1,317 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.systemmode",
+ "StackConfigurations" : {
+ "property_description" : "\n System mode for Oozie at startup.\n ",
+ "property_value" : "NORMAL",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.systemmode",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
+ "StackConfigurations" : {
+ "property_description" : "The mapping from kerberos principal names to local OS user names.",
+ "property_value" : "\n RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n DEFAULT\n ",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.authentication.kerberos.name.rules",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
+ "StackConfigurations" : {
+ "property_description" : "\n Specifies whether security (user name/admin role) is enabled or not.\n If disabled any user can manage Oozie system and manage any job.\n ",
+ "property_value" : "true",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.AuthorizationService.security.enabled",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
+ "StackConfigurations" : {
+ "property_description" : "Number of threads used for executing callables",
+ "property_value" : "10",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.threads",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
+ "StackConfigurations" : {
+ "property_description" : "\n Whitelisted job tracker for Oozie service.\n ",
+ "property_value" : " ",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
+ "StackConfigurations" : {
+ "property_description" : "\n Max number of connections.\n ",
+ "property_value" : "10",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.pool.max.active.conn",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.base.url",
+ "StackConfigurations" : {
+ "property_description" : "Base Oozie URL.",
+ "property_value" : "http://localhost:11000/oozie",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.base.url",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
+ "StackConfigurations" : {
+ "property_description" : "\n Jobs older than this value, in days, will be purged by the PurgeService.\n ",
+ "property_value" : "30",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.PurgeService.older.than",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
+ "StackConfigurations" : {
+ "property_description" : "\n ",
+ "property_value" : " ",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
+ "StackConfigurations" : {
+ "property_description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n -1 means infinite timeout",
+ "property_value" : "120",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.coord.normal.default.timeout",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
+ "StackConfigurations" : {
+ "property_description" : "\n If set to true, submissions of MapReduce and Pig jobs will include\n automatically the system library path, thus not requiring users to\n specify where the Pig JAR files are. Instead, the ones from the system\n library path are used.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
+ "StackConfigurations" : {
+ "property_description" : "Max callable queue size",
+ "property_value" : "1000",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.queue.size",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
+ "StackConfigurations" : {
+ "property_description" : null,
+ "property_value" : "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor\n ",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.ActionService.executor.ext.classes",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
+ "StackConfigurations" : {
+ "property_description" : "\n DB user password.\n\n IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n if empty Configuration assumes it is NULL.\n ",
+ "property_value" : " ",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.password",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
+ "StackConfigurations" : {
+ "property_description" : "\n System library path to use for workflow applications.\n This path is added to workflow application if their job properties sets\n the property 'oozie.use.system.libpath' to true.\n ",
+ "property_value" : "/user/${user.name}/share/lib",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.WorkflowAppService.system.libpath",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
+ "StackConfigurations" : {
+ "property_description" : "\n JDBC driver class.\n ",
+ "property_value" : "org.apache.derby.jdbc.EmbeddedDriver",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.driver",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
+ "StackConfigurations" : {
+ "property_description" : "\n Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n the relevant Hadoop *-site.xml files. If the path is relative is looked within\n the Oozie configuration directory; though the path can be absolute (i.e. to point\n to Hadoop client conf/ directories in the local filesystem.\n ",
+ "property_value" : "*=/etc/hadoop/conf",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
+ "StackConfigurations" : {
+ "property_description" : "\n Interval at which the purge service will run, in seconds.\n ",
+ "property_value" : "3600",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.PurgeService.purge.interval",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
+ "StackConfigurations" : {
+ "property_description" : "\n Creates Oozie DB.\n\n If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.create.db.schema",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.system.id",
+ "StackConfigurations" : {
+ "property_description" : "\n The Oozie system ID.\n ",
+ "property_value" : "oozie-${user.name}",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.system.id",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
+ "StackConfigurations" : {
+ "property_description" : "\n DB user name.\n ",
+ "property_value" : "sa",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.username",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.db.schema.name",
+ "StackConfigurations" : {
+ "property_description" : "\n Oozie DataBase Name\n ",
+ "property_value" : "oozie",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.db.schema.name",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.type",
+ "StackConfigurations" : {
+ "property_description" : "\n ",
+ "property_value" : "simple",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.authentication.type",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
+ "StackConfigurations" : {
+ "property_description" : "\n JDBC URL.\n ",
+ "property_value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.url",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
+ "StackConfigurations" : {
+ "property_description" : "\n Maximum concurrency for a given callable type.\n Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n All commands that use action executors (action-start, action-end, action-kill and action-check) use\n the action type as the callable type.\n ",
+ "property_value" : "3",
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/WEBHCAT.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/WEBHCAT.json
new file mode 100644
index 00000000000..349d44c98c0
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/WEBHCAT.json
@@ -0,0 +1,173 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations?fields=*",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
+ "StackConfigurations" : {
+ "property_description" : "The hdfs path to the Hadoop streaming jar file.",
+ "property_value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.streaming.jar",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.override.enabled",
+ "StackConfigurations" : {
+ "property_description" : "\n Enable the override path in templeton.override.jars\n ",
+ "property_value" : "false",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.override.enabled",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.archive",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Pig archive.",
+ "property_value" : "hdfs:///apps/webhcat/pig.tar.gz",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.pig.archive",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Hadoop executable.",
+ "property_value" : "/usr/bin/hadoop",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hadoop",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hcat",
+ "StackConfigurations" : {
+ "property_description" : "The path to the hcatalog executable.",
+ "property_value" : "/usr/bin/hcat",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hcat",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
+ "StackConfigurations" : {
+ "property_description" : "Time out for templeton api",
+ "property_value" : "60000",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.exec.timeout",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Hadoop configuration.",
+ "property_value" : "/etc/hadoop/conf",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hadoop.conf.dir",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.jar",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Templeton jar file.",
+ "property_value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.jar",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.libjars",
+ "StackConfigurations" : {
+ "property_description" : "Jars to add the the classpath.",
+ "property_value" : "/usr/lib/zookeeper/zookeeper.jar",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.libjars",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.port",
+ "StackConfigurations" : {
+ "property_description" : "The HTTP port for the main server.",
+ "property_value" : "50111",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.port",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.storage.class",
+ "StackConfigurations" : {
+ "property_description" : "The class to use as storage",
+ "property_value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.storage.class",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.archive",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Hive archive.",
+ "property_value" : "hdfs:///apps/webhcat/hive.tar.gz",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hive.archive",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.path",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Hive executable.",
+ "property_value" : "hive.tar.gz/hive/bin/hive",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hive.path",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.path",
+ "StackConfigurations" : {
+ "property_description" : "The path to the Pig executable.",
+ "property_value" : "pig.tar.gz/pig/bin/pig",
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.pig.path",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/ZOOKEEPER.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/ZOOKEEPER.json
new file mode 100644
index 00000000000..aeeac38f74d
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/ZOOKEEPER.json
@@ -0,0 +1,4 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/ZOOKEEPER/configurations?fields=*",
+ "items" : [ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version131/global.json b/ambari-web/app/assets/data/wizard/stack/hdp/version131/global.json
new file mode 100644
index 00000000000..bc78d63a3c5
--- /dev/null
+++ b/ambari-web/app/assets/data/wizard/stack/hdp/version131/global.json
@@ -0,0 +1,2490 @@
+{
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices?fields=configurations/StackConfigurations/filename",
+ "items" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.cache.pinobjtypes",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "javax.jdo.option.ConnectionDriverName",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.execute.setugi",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.hdfs.impl.disable.cache",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.client.socket.timeout",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_pid_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.semantic.analyzer.factory.impl",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mysql_connector_url",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_log_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.local",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.local",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_conf_dir",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.security.authorization.enabled",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.security.authorization.manager",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.server2.enable.doAs",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive.metastore.warehouse.dir",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hive_aux_jars_path",
+ "filename" : "global.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop.clientside.fs.operations",
+ "filename" : "hive-site.xml",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "NAGIOS",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "HCATALOG",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.parallel.copies",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.parallel.copies",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-user-limit-factor",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-user-limit-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-supports-priority",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-supports-priority",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.handler.count",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.handler.count",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.max.tracker.blacklists",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.max.tracker.blacklists",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.map.tasks.speculative.execution",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.map.tasks.speculative.execution",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.active",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.input.buffer.percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.shuffle.input.buffer.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/tasktracker.http.threads",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "tasktracker.http.threads",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.history.server.embedded",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.history.server.embedded",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.hours",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.completeuserjobs.maximum",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_system_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred_system_dir",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/scheduler_name",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "scheduler_name",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-submit-job",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.default.acl-submit-job",
+ "filename" : "mapred-queue-acls.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.slowstart.completed.maps",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.slowstart.completed.maps",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.capacity",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.capacity",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.merge.percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.shuffle.merge.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.retirejob.interval",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.factor",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "io.sort.factor",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.child.root.logger",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.child.root.logger",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.history.completed.location",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.tracker.history.completed.location",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jetty.connector",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "jetty.connector",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reduce.input.buffer.percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.reduce.input.buffer.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_newsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "jtnode_opt_newsize",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/hadoop.job.history.user.location",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop.job.history.user.location",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-worker-threads",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.init-worker-threads",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_local_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred_local_dir",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.check",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.retirejob.check",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.tasktracker.group",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.tasktracker.group",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.record.percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "io.sort.record.percent",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-administer-jobs",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.default.acl-administer-jobs",
+ "filename" : "mapred-queue-acls.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.user-limit-factor",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.user-limit-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.tasks.speculative.execution",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.reduce.tasks.speculative.execution",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-capacity",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-capacity",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.restart.recover",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.restart.recover",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.output.compression.type",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.output.compression.type",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.system.dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.system.dir",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.supports-priority",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.supports-priority",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reuse.jvm.num.tasks",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.job.reuse.jvm.num.tasks",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.healthChecker.interval",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.cluster.administrators",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.cluster.administrators",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.staging.root.dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.jobtracker.staging.root.dir",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-poll-interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.init-poll-interval",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.names",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.queue.names",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.reduce.input.limit",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapreduce.reduce.input.limit",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.maximum-system-jobs",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.maximum-system-jobs",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-minimum-user-limit-percent",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-minimum-user-limit-percent",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.task.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.task.timeout",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-init-accept-jobs-factor",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.capacity-scheduler.default-init-accept-jobs-factor",
+ "filename" : "capacity-scheduler.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.script.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.healthChecker.script.timeout",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_maxnewsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "jtnode_opt_maxnewsize",
+ "filename" : "global.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.inmem.merge.threshold",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.inmem.merge.threshold",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+ "filename" : "mapred-site.xml",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/ZOOKEEPER",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "ZOOKEEPER",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/SQOOP",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "SQOOP",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.zookeeper.useMulti",
+ "filename" : "hbase-site.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstore_compactionthreshold",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hstore_compactionthreshold",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_blockcache_size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hfile_blockcache_size",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_pid_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_pid_dir",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_regionserver_heapsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_regionserver_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/client_scannercaching",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "client_scannercaching",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/regionserver_handlers",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "regionserver_handlers",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.regionserver.optionalcacheflushinterval",
+ "filename" : "hbase-site.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.admin.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.admin.protocol.acl",
+ "filename" : "hbase-policy.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_blockmultiplier",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hregion_blockmultiplier",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_master_heapsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_master_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.master.lease.thread.wakefrequency",
+ "filename" : "hbase-site.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_conf_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_conf_dir",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_log_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_log_dir",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.masterregion.protocol.acl",
+ "filename" : "hbase-policy.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_tmp_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_tmp_dir",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_majorcompaction",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hregion_majorcompaction",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_max_keyvalue_size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hfile_max_keyvalue_size",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/zookeeper_sessiontimeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "zookeeper_sessiontimeout",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.cluster.distributed",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.cluster.distributed",
+ "filename" : "hbase-site.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase.zookeeper.property.clientPort",
+ "filename" : "hbase-site.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstorefile_maxsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hstorefile_maxsize",
+ "filename" : "global.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.client.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.protocol.acl",
+ "filename" : "hbase-policy.xml",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.driver",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.PurgeService.purge.interval",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.username",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.type",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.authentication.type",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.ActionService.executor.ext.classes",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.systemmode",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.systemmode",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.db.schema.name",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.db.schema.name",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.url",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.jdbc.password",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.create.db.schema",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.system.id",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.system.id",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.base.url",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.base.url",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.PurgeService.older.than",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.JPAService.pool.max.active.conn",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.WorkflowAppService.system.libpath",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.AuthorizationService.security.enabled",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.coord.normal.default.timeout",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.authentication.kerberos.name.rules",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.threads",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "oozie.service.CallableQueueService.queue.size",
+ "filename" : "oozie-site.xml",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "PIG",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_datanode_failed_volume_tolerated",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.trash.interval",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "io.serializations",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.secondary.https.port",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.ipc.address",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.idlethreshold",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.inter.tracker.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "io.file.buffer.size",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.size",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.heartbeat.interval",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.block.access.token.enable",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.web.ugi",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "kerberos_domain",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.balance.bandwidthPerSec",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_name_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "datanode_du_reserved",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.du.pct",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.safemode.threshold.pct",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_opt_maxnewsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.inter.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.server.max.response.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.socket.write.timeout",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.datanode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "keytab_path",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.failed.volumes.tolerated",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.permissions",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.datanode.max.xcievers",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "io.compression.codec.lzo.class",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.cluster.administrators",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_opt_newsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.access.time.precision",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.server.read.threadpool.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security_enabled",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.umaskmode",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.namenode.handler.count",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.replication.max",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "webinterface.private.actions",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.permissions.supergroup",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_pid_dir_prefix",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.edits.dir",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.block.size",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.task.umbilical.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.job.submission.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.connect.max.retries",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.https.port",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.client.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.namenode.handler.count",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ipc.client.connection.maxidletime",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_webhdfs_enabled",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hdfs_log_dir_prefix",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs.checkpoint.period",
+ "filename" : "core-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_size",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "security.namenode.protocol.acl",
+ "filename" : "hadoop-policy.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs.blockreport.initialDelay",
+ "filename" : "hdfs-site.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_period",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "fs_checkpoint_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dtnode_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "dfs_data_dir",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "namenode_heapsize",
+ "filename" : "global.xml",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.port",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.archive",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.pig.archive",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.archive",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hive.archive",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.streaming.jar",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.jar",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.jar",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hcat",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hcat",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hadoop",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.path",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hive.path",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.hadoop.conf.dir",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.storage.class",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.storage.class",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.override.enabled",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.override.enabled",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.path",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.pig.path",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.libjars",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.libjars",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "templeton.exec.timeout",
+ "filename" : "webhcat-site.xml",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/jobtracker_port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "jobtracker_port",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_user",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_user",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_host",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "http_host",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_port",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/whitelist",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "whitelist",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/django_debug_mode",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "django_debug_mode",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_host",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_host",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_password",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_password",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_engine",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_engine",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/send_debug_messages",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "send_debug_messages",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/tls",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "tls",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/bash_shell_command",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "bash_shell_command",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/pig_shell_command",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "pig_shell_command",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_500_debug_mode",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "http_500_debug_mode",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hadoop_mapred_home",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hadoop_mapred_home",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/time_zone",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "time_zone",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_host",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_host",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_password",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_password",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/database_logging",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "database_logging",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_name",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "db_name",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_shell_command",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_shell_command",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/default_from_email",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "default_from_email",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/use_cherrypy_server",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "use_cherrypy_server",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "http_port",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_nice_name",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "hbase_nice_name",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/backend_auth_policy",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "backend_auth_policy",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_port",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "smtp_port",
+ "filename" : "hue-site.xml",
+ "service_name" : "HUE",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA",
+ "StackServices" : {
+ "stack_version" : "1.3.1",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP"
+ },
+ "configurations" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ganglia_runtime_dir",
+ "filename" : "global.xml",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "gmetad_user",
+ "filename" : "global.xml",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "rrdcached_base_dir",
+ "filename" : "global.xml",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
+ "StackConfigurations" : {
+ "stack_version" : "1.3.1",
+ "property_name" : "ganglia_conf_dir",
+ "filename" : "global.xml",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP"
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ambari-web/app/assets/data/wizard/stack/stacks.json b/ambari-web/app/assets/data/wizard/stack/stacks.json
index 4067c16dd94..f5e1236edba 100644
--- a/ambari-web/app/assets/data/wizard/stack/stacks.json
+++ b/ambari-web/app/assets/data/wizard/stack/stacks.json
@@ -579,6 +579,159 @@
}
}
]
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.1",
+ "Versions" : {
+ "stack_version" : "1.3.1",
+ "stack_name" : "HDP",
+ "min_upgrade_version" : "1.2.0"
+ },
+ "stackServices" : [
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "OOZIE",
+ "stack_name" : "HDP",
+ "service_version" : "3.2.0",
+ "comments" : "System for workflow coordination and execution of Apache Hadoop jobs"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "HCATALOG",
+ "stack_name" : "HDP",
+ "service_version" : "0.5.0",
+ "comments" : "This is comment for HCATALOG service"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE",
+ "StackServices" : {
+ "user_name" : "mapred",
+ "stack_version" : "1.3.0",
+ "service_name" : "MAPREDUCE",
+ "stack_name" : "HDP",
+ "service_version" : "1.1.2",
+ "comments" : "Apache Hadoop Distributed Processing Framework"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE",
+ "StackServices" : {
+ "user_name" : "mapred",
+ "stack_version" : "1.3.0",
+ "service_name" : "HBASE",
+ "stack_name" : "HDP",
+ "service_version" : "0.94.5",
+ "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/ZOOKEEPER",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "ZOOKEEPER",
+ "stack_name" : "HDP",
+ "service_version" : "3.4.5",
+ "comments" : "This is comment for ZOOKEEPER service"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "HUE",
+ "stack_name" : "HDP",
+ "service_version" : "2.2.0",
+ "comments" : "Hue is a graphical user interface to operate and develop\n applications for Apache Hadoop."
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "HDFS",
+ "stack_name" : "HDP",
+ "service_version" : "1.1.2",
+ "comments" : "Apache Hadoop Distributed File System"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "GANGLIA",
+ "stack_name" : "HDP",
+ "service_version" : "3.2.0",
+ "comments" : "Ganglia Metrics Collection system"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "WEBHCAT",
+ "stack_name" : "HDP",
+ "service_version" : "0.5.0",
+ "comments" : "This is comment for WEBHCAT service"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/SQOOP",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "SQOOP",
+ "stack_name" : "HDP",
+ "service_version" : "1.4.2",
+ "comments" : "Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "NAGIOS",
+ "stack_name" : "HDP",
+ "service_version" : "3.2.3",
+ "comments" : "Nagios Monitoring and Alerting system"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "PIG",
+ "stack_name" : "HDP",
+ "service_version" : "0.10.1",
+ "comments" : "Scripting platform for analyzing large datasets"
+ }
+ },
+ {
+ "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE",
+ "StackServices" : {
+ "user_name" : "root",
+ "stack_version" : "1.3.0",
+ "service_name" : "HIVE",
+ "stack_name" : "HDP",
+ "service_version" : "0.10.0",
+ "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service"
+ }
+ }
+ ]
}
]
}
\ No newline at end of file
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 7fb985ec1f9..3915fb73b6b 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -19,12 +19,13 @@
var App = require('app');
App.testMode = (location.port == '3333'); // test mode is automatically enabled if running on brunch server
+App.stubStack = false; // when true, only the stack API calls are stubbed out with local mock data; everything else still goes to the server
App.testModeDelayForActions = 10000;
App.skipBootstrap = false;
-App.alwaysGoToInstaller = false;
+App.alwaysGoToInstaller = true;
App.testEnableSecurity = true; // By default enable security is tested; turning it false tests disable security
App.apiPrefix = '/api/v1';
-App.defaultStackVersion = 'HDP-1.3.0';
+App.defaultStackVersion = 'HDP-1.3.1';
App.defaultLocalStackVersion = 'HDPLocal-1.3.0';
App.defaultJavaHome = '/usr/jdk/jdk1.6.0_31';
App.timeout = 180000; // default AJAX timeout
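A minimal, hypothetical sketch of how the new App.stubStack flag could be consumed — the helper below is illustrative only (it is not part of this patch), and assumes the bundled stacks fixture and the /stacks2 endpoint seen elsewhere in this change:

    // Hypothetical helper: decide where stack metadata is loaded from.
    // App.testMode, App.stubStack and App.apiPrefix come from config.js above.
    function stacksUrl() {
      if (App.testMode || App.stubStack) {
        // serve the bundled fixture (ambari-web/app/assets/data/wizard/stack/stacks.json)
        return '/data/wizard/stack/stacks.json';
      }
      return App.apiPrefix + '/stacks2/HDP/versions'; // live server API
    }
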
diff --git a/ambari-web/app/controllers/main/admin/cluster.js b/ambari-web/app/controllers/main/admin/cluster.js
index 6db36b73727..a67880fdeda 100644
--- a/ambari-web/app/controllers/main/admin/cluster.js
+++ b/ambari-web/app/controllers/main/admin/cluster.js
@@ -87,7 +87,7 @@ App.MainAdminClusterController = Em.Controller.extend({
var myService = Em.Object.create({
serviceName: entry.service_name,
displayName: displayOrderConfig[i].displayName,
- isDisabled: i === 0,
+ isDisabled: displayOrderConfig[i].isDisabled,
isSelected: true,
isInstalled: false,
isHidden: displayOrderConfig[i].isHidden,
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index 45ca4859308..fc35d24abad 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -1100,7 +1100,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend({
}
} else if (_serviceTags.siteName === 'core-site') {
console.log("TRACE: Inside core-site");
- if (this.get('content.serviceName') === 'HDFS') {
+ if (this.get('content.serviceName') === 'HDFS' || this.get('content.serviceName') === 'HCFS') {
var coreSiteConfigs = this.createCoreSiteObj(_serviceTags.newTagName);
siteNameToServerDataMap['core-site'] = coreSiteConfigs;
if(this.isConfigChanged(App.config.loadedConfigurationsCache['core-site_'+this.loadedClusterSiteToTagMap['core-site']], coreSiteConfigs.properties)){
@@ -1444,8 +1444,12 @@ App.MainServiceInfoConfigsController = Em.Controller.extend({
var serviceConfigs = this.get('serviceConfigs').findProperty('serviceName', serviceName).configs;
//namenode_host is required to derive "fs.default.name" a property of core-site
var nameNodeHost = this.get('serviceConfigs').findProperty('serviceName', 'HDFS').configs.findProperty('name', 'namenode_host');
- nameNodeHost.defaultValue = App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('host.hostName');
- globalConfigs.push(nameNodeHost);
+ try {
+ nameNodeHost.defaultValue = App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('host.hostName');
+ globalConfigs.push(nameNodeHost);
+ } catch (err) {
+ console.log("No NameNode Host available. This is expected if you're using HCFS rather than HDFS.");
+ }
//zooKeeperserver_host
var zooKeperHost = this.get('serviceConfigs').findProperty('serviceName', 'ZOOKEEPER').configs.findProperty('name', 'zookeeperserver_hosts');
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index 743a29c23f9..a999f5d6c55 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -491,7 +491,7 @@ App.WizardController = Em.Controller.extend({
var myService = Service.create({
serviceName: entry.StackServices.service_name,
displayName: displayOrderConfig[i].displayName,
- isDisabled: i === 0,
+ isDisabled: displayOrderConfig[i].isDisabled,
isSelected: displayOrderConfig[i].isSelected,
canBeSelected: displayOrderConfig[i].canBeSelected,
isInstalled: false,
@@ -520,9 +520,9 @@ App.WizardController = Em.Controller.extend({
loadServicesFromServer: function() {
var services = App.db.getService();
- if (services) {
- return;
- }
+// if (services) {
+// return;
+// }
var apiService = this.loadServiceComponents();
this.set('content.services', apiService);
App.db.setService(apiService);
diff --git a/ambari-web/app/controllers/wizard/step4_controller.js b/ambari-web/app/controllers/wizard/step4_controller.js
index 6f1f78a3510..bd37d33190e 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -95,6 +95,24 @@ App.WizardStep4Controller = Em.ArrayController.extend({
return false;
},
+ /**
+   * Check whether neither HDFS nor HCFS is selected, i.e. the wizard still needs a distributed file system
+ * @return {Boolean}
+ */
+ needToAddHDFS: function () {
+ return (this.findProperty('serviceName', 'HDFS').get('isSelected') === false &&
+ (!this.findProperty('serviceName', 'HCFS') || this.findProperty('serviceName', 'HCFS').get('isSelected') === false));
+ },
+
+ /**
+ * Check if multiple distributed file systems were selected
+ * @return {Boolean}
+ */
+ multipleDFSs: function () {
+ return (this.findProperty('serviceName', 'HDFS').get('isSelected') === true &&
+ (this.findProperty('serviceName', 'HCFS') && this.findProperty('serviceName', 'HCFS').get('isSelected') === true));
+ },
+
/**
* Check do we have any monitoring service turned on
* @return {Boolean}
@@ -121,11 +139,49 @@ App.WizardStep4Controller = Em.ArrayController.extend({
if(!this.get("isSubmitDisabled")){
if (this.needToAddMapReduce()) {
this.mapReduceCheckPopup();
- } else {
+ } else if (this.needToAddHDFS()) {
+ this.needToAddHDFSPopup();
+ } else if (this.multipleDFSs()) {
+ this.multipleDFSPopup();
+ }
+ else {
this.validateMonitoring();
}
}
},
+
+ multipleDFSPopup: function() {
+ var self = this;
+ App.ModalPopup.show({
+ header: Em.I18n.t('installer.step4.multileDFS.popup.header'),
+ body: Em.I18n.t('installer.step4.multileDFS.popup.body'),
+ onPrimary: function () {
+ self.findProperty('serviceName', 'HDFS').set('isSelected', true);
+ self.findProperty('serviceName', 'HCFS').set('isSelected', false);
+ this.hide();
+ self.validateMonitoring();
+ },
+ onSecondary: function () {
+ this.hide();
+ }
+ });
+ },
+
+ needToAddHDFSPopup: function() {
+ var self = this;
+ App.ModalPopup.show({
+ header: Em.I18n.t('installer.step4.hdfsCheck.popup.header'),
+ body: Em.I18n.t('installer.step4.hdfsCheck.popup.body'),
+ onPrimary: function () {
+ self.findProperty('serviceName', 'HDFS').set('isSelected', true);
+ this.hide();
+ self.validateMonitoring();
+ },
+ onSecondary: function () {
+ this.hide();
+ }
+ });
+ },
mapReduceCheckPopup: function () {
var self = this;
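
For reference, a minimal standalone sketch of the two selection rules introduced above, using plain objects in place of the step-4 Ember models (all names below are illustrative, not part of the patch):

// Stand-ins for the ArrayController content: one flag object per service.
function findService(services, name) {
  return services.filter(function (s) { return s.serviceName === name; })[0];
}

function needToAddHDFS(services) {
  var hdfs = findService(services, 'HDFS');
  var hcfs = findService(services, 'HCFS');
  // true only when neither distributed file system was selected
  return !hdfs.isSelected && (!hcfs || !hcfs.isSelected);
}

function multipleDFSs(services) {
  var hdfs = findService(services, 'HDFS');
  var hcfs = findService(services, 'HCFS');
  // true when HDFS and HCFS were selected at the same time
  return hdfs.isSelected && !!hcfs && hcfs.isSelected;
}

// An HCFS-only selection trips neither popup:
var selection = [
  { serviceName: 'HDFS', isSelected: false },
  { serviceName: 'HCFS', isSelected: true }
];
console.log(needToAddHDFS(selection), multipleDFSs(selection)); // false false
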
diff --git a/ambari-web/app/controllers/wizard/step6_controller.js b/ambari-web/app/controllers/wizard/step6_controller.js
index 23f4477d2f7..236fa417d8a 100644
--- a/ambari-web/app/controllers/wizard/step6_controller.js
+++ b/ambari-web/app/controllers/wizard/step6_controller.js
@@ -201,10 +201,12 @@ App.WizardStep6Controller = Em.Controller.extend({
}
}
else {
- headers.pushObject(Ember.Object.create({
- name: 'DATANODE',
- label: self.getComponentDisplayName('DATANODE')
- }));
+ if (this.isServiceSelected('HDFS')) {
+ headers.pushObject(Ember.Object.create({
+ name: 'DATANODE',
+ label: self.getComponentDisplayName('DATANODE')
+ }));
+ }
if (this.isServiceSelected('MAPREDUCE')) {
headers.pushObject(Em.Object.create({
name: 'TASKTRACKER',
@@ -325,10 +327,12 @@ App.WizardStep6Controller = Em.Controller.extend({
checkboxes.findProperty('title', headers.findProperty('name', 'CLIENT').get('label')).set('checked', false);
// First not Master should have Client (only first!)
if (!client_is_set) {
- var checkboxDatanode = checkboxes.findProperty('title', headers.findProperty('name', 'DATANODE').get('label'));
- if (checkboxDatanode && checkboxDatanode.get('checked')) {
- checkboxes.findProperty('title', headers.findProperty('name', 'CLIENT').get('label')).set('checked', true);
- client_is_set = true;
+ if (self.isServiceSelected("HDFS")) {
+ var checkboxDatanode = checkboxes.findProperty('title', headers.findProperty('name', 'DATANODE').get('label'));
+ if (checkboxDatanode && checkboxDatanode.get('checked')) {
+ checkboxes.findProperty('title', headers.findProperty('name', 'CLIENT').get('label')).set('checked', true);
+ client_is_set = true;
+ }
}
}
});
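
A compact sketch of the rule the step-6 hunks above enforce: the DATANODE slave column (and the client auto-check that depends on it) only appears when HDFS itself is among the selected services. This is a plain-JS approximation, not the real controller:

// selectedServiceNames is assumed to be an array such as ['HCFS', 'MAPREDUCE'].
function buildSlaveHeaders(selectedServiceNames) {
  var isSelected = function (name) { return selectedServiceNames.indexOf(name) !== -1; };
  var headers = [];
  if (isSelected('HDFS')) {
    headers.push({ name: 'DATANODE', label: 'DataNode' });
  }
  if (isSelected('MAPREDUCE')) {
    headers.push({ name: 'TASKTRACKER', label: 'TaskTracker' });
  }
  headers.push({ name: 'CLIENT', label: 'Client' });
  return headers;
}

console.log(buildSlaveHeaders(['HCFS', 'MAPREDUCE']).map(function (h) { return h.name; }));
// -> [ 'TASKTRACKER', 'CLIENT' ]  (no DataNode column on an HCFS-only cluster)
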
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 90f56b7328e..b2ee157ef5c 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -86,6 +86,7 @@ App.WizardStep7Controller = Em.Controller.extend({
this.activateSpecialConfigs();
this.set('selectedService', this.get('stepConfigs').filterProperty('showConfig', true).objectAt(0));
},
+
/**
* make some configs visible depending on active services
*/
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index f6555a37fe5..e5401f8fe1f 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -479,6 +479,9 @@ App.WizardStep8Controller = Em.Controller.extend({
case 'HDFS':
this.loadHDFS(serviceObj);
break;
+ case 'HCFS':
+ this.loadHCFS(serviceObj);
+ break;
case 'MAPREDUCE':
this.loadMapReduce(serviceObj);
break;
@@ -548,6 +551,28 @@ App.WizardStep8Controller = Em.Controller.extend({
//var
this.get('services').pushObject(hdfsObj);
},
+
+ /**
+ * load all info about HCFS service
+ * @param hcfsObj
+ */
+ loadHCFS: function (hcfsObj) {
+ hcfsObj.get('service_components').forEach(function (_component) {
+ switch (_component.get('display_name')) {
+ case 'HCFS Client':
+ this.loadHCFSClientValue(_component);
+ break;
+ default:
+ }
+ }, this);
+ this.get('services').pushObject(hcfsObj);
+ },
+
+ loadHCFSClientValue: function (hcfsComponent) {
+ var hcfsClientHosts = this.get('content.slaveComponentHosts').findProperty('displayName', 'Client');
+ var totalHCFSHosts = hcfsClientHosts.hosts.length;
+ hcfsComponent.set('component_value', totalHCFSHosts + ' hosts');
+ },
loadNnValue: function (nnComponent) {
var nnHostName = this.get('content.masterComponentHosts').findProperty('display_name', nnComponent.display_name);
@@ -1401,11 +1426,25 @@ App.WizardStep8Controller = Em.Controller.extend({
var hiveUser = this.get('globals').someProperty('name', 'hive_user') ? this.get('globals').findProperty('name', 'hive_user').value : null;
var isHcatSelected = this.get('selectedServices').someProperty('serviceName', 'WEBHCAT');
var hcatUser = this.get('globals').someProperty('name', 'hcat_user') ? this.get('globals').findProperty('name', 'hcat_user').value : null;
+ var isHCFSSelected = this.get('selectedServices').someProperty('serviceName', 'HCFS');
+ if (!isHCFSSelected) {
+ // screen out the HCFS-specific core-site.xml entries
+ var HCFSOnly = ["fs.glusterfs.automount", "fs.glusterfs.impl", "fs.glusterfs.volname", "fs.glusterfs.mount", "fs.glusterfs.server"];
+ HCFSOnly.forEach(function (_HCFSOnly) {
+ var configToRemove = coreSiteObj.findProperty("name", _HCFSOnly);
+ if(configToRemove) {
+ coreSiteObj.removeObject(configToRemove);
+ }
+ }, this);
+ }
coreSiteObj.forEach(function (_coreSiteObj) {
if ((isOozieSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.groups')) && (isHiveSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.groups')) && (isHcatSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.groups'))) {
coreSiteProperties[_coreSiteObj.name] = _coreSiteObj.value;
this._recordHostOverrideFromObj(_coreSiteObj, 'core-site', 'version1', this);
}
+ if (isHCFSSelected && _coreSiteObj.name == "fs.default.name") {
+ coreSiteProperties[_coreSiteObj.name] = this.get('globals').someProperty('name', 'fs_default_name') ? this.get('globals').findProperty('name', 'fs_default_name').value : null;
+ }
console.log("STEP*: name of the property is: " + _coreSiteObj.name);
console.log("STEP8: value of the property is: " + _coreSiteObj.value);
}, this);
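
The net effect of the hunk above on the generated core-site: the fs.glusterfs.* keys are dropped whenever HCFS is not selected, and when it is selected fs.default.name is overridden from the fs_default_name global. A simplified standalone sketch (the property names come from this patch; the helper itself is illustrative):

var HCFS_ONLY_KEYS = ['fs.glusterfs.automount', 'fs.glusterfs.impl',
  'fs.glusterfs.volname', 'fs.glusterfs.mount', 'fs.glusterfs.server'];

function buildCoreSiteProperties(coreSiteConfigs, globals, isHCFSSelected) {
  var result = {};
  coreSiteConfigs.forEach(function (cfg) {
    if (!isHCFSSelected && HCFS_ONLY_KEYS.indexOf(cfg.name) !== -1) {
      return; // screen out the HCFS-specific core-site entries
    }
    result[cfg.name] = cfg.value;
  });
  if (isHCFSSelected) {
    var fsDefault = globals.filter(function (g) { return g.name === 'fs_default_name'; })[0];
    result['fs.default.name'] = fsDefault ? fsDefault.value : null;
  }
  return result;
}

console.log(buildCoreSiteProperties(
  [{ name: 'fs.default.name', value: 'hdfs://nn:8020' },
   { name: 'fs.glusterfs.volname', value: 'HadoopVol' }],
  [{ name: 'fs_default_name', value: 'glusterfs://gluster1:9000' }],
  true));
// -> { 'fs.default.name': 'glusterfs://gluster1:9000', 'fs.glusterfs.volname': 'HadoopVol' }
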
@@ -1578,6 +1617,8 @@ App.WizardStep8Controller = Em.Controller.extend({
switch (serviceName) {
case 'HDFS':
return {config: {'global': 'version1', 'core-site': 'version1', 'hdfs-site': 'version1'}};
+ case 'HCFS':
+ return {config: {'global': 'version1', 'core-site': 'version1'}};
case 'MAPREDUCE':
return {config: {'global': 'version1', 'core-site': 'version1', 'mapred-site': 'version1', 'capacity-scheduler': 'version1', 'mapred-queue-acls': 'version1'}};
case 'MAPREDUCE2':
diff --git a/ambari-web/app/controllers/wizard/step9_controller.js b/ambari-web/app/controllers/wizard/step9_controller.js
index e1f93436908..91b85970387 100644
--- a/ambari-web/app/controllers/wizard/step9_controller.js
+++ b/ambari-web/app/controllers/wizard/step9_controller.js
@@ -378,13 +378,15 @@ App.WizardStep9Controller = Em.Controller.extend({
launchStartServicesSuccessCallback: function (jsonData) {
console.log("TRACE: Step9 -> In success function for the startService call");
console.log("TRACE: Step9 -> value of the received data is: " + jsonData);
- var requestId = jsonData.Requests.id;
+ var requestId = (jsonData && jsonData.Requests && jsonData.Requests.id) ? jsonData.Requests.id : this.get('content.cluster.requestId');
+ var status = (jsonData && jsonData.Requests) ? "INSTALLED" : "STARTED";
+ var completed = (jsonData && jsonData.Requests) ? false : true;
console.log('requestId is: ' + requestId);
var clusterStatus = {
- status: 'INSTALLED',
+ status: status,
requestId: requestId,
isStartError: false,
- isCompleted: false
+ isCompleted: completed
};
App.router.get(this.get('content.controllerName')).saveClusterStatus(clusterStatus);
@@ -394,8 +396,9 @@ App.WizardStep9Controller = Em.Controller.extend({
clusterState: 'SERVICE_STARTING_3',
localdb: App.db.data
});
-
- this.startPolling();
+ if (!completed) {
+ this.startPolling();
+ }
},
launchStartServicesErrorCallback: function () {
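
The two hunks above make the start-services callback tolerant of an empty response: on a cluster with nothing to start (for example HCFS only), the API returns no Requests block, so the wizard keeps the previous request id, records the cluster as STARTED and skips polling. A hedged sketch of that decision in isolation:

// Illustrative only: derive the cluster-status record from the start-services response.
function clusterStatusFromStartResponse(jsonData, previousRequestId) {
  var hasRequest = !!(jsonData && jsonData.Requests);
  return {
    status: hasRequest ? 'INSTALLED' : 'STARTED',
    requestId: hasRequest ? jsonData.Requests.id : previousRequestId,
    isStartError: false,
    isCompleted: !hasRequest // nothing to poll when no start request was created
  };
}

console.log(clusterStatusFromStartResponse(null, 42));
// -> { status: 'STARTED', requestId: 42, isStartError: false, isCompleted: true }
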
@@ -461,29 +464,35 @@ App.WizardStep9Controller = Em.Controller.extend({
progressPerHost: function (actions, contentHost) {
var progress = 0;
var actionsPerHost = actions.length;
- // TODO: consolidate to a single filter function for better performance
- var completedActions = actions.filterProperty('Tasks.status', 'COMPLETED').length
+
+ if (actionsPerHost != 0) {
+ // TODO: consolidate to a single filter function for better performance
+ var completedActions = actions.filterProperty('Tasks.status', 'COMPLETED').length
+ + actions.filterProperty('Tasks.status', 'FAILED').length
+ + actions.filterProperty('Tasks.status', 'ABORTED').length
+ + actions.filterProperty('Tasks.status', 'TIMEDOUT').length;
- var queuedActions = actions.filterProperty('Tasks.status', 'QUEUED').length;
- var inProgressActions = actions.filterProperty('Tasks.status', 'IN_PROGRESS').length;
- /** for the install phase (PENDING), % completed per host goes up to 33%; floor(100 / 3)
- * for the start phase (INSTALLED), % completed starts from 34%
- * when task in queued state means it's completed on 9%
- * in progress - 35%
- * completed - 100%
- */
- switch (this.get('content.cluster.status')) {
- case 'PENDING':
- progress = Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 33);
- break;
- case 'INSTALLED':
- progress = 34 + Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 66);
- break;
- default:
- progress = 100;
- break;
+ var queuedActions = actions.filterProperty('Tasks.status', 'QUEUED').length;
+ var inProgressActions = actions.filterProperty('Tasks.status', 'IN_PROGRESS').length;
+ /** for the install phase (PENDING), % completed per host goes up to 33%; floor(100 / 3)
+ * for the start phase (INSTALLED), % completed starts from 34%
+ * when task in queued state means it's completed on 9%
+ * in progress - 35%
+ * completed - 100%
+ */
+ switch (this.get('content.cluster.status')) {
+ case 'PENDING':
+ progress = Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 33);
+ break;
+ case 'INSTALLED':
+ progress = 34 + Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 66);
+ break;
+ default:
+ progress = 100;
+ break;
+ }
+ } else {
+ progress = 100; // if there are no more actions for this host, it's done.
+ contentHost.set('status', 'success');
}
console.log('INFO: progressPerHost is: ' + progress);
contentHost.set('progress', progress.toString());
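
As a worked example of the progress formula kept above: during the install phase (status PENDING), a host with 10 actions of which 4 are completed/failed/aborted/timed-out, 2 queued and 1 in progress reports 15 percent, while a host with no actions at all is now short-circuited straight to 100 percent and flagged as success.

// Same numbers, spelled out:
var progress = Math.ceil(((2 * 0.09) + (1 * 0.35) + 4) / 10 * 33); // 14.949 -> 15
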
@@ -645,16 +654,16 @@ App.WizardStep9Controller = Em.Controller.extend({
this.hosts.forEach(function (_host) {
var actionsPerHost = tasksData.filterProperty('Tasks.host_name', _host.name); // retrieved from polled Data
if (actionsPerHost.length === 0) {
- _host.set('message', this.t('installer.step9.host.status.nothingToInstall'));
+ if (_host.get('status') != "success") {
+ _host.set('message', this.t('installer.step9.host.status.nothingToInstall'));
+ }
console.log("INFO: No task is hosted on the host");
}
- if (actionsPerHost !== null && actionsPerHost !== undefined && actionsPerHost.length !== 0) {
- this.setLogTasksStatePerHost(actionsPerHost, _host);
- this.onSuccessPerHost(actionsPerHost, _host); // every action should be a success
- this.onErrorPerHost(actionsPerHost, _host); // any action should be a failure
- this.onInProgressPerHost(actionsPerHost, _host); // current running action for a host
- totalProgress += self.progressPerHost(actionsPerHost, _host);
- }
+ this.setLogTasksStatePerHost(actionsPerHost, _host);
+ this.onSuccessPerHost(actionsPerHost, _host); // every action should be a success
+ this.onErrorPerHost(actionsPerHost, _host); // any action should be a failure
+ this.onInProgressPerHost(actionsPerHost, _host); // current running action for a host
+ totalProgress += self.progressPerHost(actionsPerHost, _host);
}, this);
totalProgress = Math.floor(totalProgress / this.hosts.length);
this.set('progress', totalProgress.toString());
diff --git a/ambari-web/app/data/config_mapping.js b/ambari-web/app/data/config_mapping.js
index 992875f57a6..6539d551237 100644
--- a/ambari-web/app/data/config_mapping.js
+++ b/ambari-web/app/data/config_mapping.js
@@ -784,6 +784,48 @@ var configs = [
"foreignKey": null,
"value": "/hbase-unsecure",
"filename": "hbase-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.impl",
+ "templateName": ["fs_glusterfs_impl"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.volname",
+ "templateName": ["fs_glusterfs_volname"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.mount",
+ "templateName": ["fs_glusterfs_mount"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.server",
+ "templateName": ["fs_glusterfs_server"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.automount",
+ "templateName": ["fs_glusterfs_automount"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
+ },
+ {
+ "name": "fs.glusterfs.getfattrcmd",
+ "templateName": ["fs_glusterfs_getfattrcmd"],
+ "foreignKey": null,
+ "value": "",
+ "filename": "core-site.xml"
}
];
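
Each entry added above maps one global/template variable onto a final key in core-site.xml; with an empty value, the property simply takes the value of the single global named in templateName. A rough sketch of that lookup (the real wizard also supports value templates with placeholders, omitted here):

function resolveSimpleMapping(mapping, globals) {
  var source = globals.filter(function (g) { return g.name === mapping.templateName[0]; })[0];
  return {
    name: mapping.name,                // e.g. 'fs.glusterfs.volname'
    value: source ? source.value : '', // e.g. 'HadoopVol'
    filename: mapping.filename         // 'core-site.xml'
  };
}

console.log(resolveSimpleMapping(
  { name: 'fs.glusterfs.volname', templateName: ['fs_glusterfs_volname'], value: '', filename: 'core-site.xml' },
  [{ name: 'fs_glusterfs_volname', value: 'HadoopVol' }]));
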
diff --git a/ambari-web/app/data/config_properties.js b/ambari-web/app/data/config_properties.js
index 17a67c73bc0..51844a19de8 100644
--- a/ambari-web/app/data/config_properties.js
+++ b/ambari-web/app/data/config_properties.js
@@ -491,6 +491,97 @@ module.exports =
"serviceName": "HDFS",
"category": "Advanced"
},
+ /**********************************************HCFS***************************************/
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_server",
+ "displayName": "Gluster server",
+ "description": "Gluster server (Slave FQDN)",
+ "defaultValue": "{firstHost}",
+ "displayType": "string",
+ "isVisible": true,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_default_name",
+ "displayName": "Gluster default fs name",
+ "description": "Gluster default filesystem name (glusterfs://{MasterFQDN}:9000)",
+ "defaultValue": "glusterfs://{firstHost}:9000",
+ "displayType": "string",
+ "isVisible": false,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_volname",
+ "displayName": "Gluster volume name",
+ "description": "Gluster volume name",
+ "defaultValue": "HadoopVol",
+ "displayType": "string",
+ "isVisible": true,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_mount",
+ "displayName": "Gluster mount point",
+ "description": "Gluster mount point",
+ "defaultValue": "/mnt/glusterfs",
+ "displayType": "string",
+ "isVisible": true,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_automount",
+ "displayName": "Automount HCFS",
+ "description": "Automount the HCFS Volume",
+ "defaultValue": "false",
+ "displayType": "string",
+ "isVisible": false,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_impl",
+ "displayName": "Gluster fs impl",
+ "description": "Gluster fs impl",
+ "defaultValue": "org.apache.hadoop.fs.glusterfs.GlusterFileSystem",
+ "displayType": "string",
+ "isVisible": false,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General",
+ "filename": "core-site.xml",
+ },
+ {
+ "id": "puppet var",
+ "name": "fs_glusterfs_getfattrcmd",
+ "displayName": "Gluster getfattr command",
+ "description": "Gluster getfattr command",
+ "defaultValue": "sudo getfattr -m . -n trusted.glusterfs.pathinfo",
+ "displayType": "string",
+ "isVisible": false,
+ "domain": "global",
+ "serviceName": "HCFS",
+ "category": "General"
+ },
/**********************************************MAPREDUCE***************************************/
{
"id": "puppet var",
diff --git a/ambari-web/app/data/review_configs.js b/ambari-web/app/data/review_configs.js
index 5013fc46b13..cdf2de61edc 100644
--- a/ambari-web/app/data/review_configs.js
+++ b/ambari-web/app/data/review_configs.js
@@ -60,6 +60,16 @@ module.exports = [
})
]
}),
+ Ember.Object.create({
+ service_name: 'HCFS',
+ display_name: 'HCFS',
+ service_components: [
+ Ember.Object.create({
+ display_name: 'HCFS Client',
+ component_value: ''
+ })
+ ]
+ }),
Ember.Object.create({
service_name: 'MAPREDUCE',
display_name: 'MapReduce',
diff --git a/ambari-web/app/data/service_components.js b/ambari-web/app/data/service_components.js
index dac3af57db6..467bbe102c4 100644
--- a/ambari-web/app/data/service_components.js
+++ b/ambari-web/app/data/service_components.js
@@ -313,5 +313,13 @@ module.exports = new Ember.Set([
isMaster: true,
isClient: false,
description: ''
+ },
+ {
+ service_name: 'HCFS',
+ component_name: 'HCFS_CLIENT',
+ display_name: 'HCFS Client',
+ isMaster: false,
+ isClient: true,
+ description: 'Client component for HCFS'
}
]);
\ No newline at end of file
diff --git a/ambari-web/app/data/service_configs.js b/ambari-web/app/data/service_configs.js
index 51dd1794976..d5c6e17f04c 100644
--- a/ambari-web/app/data/service_configs.js
+++ b/ambari-web/app/data/service_configs.js
@@ -38,7 +38,15 @@ module.exports = [
sites: ['global', 'core-site', 'hdfs-site'],
configs: configProperties.filterProperty('serviceName', 'HDFS')
},
-
+ {
+ serviceName: 'HCFS',
+ displayName: 'HCFS',
+ filename: 'core-site',
+ configCategories: [
+ App.ServiceConfigCategory.create({ name: 'General', displayName : 'General'})
+ ],
+ configs: configProperties.filterProperty('serviceName', 'HCFS')
+ },
{
serviceName: 'MAPREDUCE',
displayName: 'MapReduce',
diff --git a/ambari-web/app/data/services.js b/ambari-web/app/data/services.js
index 22fa22c91df..20d5dd15e45 100644
--- a/ambari-web/app/data/services.js
+++ b/ambari-web/app/data/services.js
@@ -22,11 +22,19 @@ module.exports = [
{
serviceName: 'HDFS',
displayName: 'HDFS',
- isDisabled: true,
+ isDisabled: false,
isSelected: true,
canBeSelected: true,
description: Em.I18n.t('services.hdfs.description')
},
+ {
+ serviceName: 'HCFS',
+ displayName: 'HCFS',
+ isDisabled: false,
+ isSelected: false,
+ canBeSelected: true,
+ description: Em.I18n.t('services.hcfs.description')
+ },
{
serviceName: 'MAPREDUCE',
displayName: 'MapReduce',
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 9f1288ef349..0b851e84ef8 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -183,6 +183,7 @@ Em.I18n.translations = {
'services.nagios.description':'Nagios Monitoring and Alerting system',
'services.ganglia.description':'Ganglia Metrics Collection system',
'services.hdfs.description':'Apache Hadoop Distributed File System',
+ 'services.hcfs.description':'Apache Hadoop Compatible File System (must be installed manually)',
'services.mapreduce.description':'Apache Hadoop Distributed Processing Framework',
'services.sqoop.description':'Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases',
'services.pig.description':'Scripting platform for analyzing large datasets',
@@ -340,6 +341,10 @@ Em.I18n.translations = {
'installer.step4.header':'Choose Services',
'installer.step4.body':'Choose which services you want to install on your cluster.',
+ 'installer.step4.hdfsCheck.popup.header':'Hadoop File System Needed',
+ 'installer.step4.hdfsCheck.popup.body':'You did not select HDFS or HCFS, but one is required. We will automatically add HDFS. Is this OK?',
+ 'installer.step4.multipleDFS.popup.header':'Multiple File Systems Selected',
+ 'installer.step4.multipleDFS.popup.body':'You selected more than one file system. We will automatically select only HDFS. Is this OK?',
'installer.step4.mapreduceCheck.popup.header':'MapReduce Needed',
'installer.step4.mapreduceCheck.popup.body':'You did not select MapReduce, but it is needed by other services you selected. We will automatically add MapReduce. Is this OK?',
'installer.step4.monitoringCheck.popup.header':'Limited Functionality Warning',
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 80aaf4aa227..8cb6d764e8d 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -389,6 +389,7 @@ App.config = Em.Object.create({
serviceConfigProperty = App.ServiceConfigProperty.create(_config);
this.updateHostOverrides(serviceConfigProperty, _config);
serviceConfigProperty.initialValue(localDB);
+ this.tweakDynamicDefaults(localDB, serviceConfigProperty, _config);
serviceConfigProperty.validate();
configsByService.pushObject(serviceConfigProperty);
}, this);
@@ -399,6 +400,26 @@ App.config = Em.Object.create({
}, this);
return renderedServiceConfigs;
},
+ /**
+ * Takes care of the "dynamic defaults" for the HCFS configs: replaces the
+ * {firstHost} placeholder in a config's value/defaultValue with the first
+ * host known to the wizard (from the local DB).
+ */
+ tweakDynamicDefaults: function (localDB, serviceConfigProperty, config) {
+ console.log("Step7: Tweaking Dynamic defaults");
+ var firstHost = null;
+ for(var host in localDB.hosts) {
+ firstHost = host;
+ break;
+ }
+ try {
+ if (typeof config.defaultValue === "string" && config.defaultValue.indexOf("{firstHost}") >= 0) {
+ serviceConfigProperty.set('value', serviceConfigProperty.value.replace(new RegExp("{firstHost}"), firstHost));
+ serviceConfigProperty.set('defaultValue', serviceConfigProperty.defaultValue.replace(new RegExp("{firstHost}"), firstHost));
+ }
+ } catch (err) {
+ // Nothing to worry about here, most likely trying indexOf on a non-string
+ }
+ },
/**
* create new child configs from overrides, attach them to parent config
* override - value of config, related to particular host(s)
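
In isolation, the tweakDynamicDefaults hook added above rewrites any {firstHost} placeholder in an HCFS default with the first host the wizard knows about. A standalone approximation using a plain object in place of the Ember ServiceConfigProperty:

function tweakDynamicDefault(property, localDB) {
  var firstHost = Object.keys(localDB.hosts)[0];
  if (typeof property.defaultValue === 'string' &&
      property.defaultValue.indexOf('{firstHost}') >= 0) {
    property.value = property.value.replace('{firstHost}', firstHost);
    property.defaultValue = property.defaultValue.replace('{firstHost}', firstHost);
  }
  return property;
}

console.log(tweakDynamicDefault(
  { name: 'fs_glusterfs_server', value: '{firstHost}', defaultValue: '{firstHost}' },
  { hosts: { 'gluster1.example.com': {}, 'gluster2.example.com': {} } }));
// -> value and defaultValue become 'gluster1.example.com'
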
diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js
index badb4e95511..9db1f8dbce3 100644
--- a/ambari-web/app/utils/helper.js
+++ b/ambari-web/app/utils/helper.js
@@ -346,6 +346,10 @@ App.format = {
return 'Update Exclude File';
case 'HUE_SERVER':
return 'Hue Server';
+ case 'HCFS_CLIENT':
+ return 'HCFS Client';
+ case 'HCFS_SERVICE_CHECK':
+ return 'HCFS Service Check';
}
},
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 6dd7a384696..a72596f8cd1 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -75,6 +75,9 @@
+
+
+
diff --git a/ambari-web/test/controllers/main/admin/cluster_test.js b/ambari-web/test/controllers/main/admin/cluster_test.js
index 3f1c6151743..93823b3513c 100644
--- a/ambari-web/test/controllers/main/admin/cluster_test.js
+++ b/ambari-web/test/controllers/main/admin/cluster_test.js
@@ -39,7 +39,7 @@ describe('App.MainAdminClusterController', function () {
"items" : [
{
"Versions" : {
- "stack_version" : "1.3.0",
+ "stack_version" : "1.3.1",
"min_upgrade_version" : "1.2.0"
}
},
diff --git a/hcfs_noHadoop20.patch b/hcfs_noHadoop20.patch
new file mode 100644
index 00000000000..4c039fd7e08
--- /dev/null
+++ b/hcfs_noHadoop20.patch
@@ -0,0 +1,3739 @@
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml
+deleted file mode 100644
+index ca45822..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/metainfo.xml
++++ /dev/null
+@@ -1,22 +0,0 @@
+-
+-
+-
+-
+- true
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml
+deleted file mode 100644
+index 5bb20d8..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/repos/repoinfo.xml
++++ /dev/null
+@@ -1,99 +0,0 @@
+-
+-
+-
+-
+-
+- http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos6
+- HDP-2.0.1
+- HDP
+-
+-
+-
+- HDP-epel
+- HDP-epel
+-
+-
+-
+-
+-
+- http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos5
+- HDP-2.0.1
+- HDP
+-
+-
+-
+- HDP-epel
+- HDP-epel
+-
+-
+-
+-
+-
+- http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos6
+- HDP-2.0.1
+- HDP
+-
+-
+-
+- HDP-epel
+- HDP-epel
+-
+-
+-
+-
+-
+- http://public-repo-1.hortonworks.com/HDP-2.0.0.2/repos/centos5
+- HDP-2.0.1
+- HDP
+-
+-
+-
+- HDP-epel
+- HDP-epel
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml
+deleted file mode 100644
+index 395bb4f..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/GANGLIA/metainfo.xml
++++ /dev/null
+@@ -1,36 +0,0 @@
+-
+-
+-
+- root
+- Ganglia Metrics Collection system
+- 3.2.0
+-
+-
+-
+- GANGLIA_SERVER
+- MASTER
+-
+-
+-
+- GANGLIA_MONITOR
+- SLAVE
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml
+deleted file mode 100644
+index e45f23c..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-policy.xml
++++ /dev/null
+@@ -1,53 +0,0 @@
+-
+-
+-
+-
+-
+-
+- security.client.protocol.acl
+- *
+- ACL for HRegionInterface protocol implementations (ie.
+- clients talking to HRegionServers)
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.admin.protocol.acl
+- *
+- ACL for HMasterInterface protocol implementation (ie.
+- clients talking to HMaster for admin operations).
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.masterregion.protocol.acl
+- *
+- ACL for HMasterRegionInterface protocol implementations
+- (for HRegionServers communicating with HMaster)
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
+deleted file mode 100644
+index ba47e76..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
++++ /dev/null
+@@ -1,334 +0,0 @@
+-
+-
+-
+-
+-
+- hbase.rootdir
+-
+- The directory shared by region servers and into
+- which HBase persists. The URL should be 'fully-qualified'
+- to include the filesystem scheme. For example, to specify the
+- HDFS directory '/hbase' where the HDFS instance's namenode is
+- running at namenode.example.org on port 9000, set this value to:
+- hdfs://namenode.example.org:9000/hbase. By default HBase writes
+- into /tmp. Change this configuration else all data will be lost
+- on machine restart.
+-
+-
+-
+- hbase.cluster.distributed
+- true
+- The mode the cluster will be in. Possible values are
+- false for standalone mode and true for distributed mode. If
+- false, startup will run all HBase and ZooKeeper daemons together
+- in the one JVM.
+-
+-
+-
+- hbase.tmp.dir
+- /var/log/hbase
+- Temporary directory on the local filesystem.
+- Change this setting to point to a location more permanent
+- than '/tmp' (The '/tmp' directory is often cleared on
+- machine restart).
+-
+-
+-
+- hbase.master.info.bindAddress
+-
+- The bind address for the HBase Master web UI
+-
+-
+-
+- hbase.regionserver.global.memstore.upperLimit
+- 0.4
+- Maximum size of all memstores in a region server before new
+- updates are blocked and flushes are forced. Defaults to 40% of heap
+-
+-
+-
+- hbase.regionserver.handler.count
+-
+- Count of RPC Listener instances spun up on RegionServers.
+- Same property is used by the Master for count of master handlers.
+- Default is 10.
+-
+-
+-
+- hbase.hregion.majorcompaction
+-
+- The time (in miliseconds) between 'major' compactions of all
+- HStoreFiles in a region. Default: 1 day.
+- Set to 0 to disable automated major compactions.
+-
+-
+-
+- hbase.master.lease.thread.wakefrequency
+- 3000
+- The interval between checks for expired region server leases.
+- This value has been reduced due to the other reduced values above so that
+- the master will notice a dead region server sooner. The default is 15 seconds.
+-
+-
+-
+- hbase.regionserver.global.memstore.lowerLimit
+- 0.35
+- When memstores are being forced to flush to make room in
+- memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+- This value equal to hbase.regionserver.global.memstore.upperLimit causes
+- the minimum possible flushing to occur when updates are blocked due to
+- memstore limiting.
+-
+-
+-
+- hbase.hregion.memstore.block.multiplier
+-
+- Block updates if memstore has hbase.hregion.memstore.block.multiplier
+- time hbase.hregion.flush.size bytes. Useful preventing
+- runaway memstore during spikes in update traffic. Without an
+- upper-bound, memstore fills such that when it flushes the
+- resultant flush files take a long time to compact or split, or
+- worse, we OOME
+-
+-
+-
+- hbase.hregion.memstore.flush.size
+-
+-
+- Memstore will be flushed to disk if size of the memstore
+- exceeds this number of bytes. Value is checked by a thread that runs
+- every hbase.server.thread.wakefrequency.
+-
+-
+-
+- hbase.hregion.memstore.mslab.enabled
+- true
+-
+- Enables the MemStore-Local Allocation Buffer,
+- a feature which works to prevent heap fragmentation under
+- heavy write loads. This can reduce the frequency of stop-the-world
+- GC pauses on large heaps.
+-
+-
+-
+- hbase.hregion.max.filesize
+- 268435456
+-
+- Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+- grown to exceed this value, the hosting HRegion is split in two.
+- Default: 1G.
+-
+-
+-
+- hbase.client.scanner.caching
+-
+- Number of rows that will be fetched when calling next
+- on a scanner if it is not served from (local, client) memory. Higher
+- caching values will enable faster scanners but will eat up more memory
+- and some calls of next may take longer and longer times when the cache is empty.
+- Do not set this value such that the time between invocations is greater
+- than the scanner timeout; i.e. hbase.regionserver.lease.period
+-
+-
+-
+- zookeeper.session.timeout
+-
+- ZooKeeper session timeout.
+- HBase passes this to the zk quorum as suggested maximum time for a
+- session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+- http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+- "The client sends a requested timeout, the server responds with the
+- timeout that it can give the client. " In milliseconds.
+-
+-
+-
+- hbase.client.keyvalue.maxsize
+-
+- Specifies the combined maximum allowed size of a KeyValue
+- instance. This is to set an upper boundary for a single entry saved in a
+- storage file. Since they cannot be split it helps avoiding that a region
+- cannot be split any further because the data is too large. It seems wise
+- to set this to a fraction of the maximum region size. Setting it to zero
+- or less disables the check.
+-
+-
+-
+- hbase.hstore.compactionThreshold
+- 3
+-
+- If more than this number of HStoreFiles in any one HStore
+- (one HStoreFile is written per flush of memstore) then a compaction
+- is run to rewrite all HStoreFiles files as one. Larger numbers
+- put off compaction but when it runs, it takes longer to complete.
+-
+-
+-
+- hbase.hstore.blockingStoreFiles
+- 7
+-
+- If more than this number of StoreFiles in any one Store
+- (one StoreFile is written per flush of MemStore) then updates are
+- blocked for this HRegion until a compaction is completed, or
+- until hbase.hstore.blockingWaitTime has been exceeded.
+-
+-
+-
+- hfile.block.cache.size
+- 0.25
+-
+- Percentage of maximum heap (-Xmx setting) to allocate to block cache
+- used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+- Set to 0 to disable but it's not recommended.
+-
+-
+-
+-
+-
+- hbase.master.keytab.file
+- /etc/security/keytabs/hm.service.keytab
+- Full path to the kerberos keytab file to use for logging in
+- the configured HMaster server principal.
+-
+-
+-
+- hbase.master.kerberos.principal
+-
+- Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+- that should be used to run the HMaster process. The principal name should
+- be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+- portion, it will be replaced with the actual hostname of the running
+- instance.
+-
+-
+-
+- hbase.regionserver.keytab.file
+- /etc/security/keytabs/rs.service.keytab
+- Full path to the kerberos keytab file to use for logging in
+- the configured HRegionServer server principal.
+-
+-
+-
+- hbase.regionserver.kerberos.principal
+-
+- Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+- that should be used to run the HRegionServer process. The principal name
+- should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+- hostname portion, it will be replaced with the actual hostname of the
+- running instance. An entry for this principal must exist in the file
+- specified in hbase.regionserver.keytab.file
+-
+-
+-
+-
+-
+- hbase.superuser
+- hbase
+- List of users or groups (comma-separated), who are allowed
+- full privileges, regardless of stored ACLs, across the cluster.
+- Only used when HBase security is enabled.
+-
+-
+-
+-
+- hbase.coprocessor.region.classes
+-
+- A comma-separated list of Coprocessors that are loaded by
+- default on all tables. For any override coprocessor method, these classes
+- will be called in order. After implementing your own Coprocessor, just put
+- it in HBase's classpath and add the fully qualified class name here.
+- A coprocessor can also be loaded on demand by setting HTableDescriptor.
+-
+-
+-
+-
+- hbase.coprocessor.master.classes
+-
+- A comma-separated list of
+- org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+- loaded by default on the active HMaster process. For any implemented
+- coprocessor methods, the listed classes will be called in order. After
+- implementing your own MasterObserver, just put it in HBase's classpath
+- and add the fully qualified class name here.
+-
+-
+-
+-
+- hbase.zookeeper.property.clientPort
+- 2181
+- Property from ZooKeeper's config zoo.cfg.
+- The port at which the clients will connect.
+-
+-
+-
+-
+-
+- hbase.zookeeper.quorum
+-
+- Comma separated list of servers in the ZooKeeper Quorum.
+- For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+- By default this is set to localhost for local and pseudo-distributed modes
+- of operation. For a fully-distributed setup, this should be set to a full
+- list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+- this is the list of servers which we will start/stop ZooKeeper on.
+-
+-
+-
+-
+-
+- dfs.support.append
+- true
+- Does HDFS allow appends to files?
+- This is an hdfs config. set in here so the hdfs client will do append support.
+- You must ensure that this config. is true serverside too when running hbase
+- (You will have to restart your cluster after setting it).
+-
+-
+-
+-
+- dfs.client.read.shortcircuit
+- true
+- Enable/Disable short circuit read for your client.
+- Hadoop servers should be configured to allow short circuit read
+- for the hbase user for this to take effect
+-
+-
+-
+-
+- dfs.client.read.shortcircuit.skip.checksum
+- false
+- Enable/disbale skipping the checksum check
+-
+-
+-
+- hbase.regionserver.optionalcacheflushinterval
+- 10000
+-
+- Amount of time to wait since the last time a region was flushed before
+- invoking an optional cache flush. Default 60,000.
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml
+deleted file mode 100644
+index 645f7ce..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/metainfo.xml
++++ /dev/null
+@@ -1,40 +0,0 @@
+-
+-
+-
+- mapred
+- Non-relational distributed database and centralized service for configuration management & synchronization
+- 0.94.5.22-1
+-
+-
+-
+- HBASE_MASTER
+- MASTER
+-
+-
+-
+- HBASE_REGIONSERVER
+- SLAVE
+-
+-
+-
+- HBASE_CLIENT
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml
+deleted file mode 100644
+index 45f3342..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HCATALOG/metainfo.xml
++++ /dev/null
+@@ -1,30 +0,0 @@
+-
+-
+-
+- root
+- This is comment for HCATALOG service
+- 0.5.0.22-1
+-
+-
+-
+- HCAT
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml
+deleted file mode 100644
+index e646d5b..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/core-site.xml
++++ /dev/null
+@@ -1,257 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- io.file.buffer.size
+- 131072
+- The size of buffer for use in sequence files.
+- The size of this buffer should probably be a multiple of hardware
+- page size (4096 on Intel x86), and it determines how much data is
+- buffered during read and write operations.
+-
+-
+-
+- io.serializations
+- org.apache.hadoop.io.serializer.WritableSerialization
+-
+-
+-
+- io.compression.codecs
+- org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec
+- A list of the compression codec classes that can be used
+- for compression/decompression.
+-
+-
+-
+- io.compression.codec.lzo.class
+- com.hadoop.compression.lzo.LzoCodec
+- The implementation for lzo codec.
+-
+-
+-
+-
+-
+- fs.default.name
+-
+-
+- The name of the default file system. Either the
+- literal string "local" or a host:port for NDFS.
+- true
+-
+-
+-
+- fs.trash.interval
+- 360
+- Number of minutes between trash checkpoints.
+- If zero, the trash feature is disabled.
+-
+-
+-
+-
+- fs.checkpoint.dir
+-
+- Determines where on the local filesystem the DFS secondary
+- name node should store the temporary images to merge.
+- If this is a comma-delimited list of directories then the image is
+- replicated in all of the directories for redundancy.
+-
+-
+-
+-
+- fs.checkpoint.edits.dir
+- ${fs.checkpoint.dir}
+- Determines where on the local filesystem the DFS secondary
+- name node should store the temporary edits to merge.
+- If this is a comma-delimited list of directoires then teh edits is
+- replicated in all of the directoires for redundancy.
+- Default value is same as fs.checkpoint.dir
+-
+-
+-
+-
+- fs.checkpoint.period
+- 21600
+- The number of seconds between two periodic checkpoints.
+-
+-
+-
+-
+- fs.checkpoint.size
+- 536870912
+- The size of the current edit log (in bytes) that triggers
+- a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+-
+-
+-
+-
+-
+- ipc.client.idlethreshold
+- 8000
+- Defines the threshold number of connections after which
+- connections will be inspected for idleness.
+-
+-
+-
+-
+- ipc.client.connection.maxidletime
+- 30000
+- The maximum time after which a client will bring down the
+- connection to the server.
+-
+-
+-
+-
+- ipc.client.connect.max.retries
+- 50
+- Defines the maximum number of retries for IPC connections.
+-
+-
+-
+-
+- webinterface.private.actions
+- false
+- If set to true, the web interfaces of JT and NN may contain
+- actions, such as kill job, delete file, etc., that should
+- not be exposed to public. Enable this option if the interfaces
+- are only reachable by those who have the right authorization.
+-
+-
+-
+-
+- hadoop.security.authentication
+- simple
+-
+- Set the authentication for the cluster. Valid values are: simple or
+- kerberos.
+-
+-
+-
+- hadoop.security.authorization
+- false
+-
+- Enable authorization for different protocols.
+-
+-
+-
+-
+- hadoop.security.auth_to_local
+-
+- RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+- RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+- RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+- RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+- DEFAULT
+-
+-The mapping from kerberos principal names to local OS user names.
+- So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+- "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+-The translations rules have 3 sections:
+- base filter substitution
+-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+-
+-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+-
+-The filter is a regex in parens that must the generated string for the rule to apply.
+-
+-"(.*%admin)" will take any string that ends in "%admin"
+-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+-
+-Finally, the substitution is a sed rule to translate a regex into a fixed string.
+-
+-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+-"s/X/Y/g" replaces all of the "X" in the name with "Y"
+-
+-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+-
+-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+-DEFAULT
+-
+-To also translate the names with a second component, you'd make the rules:
+-
+-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
+-DEFAULT
+-
+-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+-
+-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
+-DEFAULT
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
+deleted file mode 100644
+index 95fd565..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
++++ /dev/null
+@@ -1,207 +0,0 @@
+-
+-
+-
+-
+-
+-
+- namenode_host
+-
+- NameNode Host.
+-
+-
+- dfs_name_dir
+- /hadoop/hdfs/namenode
+- NameNode Directories.
+-
+-
+- snamenode_host
+-
+- Secondary NameNode.
+-
+-
+- rm_host
+-
+- Resource Manager.
+-
+-
+- nm_hosts
+-
+- List of Node Manager Hosts.
+-
+-
+- hs_host
+-
+- History Server.
+-
+-
+- fs_checkpoint_dir
+- /hadoop/hdfs/namesecondary
+- Secondary NameNode checkpoint dir.
+-
+-
+- datanode_hosts
+-
+- List of Datanode Hosts.
+-
+-
+- dfs_data_dir
+- /hadoop/hdfs/data
+- Data directories for Data Nodes.
+-
+-
+- hdfs_log_dir_prefix
+- /var/log/hadoop
+- Hadoop Log Dir Prefix
+-
+-
+- hadoop_pid_dir_prefix
+- /var/run/hadoop
+- Hadoop PID Dir Prefix
+-
+-
+- dfs_webhdfs_enabled
+- true
+- WebHDFS enabled
+-
+-
+- hadoop_heapsize
+- 1024
+- Hadoop maximum Java heap size
+-
+-
+- namenode_heapsize
+- 1024
+- NameNode Java heap size
+-
+-
+- namenode_opt_newsize
+- 200
+- NameNode new generation size
+-
+-
+- namenode_opt_maxnewsize
+- 640
+- NameNode maximum new generation size
+-
+-
+- datanode_du_reserved
+- 1
+- Reserved space for HDFS
+-
+-
+- dtnode_heapsize
+- 1024
+- DataNode maximum Java heap size
+-
+-
+- dfs_datanode_failed_volume_tolerated
+- 0
+- DataNode volumes failure toleration
+-
+-
+- fs_checkpoint_period
+- 21600
+- HDFS Maximum Checkpoint Delay
+-
+-
+- fs_checkpoint_size
+- 0.5
+- FS Checkpoint Size.
+-
+-
+- proxyuser_group
+- users
+- Proxy user group.
+-
+-
+- dfs_exclude
+-
+- HDFS Exclude hosts.
+-
+-
+- dfs_include
+-
+- HDFS Include hosts.
+-
+-
+- dfs_replication
+- 3
+- Default Block Replication.
+-
+-
+- dfs_block_local_path_access_user
+- hbase
+- Default Block Replication.
+-
+-
+- dfs_datanode_address
+- 50010
+- Port for datanode address.
+-
+-
+- dfs_datanode_http_address
+- 50075
+- Port for datanode address.
+-
+-
+- dfs_datanode_data_dir_perm
+- 750
+- Datanode dir perms.
+-
+-
+-
+- security_enabled
+- false
+- Hadoop Security
+-
+-
+- kerberos_domain
+- EXAMPLE.COM
+- Kerberos realm.
+-
+-
+- kadmin_pw
+-
+- Kerberos realm admin password
+-
+-
+- keytab_path
+- /etc/security/keytabs
+- Kerberos keytab path.
+-
+-
+-
+- keytab_path
+- /etc/security/keytabs
+- KeyTab Directory.
+-
+-
+- namenode_formatted_mark_dir
+- /var/run/hadoop/hdfs/namenode/formatted/
+- Formatteed Mark Directory.
+-
+-
+- hdfs_user
+- hdfs
+- User and Groups.
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
+deleted file mode 100644
+index 6ec304d..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
++++ /dev/null
+@@ -1,134 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- security.client.protocol.acl
+- *
+- ACL for ClientProtocol, which is used by user code
+- via the DistributedFileSystem.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.client.datanode.protocol.acl
+- *
+- ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+- for block recovery.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.datanode.protocol.acl
+- *
+- ACL for DatanodeProtocol, which is used by datanodes to
+- communicate with the namenode.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.inter.datanode.protocol.acl
+- *
+- ACL for InterDatanodeProtocol, the inter-datanode protocol
+- for updating generation timestamp.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.namenode.protocol.acl
+- *
+- ACL for NamenodeProtocol, the protocol used by the secondary
+- namenode to communicate with the namenode.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.inter.tracker.protocol.acl
+- *
+- ACL for InterTrackerProtocol, used by the tasktrackers to
+- communicate with the jobtracker.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.job.submission.protocol.acl
+- *
+- ACL for JobSubmissionProtocol, used by job clients to
+- communciate with the jobtracker for job submission, querying job status etc.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.task.umbilical.protocol.acl
+- *
+- ACL for TaskUmbilicalProtocol, used by the map and reduce
+- tasks to communicate with the parent tasktracker.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.admin.operations.protocol.acl
+- hadoop
+- ACL for AdminOperationsProtocol. Used for admin commands.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+- security.refresh.usertogroups.mappings.protocol.acl
+- hadoop
+- ACL for RefreshUserMappingsProtocol. Used to refresh
+- users mappings. The ACL is a comma-separated list of user and
+- group names. The user and group list is separated by a blank. For
+- e.g. "alice,bob users,wheel". A special value of "*" means all
+- users are allowed.
+-
+-
+-
+- security.refresh.policy.protocol.acl
+- hadoop
+- ACL for RefreshAuthorizationPolicyProtocol, used by the
+- dfsadmin and mradmin commands to refresh the security policy in-effect.
+- The ACL is a comma-separated list of user and group names. The user and
+- group list is separated by a blank. For e.g. "alice,bob users,wheel".
+- A special value of "*" means all users are allowed.
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
+deleted file mode 100644
+index e1a244a..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
++++ /dev/null
+@@ -1,438 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- dfs.name.dir
+-
+-
+- Determines where on the local filesystem the DFS name node
+- should store the name table. If this is a comma-delimited list
+- of directories then the name table is replicated in all of the
+- directories, for redundancy.
+- true
+-
+-
+-
+- dfs.support.append
+- true
+- to enable dfs append
+- true
+-
+-
+-
+- dfs.webhdfs.enabled
+- true
+- to enable webhdfs
+- true
+-
+-
+-
+-
+-
+- dfs.datanode.failed.volumes.tolerated
+- 0
+- #of failed disks dn would tolerate
+- true
+-
+-
+-
+- dfs.block.local-path-access.user
+- hbase
+- the user who is allowed to perform short
+- circuit reads.
+-
+- true
+-
+-
+-
+- dfs.data.dir
+-
+- Determines where on the local filesystem an DFS data node
+- should store its blocks. If this is a comma-delimited
+- list of directories, then data will be stored in all named
+- directories, typically on different devices.
+- Directories that do not exist are ignored.
+-
+- true
+-
+-
+-
+- dfs.hosts.exclude
+-
+- Names a file that contains a list of hosts that are
+- not permitted to connect to the namenode. The full pathname of the
+- file must be specified. If the value is empty, no hosts are
+- excluded.
+-
+-
+-
+-
+-
+- dfs.checksum.type
+- CRC32
+- The checksum method to be used by default. To maintain
+- compatibility, it is being set to CRC32. Once all migration steps
+- are complete, we can change it to CRC32C and take advantage of the
+- additional performance benefit.
+-
+-
+-
+- dfs.replication.max
+- 50
+- Maximal block replication.
+-
+-
+-
+-
+- dfs.replication
+- 3
+- Default block replication.
+-
+-
+-
+-
+- dfs.heartbeat.interval
+- 3
+- Determines datanode heartbeat interval in seconds.
+-
+-
+-
+- dfs.heartbeat.interval
+- 3
+- Determines datanode heartbeat interval in seconds.
+-
+-
+-
+- dfs.safemode.threshold.pct
+- 1.0f
+-
+- Specifies the percentage of blocks that should satisfy
+- the minimal replication requirement defined by dfs.replication.min.
+- Values less than or equal to 0 mean not to start in safe mode.
+- Values greater than 1 will make safe mode permanent.
+-
+-
+-
+-
+- dfs.balance.bandwidthPerSec
+- 6250000
+-
+- Specifies the maximum amount of bandwidth that each datanode
+- can utilize for the balancing purpose in term of
+- the number of bytes per second.
+-
+-
+-
+-
+- dfs.datanode.address
+- 0.0.0.0:50010
+-
+-
+-
+- dfs.datanode.http.address
+- 0.0.0.0:50075
+-
+-
+-
+- dfs.block.size
+- 134217728
+- The default block size for new files.
+-
+-
+-
+- dfs.http.address
+-
+-The name of the default file system. Either the
+-literal string "local" or a host:port for NDFS.
+-true
+-
+-
+-
+-dfs.datanode.du.reserved
+-
+-1073741824
+-Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+-
+-
+-
+-
+-dfs.datanode.ipc.address
+-0.0.0.0:8010
+-
+-The datanode ipc server address and port.
+-If the port is 0 then the server will start on a free port.
+-
+-
+-
+-
+-dfs.blockreport.initialDelay
+-120
+-Delay for first block report in seconds.
+-
+-
+-
+-dfs.datanode.du.pct
+-0.85f
+-When calculating remaining space, only use this percentage of the real available space
+-
+-
+-
+-
+-dfs.namenode.handler.count
+-40
+-The number of server threads for the namenode.
+-
+-
+-
+-dfs.datanode.max.xcievers
+-1024
+-PRIVATE CONFIG VARIABLE
+-
+-
+-
+-
+-
+-dfs.umaskmode
+-022
+-
+-The octal umask used when creating files and directories.
+-
+-
+-
+-
+-dfs.web.ugi
+-
+-gopher,gopher
+-The user account used by the web interface.
+-Syntax: USERNAME,GROUP1,GROUP2, ...
+-
+-
+-
+-
+-dfs.permissions
+-true
+-
+-If "true", enable permission checking in HDFS.
+-If "false", permission checking is turned off,
+-but all other behavior is unchanged.
+-Switching from one parameter value to the other does not change the mode,
+-owner or group of files or directories.
+-
+-
+-
+-
+-dfs.permissions.supergroup
+-hdfs
+-The name of the group of super-users.
+-
+-
+-
+-dfs.namenode.handler.count
+-100
+-Added to grow Queue size so that more client connections are allowed
+-
+-
+-
+-ipc.server.max.response.size
+-5242880
+-
+-
+-dfs.block.access.token.enable
+-true
+-
+-If "true", access tokens are used as capabilities for accessing datanodes.
+-If "false", no access tokens are checked on accessing datanodes.
+-
+-
+-
+-
+-dfs.namenode.kerberos.principal
+-
+-
+-Kerberos principal name for the NameNode
+-
+-
+-
+-
+-dfs.secondary.namenode.kerberos.principal
+-
+-
+- Kerberos principal name for the secondary NameNode.
+-
+-
+-
+-
+-
+-
+- dfs.namenode.kerberos.https.principal
+-
+- The Kerberos principal for the host that the NameNode runs on.
+-
+-
+-
+-
+- dfs.secondary.namenode.kerberos.https.principal
+-
+- The Kerberos principal for the hostthat the secondary NameNode runs on.
+-
+-
+-
+-
+-
+- dfs.secondary.http.address
+-
+- Address of secondary namenode web server
+-
+-
+-
+- dfs.secondary.https.port
+- 50490
+- The https port where secondary-namenode binds
+-
+-
+-
+- dfs.web.authentication.kerberos.principal
+-
+-
+- The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+- The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+- HTTP SPENGO specification.
+-
+-
+-
+-
+- dfs.web.authentication.kerberos.keytab
+-
+-
+- The Kerberos keytab file with the credentials for the
+- HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+-
+-
+-
+-
+- dfs.datanode.kerberos.principal
+-
+-
+- The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+-
+-
+-
+-
+- dfs.namenode.keytab.file
+-
+-
+- Combined keytab file containing the namenode service and host principals.
+-
+-
+-
+-
+- dfs.secondary.namenode.keytab.file
+-
+-
+- Combined keytab file containing the namenode service and host principals.
+-
+-
+-
+-
+- dfs.datanode.keytab.file
+-
+-
+- The filename of the keytab file for the DataNode.
+-
+-
+-
+-
+- dfs.https.port
+- 50470
+- The https port where namenode binds
+-
+-
+-
+-
+- dfs.https.address
+-
+- The https address where namenode binds
+-
+-
+-
+-
+- dfs.datanode.data.dir.perm
+- 750
+-The permissions that should be there on dfs.data.dir
+-directories. The datanode will not come up if the permissions are
+-different on existing dfs.data.dir directories. If the directories
+-don't exist, they will be created with this permission.
+-
+-
+-
+- dfs.access.time.precision
+- 0
+- The access time for HDFS file is precise upto this value.
+- The default value is 1 hour. Setting a value of 0 disables
+- access times for HDFS.
+-
+-
+-
+-
+- dfs.cluster.administrators
+- hdfs
+- ACL for who all can view the default servlets in the HDFS
+-
+-
+-
+- ipc.server.read.threadpool.size
+- 5
+-
+-
+-
+-
+- dfs.namenode.check.stale.datanode
+- true
+-
+- With this setting, the datanodes that have not replied to the heartbeat
+- for more than 30s (i.e. in a stale state) are used for reads only if all
+- other remote replicas have failed.
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml
+deleted file mode 100644
+index 1fbfbe6..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/metainfo.xml
++++ /dev/null
+@@ -1,46 +0,0 @@
+-
+-
+-
+- root
+- Apache Hadoop Distributed File System
+- 2.0.3.22-1
+-
+-
+-
+- NAMENODE
+- MASTER
+-
+-
+-
+- DATANODE
+- SLAVE
+-
+-
+-
+- SECONDARY_NAMENODE
+- MASTER
+-
+-
+-
+- HDFS_CLIENT
+- CLIENT
+-
+-
+-
+-
+-
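The deleted metainfo.xml files in this section each pair a component name with a category (MASTER, SLAVE, or CLIENT) under a service-level user, comment, and version. A sketch of how the HDFS entries above would be laid out, assuming Ambari's usual <components>/<component> nesting:

   <metainfo>
     <user>root</user>
     <comment>Apache Hadoop Distributed File System</comment>
     <version>2.0.3.22-1</version>
     <components>
       <component>
         <name>NAMENODE</name>
         <category>MASTER</category>
       </component>
       <component>
         <name>DATANODE</name>
         <category>SLAVE</category>
       </component>
     </components>
   </metainfo>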
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
+deleted file mode 100644
+index 7d35558..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
++++ /dev/null
+@@ -1,138 +0,0 @@
+-
+-
+-
+-
+-
+-
+- hive.metastore.local
+- false
+- controls whether to connect to a remote metastore server or
+- open a new metastore server in the Hive Client JVM
+-
+-
+-
+- javax.jdo.option.ConnectionURL
+-
+- JDBC connect string for a JDBC metastore
+-
+-
+-
+- javax.jdo.option.ConnectionDriverName
+- com.mysql.jdbc.Driver
+- Driver class name for a JDBC metastore
+-
+-
+-
+- javax.jdo.option.ConnectionUserName
+-
+- username to use against metastore database
+-
+-
+-
+- javax.jdo.option.ConnectionPassword
+-
+- password to use against metastore database
+-
+-
+-
+- hive.metastore.warehouse.dir
+- /apps/hive/warehouse
+- location of default database for the warehouse
+-
+-
+-
+- hive.metastore.sasl.enabled
+-
+- If true, the metastore thrift interface will be secured with SASL.
+- Clients must authenticate with Kerberos.
+-
+-
+-
+- hive.metastore.kerberos.keytab.file
+-
+- The path to the Kerberos Keytab file containing the metastore
+- thrift server's service principal.
+-
+-
+-
+- hive.metastore.kerberos.principal
+-
+- The service principal for the metastore thrift server. The special
+- string _HOST will be replaced automatically with the correct host name.
+-
+-
+-
+- hive.metastore.cache.pinobjtypes
+- Table,Database,Type,FieldSchema,Order
+- List of comma separated metastore object types that should be pinned in the cache
+-
+-
+-
+- hive.metastore.uris
+-
+- URI for client to contact metastore server
+-
+-
+-
+- hive.semantic.analyzer.factory.impl
+- org.apache.hivealog.cli.HCatSemanticAnalyzerFactory
+- controls which SemanticAnalyzerFactory implementation class is used by the CLI
+-
+-
+-
+- hadoop.clientside.fs.operations
+- true
+- FS operations are owned by client
+-
+-
+-
+- hive.metastore.client.socket.timeout
+- 60
+- MetaStore Client socket timeout in seconds
+-
+-
+-
+- hive.metastore.execute.setugi
+- true
+- In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.
+-
+-
+-
+- hive.security.authorization.enabled
+- true
+- enable or disable the hive client authorization
+-
+-
+-
+- hive.security.authorization.manager
+- org.apache.hcatalog.security.HdfsAuthorizationProvider
+- the hive client authorization manager class name.
+- The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+-
+-
+-
+- hive.server2.enable.doAs
+- true
+-
+-
+-
+- fs.hdfs.impl.disable.cache
+- true
+-
+-
+-
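The hive-site.xml hunk above leaves the metastore JDBC URL, credentials, and thrift URI blank. A hedged sketch of what those entries typically look like for a MySQL-backed metastore (hostnames and database name below are placeholders, not values from this patch):

   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true</value>
     <description>JDBC connect string for a JDBC metastore</description>
   </property>
   <property>
     <name>hive.metastore.uris</name>
     <value>thrift://metastore.example.com:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>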
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml
+deleted file mode 100644
+index c87b494..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/metainfo.xml
++++ /dev/null
+@@ -1,43 +0,0 @@
+-
+-
+-
+- root
+- Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service
+- 0.10.0.22-1
+-
+-
+-
+- HIVE_METASTORE
+- MASTER
+-
+-
+- HIVE_SERVER
+- MASTER
+-
+-
+- MYSQL_SERVER
+- MASTER
+-
+-
+- HIVE_CLIENT
+- CLIENT
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg
+deleted file mode 100644
+index 502ddaa..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg
++++ /dev/null
+@@ -1,20 +0,0 @@
+-#
+-# Licensed to the Apache Software Foundation (ASF) under one or more
+-# contributor license agreements. See the NOTICE file distributed with
+-# this work for additional information regarding copyright ownership.
+-# The ASF licenses this file to You under the Apache License, Version 2.0
+-# (the "License"); you may not use this file except in compliance with
+-# the License. You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-#
+-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
+-yarn.nodemanager.linux-container-executor.group=hadoop
+-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
+-banned.users=hfds,bin,0
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml
+deleted file mode 100644
+index 3a2af49..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml
++++ /dev/null
+@@ -1,20 +0,0 @@
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+deleted file mode 100644
+index ce12380..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
++++ /dev/null
+@@ -1,39 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- mapred.queue.default.acl-submit-job
+- *
+-
+-
+-
+- mapred.queue.default.acl-administer-jobs
+- *
+-
+-
+-
+-
+-
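Both ACLs above default to '*', which opens the default queue to everyone. When tightening them, the Hadoop ACL value format is a comma-separated user list, a single space, then a comma-separated group list; the user and group names below are hypothetical:

   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>alice,bob etl-users</value>
   </property>
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>mapred hadoop</value>
   </property>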
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
+deleted file mode 100644
+index 29fcfeb..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
++++ /dev/null
+@@ -1,549 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- io.sort.mb
+-
+- No description
+-
+-
+-
+- io.sort.record.percent
+- .2
+- No description
+-
+-
+-
+- io.sort.spill.percent
+-
+- No description
+-
+-
+-
+- io.sort.factor
+- 100
+- No description
+-
+-
+-
+-
+-
+- mapred.tasktracker.tasks.sleeptime-before-sigkill
+- 250
+- Normally, this is the amount of time before killing
+- processes, and the recommended default is 5 seconds - a value of
+- 5000 milliseconds. In this case, we are using it solely to blast tasks before
+- killing them, and killing them very quickly (1/4 second) to guarantee
+- that we do not leave VMs around for later jobs.
+-
+-
+-
+-
+- mapred.job.tracker.handler.count
+- 50
+-
+- The number of server threads for the JobTracker. This should be roughly
+- 4% of the number of tasktracker nodes.
+-
+-
+-
+-
+- mapred.system.dir
+- /mapred/system
+- No description
+- true
+-
+-
+-
+- mapred.job.tracker
+-
+-
+- No description
+- true
+-
+-
+-
+- mapred.job.tracker.http.address
+-
+-
+- No description
+- true
+-
+-
+-
+-
+- mapred.local.dir
+-
+- No description
+- true
+-
+-
+-
+- mapreduce.cluster.administrators
+- hadoop
+-
+-
+-
+- mapred.reduce.parallel.copies
+- 30
+- No description
+-
+-
+-
+- mapred.tasktracker.map.tasks.maximum
+-
+- No description
+-
+-
+-
+- mapred.tasktracker.reduce.tasks.maximum
+-
+- No description
+-
+-
+-
+- tasktracker.http.threads
+- 50
+-
+-
+-
+- mapred.map.tasks.speculative.execution
+- false
+- If true, then multiple instances of some map tasks
+- may be executed in parallel.
+-
+-
+-
+- mapred.reduce.tasks.speculative.execution
+- false
+- If true, then multiple instances of some reduce tasks
+- may be executed in parallel.
+-
+-
+-
+- mapred.reduce.slowstart.completed.maps
+- 0.05
+-
+-
+-
+- mapred.inmem.merge.threshold
+- 1000
+- The threshold, in terms of the number of files
+- for the in-memory merge process. When we accumulate threshold number of files
+- we initiate the in-memory merge and spill to disk. A value of 0 or less
+- indicates that we do not want any threshold and instead depend only on
+- the ramfs's memory consumption to trigger the merge.
+-
+-
+-
+-
+- mapred.job.shuffle.merge.percent
+- 0.66
+- The usage threshold at which an in-memory merge will be
+- initiated, expressed as a percentage of the total memory allocated to
+- storing in-memory map outputs, as defined by
+- mapred.job.shuffle.input.buffer.percent.
+-
+-
+-
+-
+- mapred.job.shuffle.input.buffer.percent
+- 0.7
+- The percentage of memory to be allocated from the maximum heap
+- size to storing map outputs during the shuffle.
+-
+-
+-
+-
+- mapred.map.output.compression.codec
+-
+- If the map outputs are compressed, how should they be
+- compressed
+-
+-
+-
+-
+- mapred.output.compression.type
+- BLOCK
+- If the job outputs are to be compressed as SequenceFiles, how should
+- they be compressed? Should be one of NONE, RECORD or BLOCK.
+-
+-
+-
+-
+-
+- mapred.jobtracker.completeuserjobs.maximum
+- 5
+-
+-
+-
+- mapred.jobtracker.taskScheduler
+-
+-
+-
+-
+- mapred.jobtracker.restart.recover
+- false
+- "true" to enable (job) recovery upon restart,
+- "false" to start afresh
+-
+-
+-
+-
+- mapred.job.reduce.input.buffer.percent
+- 0.0
+- The percentage of memory- relative to the maximum heap size- to
+- retain map outputs during the reduce. When the shuffle is concluded, any
+- remaining map outputs in memory must consume less than this threshold before
+- the reduce can begin.
+-
+-
+-
+-
+- mapreduce.reduce.input.limit
+- 10737418240
+- The limit on the input size of the reduce. (This value
+- is 10 GB.) If the estimated input size of the reduce is greater than
+- this value, job is failed. A value of -1 means that there is no limit
+- set.
+-
+-
+-
+-
+-
+- mapred.compress.map.output
+-
+-
+-
+-
+-
+- mapred.task.timeout
+- 600000
+- The number of milliseconds before a task will be
+- terminated if it neither reads an input, writes an output, nor
+- updates its status string.
+-
+-
+-
+-
+- jetty.connector
+- org.mortbay.jetty.nio.SelectChannelConnector
+- No description
+-
+-
+-
+- mapred.task.tracker.task-controller
+-
+-
+- TaskController which is used to launch and manage task execution.
+-
+-
+-
+-
+- mapred.child.root.logger
+- INFO,TLA
+-
+-
+-
+- mapred.child.java.opts
+-
+-
+- No description
+-
+-
+-
+- mapred.cluster.map.memory.mb
+-
+-
+-
+-
+- mapred.cluster.reduce.memory.mb
+-
+-
+-
+-
+- mapred.job.map.memory.mb
+-
+-
+-
+-
+- mapred.job.reduce.memory.mb
+-
+-
+-
+-
+- mapred.cluster.max.map.memory.mb
+-
+-
+-
+-
+- mapred.cluster.max.reduce.memory.mb
+-
+-
+-
+-
+- mapred.hosts
+-
+-
+-
+-
+- mapred.hosts.exclude
+-
+-
+-
+-
+- mapred.max.tracker.blacklists
+- 16
+-
+- if a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+-
+-
+-
+-
+- mapred.healthChecker.script.path
+-
+-
+-
+-
+- mapred.healthChecker.interval
+- 135000
+-
+-
+-
+- mapred.healthChecker.script.timeout
+- 60000
+-
+-
+-
+- mapred.job.tracker.persist.jobstatus.active
+- false
+- Indicates if persistency of job status information is
+- active or not.
+-
+-
+-
+-
+- mapred.job.tracker.persist.jobstatus.hours
+- 1
+- The number of hours job status information is persisted in DFS.
+- The job status information will be available after it drops off the memory
+- queue and between jobtracker restarts. With a zero value the job status
+- information is not persisted at all in DFS.
+-
+-
+-
+-
+- mapred.job.tracker.persist.jobstatus.dir
+-
+- The directory where the job status information is persisted
+- in a file system to be available after it drops off the memory queue and
+- between jobtracker restarts.
+-
+-
+-
+-
+- mapred.jobtracker.retirejob.check
+- 10000
+-
+-
+-
+- mapred.jobtracker.retirejob.interval
+- 21600000
+-
+-
+-
+- mapred.job.tracker.history.completed.location
+- /mapred/history/done
+- No description
+-
+-
+-
+- mapred.task.maxvmem
+-
+- true
+- No description
+-
+-
+-
+- mapred.jobtracker.maxtasks.per.job
+-
+- true
+- The maximum number of tasks for a single job.
+- A value of -1 indicates that there is no maximum.
+-
+-
+-
+- mapreduce.fileoutputcommitter.marksuccessfuljobs
+- false
+-
+-
+-
+- mapred.userlog.retain.hours
+-
+-
+-
+-
+- mapred.job.reuse.jvm.num.tasks
+- 1
+-
+- How many tasks to run per jvm. If set to -1, there is no limit
+-
+- true
+-
+-
+-
+- mapreduce.jobtracker.kerberos.principal
+-
+-
+- JT user name key.
+-
+-
+-
+-
+- mapreduce.tasktracker.kerberos.principal
+-
+-
+- tt user name key. "_HOST" is replaced by the host name of the task tracker.
+-
+-
+-
+-
+-
+- hadoop.job.history.user.location
+- none
+- true
+-
+-
+-
+-
+- mapreduce.jobtracker.keytab.file
+-
+-
+- The keytab for the jobtracker principal.
+-
+-
+-
+-
+-
+- mapreduce.tasktracker.keytab.file
+-
+- The filename of the keytab for the task tracker
+-
+-
+-
+- mapreduce.jobtracker.staging.root.dir
+- /user
+- The Path prefix for where the staging directories should be placed. The next level is always the user's
+- name. It is a path in the default file system.
+-
+-
+-
+- mapreduce.tasktracker.group
+- hadoop
+- The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.
+-
+-
+-
+-
+- mapreduce.jobtracker.split.metainfo.maxsize
+- 50000000
+- true
+- If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+- initialization.
+-
+-
+-
+- mapreduce.history.server.embedded
+- false
+- Should job history server be embedded within Job tracker
+-process
+- true
+-
+-
+-
+- mapreduce.history.server.http.address
+-
+-
+- Http address of the history server
+- true
+-
+-
+-
+- mapreduce.jobhistory.kerberos.principal
+-
+-
+- Job history user name key. (must map to same user as JT
+-user)
+-
+-
+-
+- mapreduce.jobhistory.keytab.file
+-
+-
+- The keytab for the job history server principal.
+-
+-
+-
+- mapred.jobtracker.blacklist.fault-timeout-window
+- 180
+-
+- 3-hour sliding window (value is in minutes)
+-
+-
+-
+-
+- mapred.jobtracker.blacklist.fault-bucket-width
+- 15
+-
+- 15-minute bucket size (value is in minutes)
+-
+-
+-
+-
+- mapred.queue.names
+- default
+- Comma separated list of queues configured for this jobtracker.
+-
+-
+-
+- mapreduce.shuffle.port
+- 8081
+- Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.
+-
+-
+-
+- mapreduce.jobhistory.intermediate-done-dir
+- /mr-history/tmp
+- Directory where history files are written by MapReduce jobs.
+-
+-
+-
+- mapreduce.jobhistory.done-dir
+- /mr-history/done
+- Directory where history files are managed by the MR JobHistory Server.
+-
+-
+-
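Several of the memory-related entries above (io.sort.mb, mapred.child.java.opts, the per-slot memory limits) are left blank in this test resource, presumably so they can be filled in per cluster. As an illustrative pairing only, with placeholder values: the map-side sort buffer has to fit inside the task heap.

   <property>
     <name>mapred.child.java.opts</name>
     <value>-Xmx1024m</value>
   </property>
   <property>
     <name>io.sort.mb</name>
     <value>200</value>
     <description>Map-side sort buffer, in MB; must fit inside the 1024 MB task heap set above.</description>
   </property>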
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml
+deleted file mode 100644
+index 7c4d1f4..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml
++++ /dev/null
+@@ -1,32 +0,0 @@
+-
+-
+-
+- mapred
+- Apache Hadoop NextGen MapReduce (client libraries)
+- 2.0.3.22-1
+-
+-
+- HISTORYSERVER
+- MASTER
+-
+-
+- MAPREDUCE2_CLIENT
+- CLIENT
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
+deleted file mode 100644
+index bd7de07..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
++++ /dev/null
+@@ -1,30 +0,0 @@
+-
+-
+-
+- root
+- Nagios Monitoring and Alerting system
+- 3.2.3
+-
+-
+-
+- NAGIOS_SERVER
+- MASTER
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
+deleted file mode 100644
+index 1665ba8..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
++++ /dev/null
+@@ -1,245 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+- oozie.base.url
+- http://localhost:11000/oozie
+- Base Oozie URL.
+-
+-
+-
+- oozie.system.id
+- oozie-${user.name}
+-
+- The Oozie system ID.
+-
+-
+-
+-
+- oozie.systemmode
+- NORMAL
+-
+- System mode for Oozie at startup.
+-
+-
+-
+-
+- oozie.service.AuthorizationService.security.enabled
+- true
+-
+- Specifies whether security (user name/admin role) is enabled or not.
+- If disabled, any user can manage the Oozie system and manage any job.
+-
+-
+-
+-
+- oozie.service.PurgeService.older.than
+- 30
+-
+- Jobs older than this value, in days, will be purged by the PurgeService.
+-
+-
+-
+-
+- oozie.service.PurgeService.purge.interval
+- 3600
+-
+- Interval at which the purge service will run, in seconds.
+-
+-
+-
+-
+- oozie.service.CallableQueueService.queue.size
+- 1000
+- Max callable queue size
+-
+-
+-
+- oozie.service.CallableQueueService.threads
+- 10
+- Number of threads used for executing callables
+-
+-
+-
+- oozie.service.CallableQueueService.callable.concurrency
+- 3
+-
+- Maximum concurrency for a given callable type.
+- Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+- Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+- All commands that use action executors (action-start, action-end, action-kill and action-check) use
+- the action type as the callable type.
+-
+-
+-
+-
+- oozie.service.coord.normal.default.timeout
+- 120
+- Default timeout for a coordinator action input check (in minutes) for normal job.
+- -1 means infinite timeout
+-
+-
+-
+- oozie.db.schema.name
+- oozie
+-
+- Oozie DataBase Name
+-
+-
+-
+-
+- oozie.service.HadoopAccessorService.jobTracker.whitelist
+-
+-
+- Whitelisted job tracker for Oozie service.
+-
+-
+-
+-
+- oozie.authentication.type
+- simple
+-
+-
+-
+-
+-
+- oozie.service.HadoopAccessorService.nameNode.whitelist
+-
+-
+-
+-
+-
+-
+- oozie.service.WorkflowAppService.system.libpath
+- /user/${user.name}/share/lib
+-
+- System library path to use for workflow applications.
+- This path is added to workflow application if their job properties sets
+- the property 'oozie.use.system.libpath' to true.
+-
+-
+-
+-
+- use.system.libpath.for.mapreduce.and.pig.jobs
+- false
+-
+- If set to true, submissions of MapReduce and Pig jobs will automatically
+- include the system library path, thus not requiring users to
+- specify where the Pig JAR files are. Instead, the ones from the system
+- library path are used.
+-
+-
+-
+- oozie.authentication.kerberos.name.rules
+-
+- RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+- RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+- RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+- RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+- DEFAULT
+-
+- The mapping from kerberos principal names to local OS user names.
+-
+-
+- oozie.service.HadoopAccessorService.hadoop.configurations
+- *=/etc/hadoop/conf
+-
+- Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+- the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+- used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+- the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+- the Oozie configuration directory; the path can also be absolute (i.e. pointing
+- to Hadoop client conf/ directories in the local filesystem).
+-
+-
+-
+- oozie.service.ActionService.executor.ext.classes
+-
+- org.apache.oozie.action.email.EmailActionExecutor,
+- org.apache.oozie.action.hadoop.HiveActionExecutor,
+- org.apache.oozie.action.hadoop.ShellActionExecutor,
+- org.apache.oozie.action.hadoop.SqoopActionExecutor,
+- org.apache.oozie.action.hadoop.DistcpActionExecutor
+-
+-
+-
+-
+- oozie.service.SchemaService.wf.ext.schemas
+- shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+-
+-
+- oozie.service.JPAService.create.db.schema
+- false
+-
+- Creates Oozie DB.
+-
+- If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
+- If set to false, it does not create the DB schema; if the DB schema does not exist, start up fails.
+-
+-
+-
+-
+- oozie.service.JPAService.jdbc.driver
+- org.apache.derby.jdbc.EmbeddedDriver
+-
+- JDBC driver class.
+-
+-
+-
+-
+- oozie.service.JPAService.jdbc.url
+- jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true
+-
+- JDBC URL.
+-
+-
+-
+-
+- oozie.service.JPAService.jdbc.username
+- sa
+-
+- DB user name.
+-
+-
+-
+-
+- oozie.service.JPAService.jdbc.password
+-
+-
+- DB user password.
+-
+- IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
+- and if it is empty, Configuration assumes it is NULL.
+-
+-
+-
+-
+- oozie.service.JPAService.pool.max.active.conn
+- 10
+-
+- Max number of connections.
+-
+-
+-
+\ No newline at end of file
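The JPAService settings above point Oozie at an embedded Derby database. A hedged sketch of the same two properties re-pointed at an external MySQL instance (host, port, and database name are placeholders):

   <property>
     <name>oozie.service.JPAService.jdbc.driver</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>JDBC driver class.</description>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.url</name>
     <value>jdbc:mysql://db.example.com:3306/oozie</value>
     <description>JDBC URL.</description>
   </property>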
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml
+deleted file mode 100644
+index a65b547..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/metainfo.xml
++++ /dev/null
+@@ -1,35 +0,0 @@
+-
+-
+-
+- root
+- System for workflow coordination and execution of Apache Hadoop jobs
+- 3.3.1
+-
+-
+-
+- OOZIE_SERVER
+- MASTER
+-
+-
+-
+- OOZIE_CLIENT
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties
+deleted file mode 100644
+index 01000b5..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/configuration/pig.properties
++++ /dev/null
+@@ -1,52 +0,0 @@
+-# Licensed to the Apache Software Foundation (ASF) under one
+-# or more contributor license agreements. See the NOTICE file
+-# distributed with this work for additional information
+-# regarding copyright ownership. The ASF licenses this file
+-# to you under the Apache License, Version 2.0 (the
+-# "License"); you may not use this file except in compliance
+-# with the License. You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+-# see bin/pig -help
+-
+-# brief logging (no timestamps)
+-brief=false
+-
+-#debug level, INFO is default
+-debug=INFO
+-
+-#verbose print all log messages to screen (default to print only INFO and above to screen)
+-verbose=false
+-
+-#exectype local|mapreduce, mapreduce is default
+-exectype=mapreduce
+-
+-#Enable insertion of information about script into hadoop job conf
+-pig.script.info.enabled=true
+-
+-#Do not spill temp files smaller than this size (bytes)
+-pig.spill.size.threshold=5000000
+-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+-#This should help reduce the number of files being spilled.
+-pig.spill.gc.activation.size=40000000
+-
+-#the following two parameters are to help estimate the reducer number
+-pig.exec.reducers.bytes.per.reducer=1000000000
+-pig.exec.reducers.max=999
+-
+-#Temporary location to store the intermediate data.
+-pig.temp.dir=/tmp/
+-
+-#Threshold for merging FRJoin fragment files
+-pig.files.concatenation.threshold=100
+-pig.optimistic.files.concatenation=false;
+-
+-pig.disable.counter=false
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml
+deleted file mode 100644
+index 731d7b0..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/PIG/metainfo.xml
++++ /dev/null
+@@ -1,30 +0,0 @@
+-
+-
+-
+- root
+- Scripting platform for analyzing large datasets
+- 0.10.1.22-1
+-
+-
+-
+- PIG
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml
+deleted file mode 100644
+index 30aa43e..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml
++++ /dev/null
+@@ -1,30 +0,0 @@
+-
+-
+-
+- root
+- Tez is the next generation Hadoop Query Processing framework written on top of YARN
+- 0.1.0.22-1
+-
+-
+-
+- TEZ_CLIENT
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
+deleted file mode 100644
+index 31d0113..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
++++ /dev/null
+@@ -1,126 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- templeton.port
+- 50111
+- The HTTP port for the main server.
+-
+-
+-
+- templeton.hadoop.conf.dir
+- /etc/hadoop/conf
+- The path to the Hadoop configuration.
+-
+-
+-
+- templeton.jar
+- /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
+- The path to the Templeton jar file.
+-
+-
+-
+- templeton.libjars
+- /usr/lib/zookeeper/zookeeper.jar
+- Jars to add to the classpath.
+-
+-
+-
+-
+- templeton.hadoop
+- /usr/bin/hadoop
+- The path to the Hadoop executable.
+-
+-
+-
+- templeton.pig.archive
+- hdfs:///apps/webhcat/pig.tar.gz
+- The path to the Pig archive.
+-
+-
+-
+- templeton.pig.path
+- pig.tar.gz/pig/bin/pig
+- The path to the Pig executable.
+-
+-
+-
+- templeton.hcat
+- /usr/bin/hcat
+- The path to the hcatalog executable.
+-
+-
+-
+- templeton.hive.archive
+- hdfs:///apps/webhcat/hive.tar.gz
+- The path to the Hive archive.
+-
+-
+-
+- templeton.hive.path
+- hive.tar.gz/hive/bin/hive
+- The path to the Hive executable.
+-
+-
+-
+- templeton.hive.properties
+-
+- Properties to set when running hive.
+-
+-
+-
+-
+- templeton.zookeeper.hosts
+-
+- ZooKeeper servers, as comma separated host:port pairs
+-
+-
+-
+- templeton.storage.class
+- org.apache.hcatalog.templeton.tool.ZooKeeperStorage
+- The class to use as storage
+-
+-
+-
+- templeton.override.enabled
+- false
+-
+- Enable the override path in templeton.override.jars
+-
+-
+-
+-
+- templeton.streaming.jar
+- hdfs:///apps/webhcat/hadoop-streaming.jar
+- The hdfs path to the Hadoop streaming jar file.
+-
+-
+-
+- templeton.exec.timeout
+- 60000
+- Time out for templeton api
+-
+-
+-
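templeton.zookeeper.hosts and templeton.hive.properties are left blank above. For reference, the first takes comma-separated host:port pairs and the second takes comma-separated key=value pairs passed through to Hive; the hostnames below are placeholders:

   <property>
     <name>templeton.zookeeper.hosts</name>
     <value>zk1.example.com:2181,zk2.example.com:2181</value>
   </property>
   <property>
     <name>templeton.hive.properties</name>
     <value>hive.metastore.local=false,hive.metastore.uris=thrift://metastore.example.com:9083</value>
   </property>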
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml
+deleted file mode 100644
+index e65992f..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/metainfo.xml
++++ /dev/null
+@@ -1,31 +0,0 @@
+-
+-
+-
+- root
+- This is comment for WEBHCAT service
+- 0.5.0
+-
+-
+-
+- WEBHCAT_SERVER
+- MASTER
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml
+deleted file mode 100644
+index 3f78292..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/capacity-scheduler.xml
++++ /dev/null
+@@ -1,120 +0,0 @@
+-
+-
+-
+-
+-
+- yarn.scheduler.capacity.maximum-applications
+- 10000
+-
+- Maximum number of applications that can be pending and running.
+-
+-
+-
+-
+- yarn.scheduler.capacity.maximum-am-resource-percent
+- 0.1
+-
+- Maximum percent of resources in the cluster which can be used to run
+- application masters, i.e. it controls the number of concurrently running
+- applications.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.queues
+- default
+-
+- The queues at this level (root is the root queue).
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.capacity
+- 100
+-
+- The total capacity as a percentage out of 100 for this queue.
+- If it has child queues then this includes their capacity as well.
+- The child queues capacity should add up to their parent queue's capacity
+- or less.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.default.capacity
+- 100
+- Default queue target capacity.
+-
+-
+-
+- yarn.scheduler.capacity.root.default.user-limit-factor
+- 1
+-
+- Default queue user limit, a percentage from 0.0 to 1.0.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.default.maximum-capacity
+- 100
+-
+- The maximum capacity of the default queue.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.default.state
+- RUNNING
+-
+- The state of the default queue. State can be one of RUNNING or STOPPED.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.default.acl_submit_jobs
+- *
+-
+- The ACL of who can submit jobs to the default queue.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.default.acl_administer_jobs
+- *
+-
+- The ACL of who can administer jobs on the default queue.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.acl_administer_queues
+- *
+-
+- The ACL for who can administer this queue i.e. change sub-queue
+- allocations.
+-
+-
+-
+-
+- yarn.scheduler.capacity.root.unfunded.capacity
+- 50
+-
+- No description
+-
+-
+-
+-
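Per the descriptions above, sibling queue capacities must not add up to more than their parent's capacity. A minimal sketch of splitting the root queue into two queues ('analytics' is a hypothetical queue name, not part of this patch):

   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
     <value>default,analytics</value>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>70</value>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.analytics.capacity</name>
     <value>30</value>
   </property>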
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg
+deleted file mode 100644
+index 502ddaa..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/container-executor.cfg
++++ /dev/null
+@@ -1,20 +0,0 @@
+-#
+-# Licensed to the Apache Software Foundation (ASF) under one or more
+-# contributor license agreements. See the NOTICE file distributed with
+-# this work for additional information regarding copyright ownership.
+-# The ASF licenses this file to You under the Apache License, Version 2.0
+-# (the "License"); you may not use this file except in compliance with
+-# the License. You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-#
+-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
+-yarn.nodemanager.linux-container-executor.group=hadoop
+-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
+-banned.users=hfds,bin,0
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml
+deleted file mode 100644
+index 3a2af49..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/core-site.xml
++++ /dev/null
+@@ -1,20 +0,0 @@
+-
+-
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml
+deleted file mode 100644
+index e6c02bd..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/configuration/yarn-site.xml
++++ /dev/null
+@@ -1,172 +0,0 @@
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- yarn.resourcemanager.resource-tracker.address
+- TODO-RMNODE-HOSTNAME:8025
+-
+-
+-
+- yarn.resourcemanager.scheduler.address
+- TODO-RMNODE-HOSTNAME:8030
+-
+-
+-
+- yarn.resourcemanager.address
+- TODO-RMNODE-HOSTNAME:8050
+-
+-
+-
+- yarn.resourcemanager.admin.address
+- TODO-RMNODE-HOSTNAME:8141
+-
+-
+-
+- yarn.resourcemanager.scheduler.class
+- org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+-
+-
+-
+- yarn.scheduler.minimum-allocation-mb
+- 1024
+-
+-
+-
+- yarn.scheduler.maximum-allocation-mb
+- 8192
+-
+-
+-
+-
+-
+- yarn.nodemanager.address
+- 0.0.0.0:45454
+-
+-
+-
+- yarn.nodemanager.local-dirs
+- TODO-YARN-LOCAL-DIR
+-
+-
+-
+- yarn.nodemanager.resource.memory-mb
+- 8192
+- Amount of physical memory, in MB, that can be allocated
+- for containers.
+-
+-
+-
+- yarn.application.classpath
+- /etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
+- Classpath for typical applications.
+-
+-
+-
+- yarn.nodemanager.vmem-pmem-ratio
+- 2.1
+- Ratio between virtual memory to physical memory when
+- setting memory limits for containers. Container allocations are
+- expressed in terms of physical memory, and virtual memory usage
+- is allowed to exceed this allocation by this ratio.
+-
+-
+-
+-
+- yarn.nodemanager.container-executor.class
+- org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
+- ContainerExecutor for launching containers
+-
+-
+-
+- yarn.nodemanager.aux-services
+- mapreduce.shuffle
+- Auxiliary services of the NodeManager
+-
+-
+-
+- yarn.nodemanager.aux-services.class
+- org.apache.hadoop.mapred.ShuffleHandler
+-
+-
+-
+- yarn.nodemanager.log-dirs
+- TODO-YARN-LOG-DIR
+-
+-
+-
+- yarn.nodemanager.container-monitor.interval-ms
+- 3000
+- The interval, in milliseconds, for which the node manager
+- waits between two cycles of monitoring its containers' memory usage.
+-
+-
+-
+-
+- yarn.nodemanager.health-checker.script.path
+- /etc/hadoop/conf/health_check
+-
+-
+-
+- yarn.nodemanager.health-checker.interval-ms
+- 135000
+-
+-
+-
+- yarn.nodemanager.health-checker.script.timeout-ms
+- 60000
+-
+-
+-
+- yarn.nodemanager.log.retain-second
+- 604800
+-
+-
+-
+- yarn.log-aggregation-enable
+- true
+-
+-
+-
+- yarn.nodemanager.remote-app-log-dir
+- /app-logs
+-
+-
+-
+- yarn.nodemanager.remote-app-log-dir-suffix
+- logs
+-
+-
+-
+- yarn.nodemanager.log-aggregation.compression-type
+- gz
+-
+-
+-
+- yarn.nodemanager.delete.debug-delay-sec
+- 36000
+-
+-
+-
+-
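The three memory settings above interact: container requests are rounded up to multiples of the scheduler minimum allocation and capped by both the maximum allocation and the NodeManager's total. Restating the values from this hunk side by side:

   <property>
     <name>yarn.nodemanager.resource.memory-mb</name>
     <value>8192</value>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-mb</name>
     <value>1024</value>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-mb</name>
     <value>8192</value>
   </property>
   <!-- With these settings a node can host at most eight 1 GB containers,
        or a single container as large as 8 GB. -->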
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml
+deleted file mode 100644
+index 743c40d..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/YARN/metainfo.xml
++++ /dev/null
+@@ -1,36 +0,0 @@
+-
+-
+-
+- mapred
+- Apache Hadoop NextGen MapReduce (YARN)
+- 2.0.3.22-1
+-
+-
+- RESOURCEMANAGER
+- MASTER
+-
+-
+- NODEMANAGER
+- SLAVE
+-
+-
+- YARN_CLIENT
+- CLIENT
+-
+-
+-
+diff --git ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml
+deleted file mode 100644
+index e72fd2a..0000000
+--- ambari-server/src/test/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml
++++ /dev/null
+@@ -1,35 +0,0 @@
+-
+-
+-
+- root
+- Centralized service which provides highly reliable distributed coordination
+- 3.4.5.22-1
+-
+-
+-
+- ZOOKEEPER_SERVER
+- MASTER
+-
+-
+-
+- ZOOKEEPER_CLIENT
+- CLIENT
+-
+-
+-
+-
+diff --git ambari-web/pom.xml ambari-web/pom.xml
+index 10f8f43..a72596f 100644
+--- ambari-web/pom.xml
++++ ambari-web/pom.xml
+@@ -31,7 +31,6 @@
+ Ambari Web
+
+ UTF-8
+- python2
+
+
+
+@@ -91,7 +90,7 @@
+
+
+
+-
++
+
+
+