@@ -30,7 +30,7 @@ public enum DependencyType {
 @SerializedName("INSTALL")
 INSTALL
 }
-@SerializedName("dependencyType")
+@SerializedName("type")
 private DependencyType dependencyType;

 public String getName() {
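
Note: this hunk only renames the JSON key that Gson reads and writes; the Java field dependencyType itself is unchanged. A hypothetical before/after of a serialized dependency (the name field and the values are assumed for illustration, not taken from this diff):

# Hypothetical payloads illustrating the @SerializedName change above.
old_payload = {"name": "ZOOKEEPER", "dependencyType": "INSTALL"}  # key before this change
new_payload = {"name": "ZOOKEEPER", "type": "INSTALL"}            # key after this change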
@@ -1287,7 +1287,10 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) VALUES
 ('servicecomponent_version_id_seq', 0),
 ('blueprint_service_id_seq', 0),
 ('blueprint_mpack_instance_id_seq', 0),
-('hostgroup_component_id_seq', 0);
+('hostgroup_component_id_seq', 0),
+('repo_os_id_seq', 0),
+('repo_definition_id_seq', 0),
+('hostcomponentdesiredstate_id_seq', 0);
@@ -27,6 +27,7 @@
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 from string import lower

 config = Script.get_config()
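
The body of get_cluster_setting_value is not part of this diff; only the import and its call sites are. As a rough sketch of the contract those call sites rely on — the clusterSettings section name is an assumption, not confirmed by this PR:

# Sketch only: a minimal reading of what the imported helper must do.
# The real implementation lives in
# resource_management.libraries.functions.cluster_settings.
from resource_management.libraries.script.script import Script

def get_cluster_setting_value(setting_name):
  # Assumption: cluster-wide settings now arrive in their own section of the
  # command JSON rather than under configurations/cluster-env.
  settings = Script.get_config().get('clusterSettings', {})
  return settings.get(setting_name)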
@@ -64,7 +65,7 @@
 versioned_stack_root = '/usr/hdp/current'

 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')

 #java params
 java_home = config['hostLevelParams']['java_home']
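
The same mechanical substitution repeats across the parameter scripts in this PR; the pattern, shown once:

# Old: cluster-wide flags were read from the cluster-env configuration type.
security_enabled = config['configurations']['cluster-env']['security_enabled']
# New: the same flag is resolved through the cluster-settings helper.
security_enabled = get_cluster_setting_value('security_enabled')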
@@ -94,7 +95,7 @@

 #users and groups
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')

 namenode_host = default("/clusterHostInfo/namenode_host", [])
 has_namenode = not len(namenode_host) == 0
@@ -37,6 +37,7 @@
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 from ambari_commons.constants import AMBARI_SUDO_BINARY


@@ -72,7 +73,7 @@
 ambari_java_home = default("/commandParams/ambari_java_home", None)
 ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)

-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

 # Some datanode settings
@@ -163,7 +164,7 @@ def is_secure_port(port):

 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user = get_cluster_setting_value('smokeuser')
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 tez_user = config['configurations']['tez-env']["tez_user"]
@@ -173,7 +174,7 @@ def is_secure_port(port):
 zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
 zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]

-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')

 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
@@ -219,9 +220,9 @@ def is_secure_port(port):
 ranger_group = config['configurations']['ranger-env']['ranger_group']
 dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]

-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+sysprep_skip_create_users_and_groups = get_cluster_setting_value('sysprep_skip_create_users_and_groups')
+ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
+fetch_nonlocal_groups = get_cluster_setting_value('fetch_nonlocal_groups')

 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:
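
The replaced lines used default(..., False), so a missing key quietly fell back to False; the new calls pass only the setting name, which implies defaulting now happens behind the helper (or in the cluster settings definition itself). If a call site still wanted an explicit fallback, a thin wrapper along these lines would do — hypothetical, not part of this PR:

# Hypothetical convenience wrapper; not part of this PR.
def get_cluster_setting_or_default(setting_name, fallback):
  value = get_cluster_setting_value(setting_name)
  return fallback if value is None else value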
@@ -247,7 +248,7 @@ def is_secure_port(port):
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

 tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+override_uid = get_cluster_setting_value('override_uid')

 # if NN HA on secure cluster, access ZooKeeper securely
 if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
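
One behavioral nuance: the old override_uid line coerced the value to the lowercase string 'true'/'false', while the new call takes the helper's return value as-is, so downstream string comparisons depend on the helper returning an equivalently normalized value. If normalization were still needed at the call site, it would look like this (shown only to flag the nuance; presumably the helper makes it unnecessary):

# Only needed if the helper returned a raw or boolean value while downstream
# code still compared against the strings 'true'/'false'.
override_uid = str(get_cluster_setting_value('override_uid')).lower()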
@@ -23,6 +23,7 @@
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import default, format
 from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value

 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -35,19 +36,19 @@

 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user = get_cluster_setting_value('smokeuser')
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 tez_user = config['configurations']['tez-env']["tez_user"]

-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")

 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']

 # repo templates
-repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
+repo_rhel_suse = get_cluster_setting_value('repo_suse_rhel_template')
+repo_ubuntu = get_cluster_setting_value('repo_ubuntu_template')

 #hosts
 hostname = config["hostname"]
@@ -93,7 +94,7 @@
 hbase_tmp_dir = "/tmp/hbase-hbase"

 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')

 #java params
 java_home = config['hostLevelParams']['java_home']
@@ -102,7 +103,7 @@
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
 jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
@@ -34,6 +34,7 @@
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value

 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -49,10 +50,10 @@

 # Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
 # This is required if tarballs are going to be copied to HDFS, so set to False
-sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and get_cluster_setting_value('sysprep_skip_copy_fast_jar_hdfs')

 # Whether to skip setting up the unlimited key JCE policy
-sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
+sysprep_skip_setup_jce = host_sys_prepped and get_cluster_setting_value('sysprep_skip_setup_jce')

 stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
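
Both sysprep flags combine host_sys_prepped with the helper via and, so Python's short-circuit evaluation means the cluster setting is only consulted on hosts that were actually sys-prepped:

# host_sys_prepped is False on ordinary hosts, so the expression below
# short-circuits to False without ever calling the helper.
sysprep_skip_setup_jce = host_sys_prepped and get_cluster_setting_value('sysprep_skip_setup_jce')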
@@ -79,7 +80,7 @@
 current_service = config['serviceName']

 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')

 ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
 if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
@@ -98,7 +99,7 @@
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 yarn_user = config['configurations']['yarn-env']['yarn_user']

-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')

 #hosts
 hostname = config["hostname"]
@@ -292,7 +293,7 @@
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
-smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user = get_cluster_setting_value('smokeuser')
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770

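Aside: 0770 is a Python 2 octal literal (consistent with the from string import lower seen earlier — these agent scripts are Python 2). The Python 3 spelling of the same mode would be:

# Python 3 equivalent of the octal mode above: rwx for owner and group, none for other.
smoke_hdfs_user_mode = 0o770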