diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index b9a415fa08e..b7f42b3c215 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -287,11 +287,11 @@ def setup_security(args):
   iAction = 0
   for actionDesc in actions:
     iAction += 1
-    print(' [{0}] {1}'.format(iAction, actionDesc[1]))
+    print(f' [{iAction}] {actionDesc[1]}')
   print('=' * 75)
-  choice_prompt = 'Enter choice, (1-{0}): '.format(iAction)
-  choice_re = '[1-{0}]'.format(iAction)
+  choice_prompt = f'Enter choice, (1-{iAction}): '
+  choice_re = f'[1-{iAction}]'
   choice = get_validated_string_input(choice_prompt, '0', choice_re, 'Invalid choice', False, False)
@@ -358,13 +358,11 @@ def print_action_arguments_help(action):
   required_options = _action_option_dependence_map[action][0]
   optional_options = _action_option_dependence_map[action][1]
   if required_options or optional_options:
-    print("Options used by action {0}:".format(action))
+    print(f"Options used by action {action}:")
     if required_options:
-      print(" required:{0}".format(
-        ";".join([print_opt for print_opt, _ in required_options])))
+      print(f" required:{';'.join([print_opt for print_opt, _ in required_options])}")
     if optional_options:
-      print(" optional:{0}".format(
-        ";".join([print_opt for print_opt, _ in optional_options])))
+      print(f" optional:{';'.join([print_opt for print_opt, _ in optional_options])}")
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def init_action_parser(action, parser):
@@ -439,7 +437,7 @@ def init_action_parser(action, parser):
   default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
   add_parser_options('--purge-list',
                      default=default_purge_resources,
-                     help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
+                     help=f"Comma separated list of resources to purge ({purge_resources}). By default ({default_purge_resources}) will be purged.",
                      dest="purge_list",
                      parser=parser,
                      optional_for_actions=[INSTALL_MPACK_ACTION]
@@ -560,7 +558,7 @@ def init_ldap_setup_parser_options(parser):
   parser.add_option('--ldap-secondary-host', action="callback", callback=check_ldap_url_options, type='str', default=None, help="Secondary Host for LDAP (must not be used together with --ldap-secondary-url)", dest="ldap_secondary_host")
   parser.add_option('--ldap-secondary-port', action="callback", callback=check_ldap_url_options, type='int', default=None, help="Secondary Port for LDAP (must not be used together with --ldap-secondary-url)", dest="ldap_secondary_port")
   parser.add_option('--ldap-ssl', default=None, help="Use SSL [true/false] for LDAP", dest="ldap_ssl")
-  parser.add_option('--ldap-type', default=None, help="Specify ldap type [{}] for offering defaults for missing options.".format("/".join(LDAP_TYPES)), dest="ldap_type")
+  parser.add_option('--ldap-type', default=None, help=f"Specify ldap type [{'/'.join(LDAP_TYPES)}] for offering defaults for missing options.", dest="ldap_type")
   parser.add_option('--ldap-user-class', default=None, help="User Attribute Object Class for LDAP", dest="ldap_user_class")
   parser.add_option('--ldap-user-attr', default=None, help="User Attribute Name for LDAP", dest="ldap_user_attr")
   parser.add_option('--ldap-user-group-member-attr', default=None, help="User Group Member Attribute for LDAP", dest="ldap_user_group_member_attr")
@@ -683,7 +681,7 @@ def init_install_mpack_parser_options(parser):
   default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
   parser.add_option('--purge-list', default=default_purge_resources,
-                    help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
+                    help=f"Comma separated list of resources to purge ({purge_resources}). By default ({default_purge_resources}) will be purged.",
                     dest="purge_list")
   parser.add_option('--force', action="store_true", default=False, help="Force install management pack", dest="force")
@@ -924,7 +922,7 @@ def setup_logging(logger, filename, logging_level):
   logging.basicConfig(format=formatstr, level=logging_level, filename=filename)
   logger.setLevel(logging_level)
-  logger.info("loglevel=logging.{0}".format(logging._levelToName[logging_level]))
+  logger.info(f"loglevel=logging.{logging._levelToName[logging_level]}")
 
 def init_logging():
   # init logger
@@ -999,7 +997,7 @@ def main(options, args, parser):
     possible_args = ' or '.join(str(x) for x in action_obj.possible_args_numbers)
     parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + possible_args)
 
-  options.exit_message = "Ambari Server '%s' completed successfully." % action
+  options.exit_message = f"Ambari Server '{action}' completed successfully."
   options.exit_code = None
 
   try:
@@ -1007,7 +1005,7 @@ def main(options, args, parser):
       required, optional = _action_option_dependence_map[action]
       for opt_str, opt_dest in required:
         if hasattr(options, opt_dest) and getattr(options, opt_dest) is None:
-          print("Missing option {0} for action {1}".format(opt_str, action))
+          print(f"Missing option {opt_str} for action {action}")
           print_action_arguments_help(action)
           print("Run ambari-server.py --help to see detailed description of each option")
           raise FatalException(1, "Missing option")
@@ -1023,15 +1021,15 @@ def main(options, args, parser):
       for warning in options.warnings:
         print_warning_msg(warning)
         pass
-      options.exit_message = "Ambari Server '%s' completed with warnings." % action
+      options.exit_message = f"Ambari Server '{action}' completed with warnings."
       pass
   except FatalException as e:
     if e.reason is not None:
-      print_error_msg("Exiting with exit code {0}. \nREASON: {1}".format(e.code, e.reason))
+      print_error_msg(f"Exiting with exit code {e.code}. \nREASON: {e.reason}")
       logger.exception(str(e))
     sys.exit(e.code)
   except NonFatalException as e:
-    options.exit_message = "Ambari Server '%s' completed with warnings." % action
+    options.exit_message = f"Ambari Server '{action}' completed with warnings."
     if e.reason is not None:
       print_warning_msg(e.reason)
@@ -1069,7 +1067,7 @@ def mainBody():
     try:
       main(options, args, parser)
     except Exception as e:
-      print_error_msg("Unexpected {0}: {1}".format((e).__class__.__name__, str(e)) +\
+      print_error_msg(f"Unexpected {e.__class__.__name__}: {str(e)}" +\
                       "\nFor more info run ambari-server with -v or --verbose option")
       sys.exit(1)
diff --git a/ambari-server/src/main/python/ambari_server/dbCleanup.py b/ambari-server/src/main/python/ambari_server/dbCleanup.py
index 7c418568de0..c935f0c02b0 100644
--- a/ambari-server/src/main/python/ambari_server/dbCleanup.py
+++ b/ambari-server/src/main/python/ambari_server/dbCleanup.py
@@ -147,5 +147,5 @@ def validate_args(options):
   try:
     datetime.datetime.strptime(options.purge_from_date, "%Y-%m-%d")
   except ValueError as e:
-    print_error_msg("The --from-date argument has an invalid format. {0}".format(e.args[0]))
+    print_error_msg(f"The --from-date argument has an invalid format. {e.args[0]}")
     return 1
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration.py b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
index 0f0a272a45e..a70f045fbd2 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
@@ -145,7 +145,7 @@ def configure_database(self, properties, options):
     return result
 
   def setup_database(self):
-    print('Configuring {0} database...'.format(self.db_title))
+    print(f'Configuring {self.db_title} database...')
 
     #DB setup should be done last after doing any setup.
     if self._is_local_database():
@@ -175,7 +175,7 @@ def ensure_jdbc_driver_installed(self, properties):
     # check driver is present by default driver path
     default_driver_path = self._get_default_driver_path(properties)
     if default_driver_path and os.path.isfile(default_driver_path):
-      ambari_should_use_existing_default_jdbc = get_YN_input("Should ambari use existing default jdbc {0} [y/n] (y)? ".format(default_driver_path), True)
+      ambari_should_use_existing_default_jdbc = get_YN_input(f"Should ambari use existing default jdbc {default_driver_path} [y/n] (y)? ", True)
       if ambari_should_use_existing_default_jdbc:
         properties.process_pair(JDBC_DRIVER_PATH_PROPERTY, default_driver_path)
         update_properties(properties)
@@ -190,15 +190,15 @@ def ensure_jdbc_driver_installed(self, properties):
         custom_jdbc_name = os.path.basename(path_to_custom_jdbc_driver)
         if not path_to_custom_jdbc_driver == os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name):
           if os.path.isfile(os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name)):
-            replace_jdbc_in_share_dir = get_YN_input("You already have file {0} in /usr/share/java/. Should it be replaced? [y/n] (y)? ".format(custom_jdbc_name), True)
+            replace_jdbc_in_share_dir = get_YN_input(f"You already have file {custom_jdbc_name} in /usr/share/java/. Should it be replaced? [y/n] (y)? ", True)
             if replace_jdbc_in_share_dir:
               try:
                 os.remove(os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name))
               except Exception as ee:
-                err = 'ERROR: Could not remove jdbc file. %s' % os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name)
+                err = f'ERROR: Could not remove jdbc file. {os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name)}'
                 raise FatalException(1, err)
           shutil.copy(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH)
-          print("Copying {0} to {1}".format(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH))
+          print(f"Copying {path_to_custom_jdbc_driver} to {configDefaults.JAVA_SHARE_PATH}")
       except Exception as e:
         err = "Can not copy file {0} to {1} due to: {2} . Please check file " \
               "permissions and free disk space.".format(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH, str(e))
@@ -465,7 +465,7 @@ def select_dbms(self, options):
     dbms_choices = ''
     for desc in self.DBMS_LIST:
       if len(desc.storage_name) > 0:
-        dbms_storage = " ({0})".format(desc.storage_name)
+        dbms_storage = f" ({desc.storage_name})"
       else:
         dbms_storage = ""
       dbms_choice_prompt += self.DBMS_PROMPT_PATTERN.format(n_dbms, desc.dbms_name, dbms_storage)
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
index 5b850c855c8..09fb8c12822 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
@@ -368,7 +368,7 @@ class PGConfig(LinuxDBMSConfig):
     versioned_script_path = glob.glob("/usr/pgsql-*/bin/postgresql*-setup") # versioned version of psql
     if versioned_script_path:
-      PG_INITDB_CMD = "{0} initdb".format(versioned_script_path[0])
+      PG_INITDB_CMD = f"{versioned_script_path[0]} initdb"
 
     psql_service_file = glob.glob("/usr/lib/systemd/system/postgresql-*.service")
     if psql_service_file:
@@ -378,17 +378,17 @@ class PGConfig(LinuxDBMSConfig):
         raise FatalException(1, "Cannot find postgresql-setup script.")
     SERVICE_CMD = "/usr/bin/env systemctl"
-    PG_ST_CMD = "%s status %s" % (SERVICE_CMD, PG_SERVICE_NAME)
+    PG_ST_CMD = f"{SERVICE_CMD} status {PG_SERVICE_NAME}"
 
-    PG_START_CMD = AMBARI_SUDO_BINARY + " %s start %s" % (SERVICE_CMD, PG_SERVICE_NAME)
-    PG_RESTART_CMD = AMBARI_SUDO_BINARY + " %s restart %s" % (SERVICE_CMD, PG_SERVICE_NAME)
-    PG_HBA_RELOAD_CMD = AMBARI_SUDO_BINARY + " %s reload %s" % (SERVICE_CMD, PG_SERVICE_NAME)
+    PG_START_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} start {PG_SERVICE_NAME}"
+    PG_RESTART_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} restart {PG_SERVICE_NAME}"
+    PG_HBA_RELOAD_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} reload {PG_SERVICE_NAME}"
   else:
     SERVICE_CMD = "/usr/bin/env service"
     if os.path.isfile("/usr/bin/postgresql-setup"):
       PG_INITDB_CMD = "/usr/bin/postgresql-setup initdb"
     else:
-      PG_INITDB_CMD = "%s %s initdb" % (SERVICE_CMD, PG_SERVICE_NAME)
+      PG_INITDB_CMD = f"{SERVICE_CMD} {PG_SERVICE_NAME} initdb"
 
       if OSCheck.is_suse_family() and not is_service_exist(PG_SERVICE_NAME):
         versioned_script_paths = glob.glob("/usr/pgsql-*/bin/postgresql*-setup")
@@ -396,18 +396,18 @@ class PGConfig(LinuxDBMSConfig):
           versioned_script_path_tps = [(re.search(r'postgresql-([0-9]+\.?[0-9]*)', path).group(1), path) for path in versioned_script_paths]
           versioned_script_path_tps.sort(key = lambda t: float(t[0]), reverse = True)
           for versioned_script_path_tp in versioned_script_path_tps:
-            pgsql_service_file_name = "postgresql-%s" % versioned_script_path_tp[0]
+            pgsql_service_file_name = f"postgresql-{versioned_script_path_tp[0]}"
             if is_service_exist(pgsql_service_file_name):
               PG_SERVICE_NAME = pgsql_service_file_name
-              PG_INITDB_CMD = "%s initdb" % versioned_script_path_tp[1]
-              PG_HBA_DIR = "/var/lib/pgsql/%s/data" % versioned_script_path_tp[0]
+              PG_INITDB_CMD = f"{versioned_script_path_tp[1]} initdb"
+              PG_HBA_DIR = f"/var/lib/pgsql/{versioned_script_path_tp[0]}/data"
               break
 
-    PG_ST_CMD = "%s %s status" % (SERVICE_CMD, PG_SERVICE_NAME)
-    PG_START_CMD = AMBARI_SUDO_BINARY + " %s %s start" % (SERVICE_CMD, PG_SERVICE_NAME)
-    PG_RESTART_CMD = AMBARI_SUDO_BINARY + " %s %s restart" % (SERVICE_CMD, PG_SERVICE_NAME)
-    PG_HBA_RELOAD_CMD = AMBARI_SUDO_BINARY + " %s %s reload" % (SERVICE_CMD, PG_SERVICE_NAME)
+    PG_ST_CMD = f"{SERVICE_CMD} {PG_SERVICE_NAME} status"
+    PG_START_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} {PG_SERVICE_NAME} start"
+    PG_RESTART_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} {PG_SERVICE_NAME} restart"
+    PG_HBA_RELOAD_CMD = AMBARI_SUDO_BINARY + f" {SERVICE_CMD} {PG_SERVICE_NAME} reload"
 
   PG_HBA_CONF_FILE = None
   PG_HBA_CONF_FILE_BACKUP = None
@@ -473,7 +473,7 @@ def ensure_dbms_is_running(self, options, properties, scmStatus=None):
     if is_root():
       (pg_status, retcode, out, err) = PGConfig._check_postgre_up()
       if not retcode == 0:
-        err = 'Unable to start PostgreSQL server. Status {0}. {1}. Exiting'.format(pg_status, err)
+        err = f'Unable to start PostgreSQL server. Status {pg_status}. {err}. Exiting'
         raise FatalException(retcode, err)
     else:
       print("Unable to check PostgreSQL server status when starting " \
@@ -539,7 +539,7 @@ def _reset_local_database(self):
     default = "no"
 
     # Run automatic reset only for embedded DB
-    okToRun = get_YN_input("Confirm server reset [yes/no]({0})? ".format(default), get_silent())
+    okToRun = get_YN_input(f"Confirm server reset [yes/no]({default})? ", get_silent())
     if not okToRun:
       err = "Ambari Server 'reset' cancelled"
       raise FatalException(1, err)
@@ -687,7 +687,7 @@ def _check_postgre_up():
       out, err = process.communicate()
       retcode = process.returncode
 
-      print_info_msg("Waiting for postgres to start at port {0}...".format(PG_PORT))
+      print_info_msg(f"Waiting for postgres to start at port {PG_PORT}...")
       wait_for_port_opened('127.0.0.1', PG_PORT, PG_PORT_CHECK_TRIES_COUNT, PG_PORT_CHECK_INTERVAL)
 
       pg_status, retcode, out, err = PGConfig._get_postgre_status()
@@ -893,7 +893,7 @@ def _change_tables_owner(self):
                                               "\" OWNER TO \""+self.database_username+"\"",
                                               self.database_name, False)
         if retcode != 0 or "ALTER TABLE" not in stdout:
-          print_error_msg("Failed to change owner of table:{0} to user:{1}".format(tbl, owner))
+          print_error_msg(f"Failed to change owner of table:{tbl} to user:{owner}")
           return False
 
     return True
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py
index e57f2f8a8b9..cd1d8093d6b 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py
@@ -110,7 +110,7 @@ def _configure_database_password(showDefault=True):
   def _prompt_db_properties(self):
     if self.must_set_database_options:
       #prompt for SQL Server host and instance name
-      hostname_prompt = "SQL Server host and instance for the {0} database: ({1}) ".format(self.db_title, self.database_host)
+      hostname_prompt = f"SQL Server host and instance for the {self.db_title} database: ({self.database_host}) "
       self.database_host = get_validated_string_input(hostname_prompt, self.database_host, None, None, False, True)
 
       #prompt for SQL Server authentication method
@@ -135,12 +135,12 @@ def _prompt_db_properties(self):
       else:
         self.use_windows_authentication = False
 
-        user_prompt = "SQL Server user name for the {0} database: ({1}) ".format(self.db_title, self.database_username)
+        user_prompt = f"SQL Server user name for the {self.db_title} database: ({self.database_username}) "
         username = get_validated_string_input(user_prompt, self.database_username, None, "User name", False, False)
         self.database_username = username
 
-        user_prompt = "SQL Server password for the {0} database: ".format(self.db_title)
+        user_prompt = f"SQL Server password for the {self.db_title} database: "
         password = get_validated_string_input(user_prompt, "", None, "Password", True, False)
         self.database_password = password
@@ -170,12 +170,12 @@ def _setup_remote_server(self, properties, options):
     pass
 
   def _setup_remote_database(self):
-    print('Populating the {0} database structure...'.format(self.db_title))
+    print(f'Populating the {self.db_title} database structure...')
 
     self._populate_database_structure()
 
   def _reset_remote_database(self):
-    print('Resetting the {0} database structure...'.format(self.db_title))
+    print(f'Resetting the {self.db_title} database structure...')
 
     self._populate_database_structure()
@@ -253,10 +253,10 @@ def _get_jdbc_driver_path(self):
     return driver_path
 
   def _build_sql_server_connection_string(self):
-    databaseUrl = "jdbc:sqlserver://{0}".format(ensure_double_backslashes(self.database_host))
+    databaseUrl = f"jdbc:sqlserver://{ensure_double_backslashes(self.database_host)}"
     if self.database_port is not None and self.database_port != "":
-      databaseUrl += ":{0}".format(self.database_port)
-    databaseUrl += ";databaseName={0}".format(self.database_name)
+      databaseUrl += f":{self.database_port}"
+    databaseUrl += f";databaseName={self.database_name}"
     if(self.use_windows_authentication):
       databaseUrl += ";integratedSecurity=true"
     #No need to append the username and password, the Ambari server adds them by itself when connecting to the database
@@ -293,10 +293,10 @@ def _populate_database_structure(self):
   @staticmethod
   def _execute_db_script(databaseHost, databaseScript, minReportedSeverityLevel=10):
-    dbCmd = 'sqlcmd -S {0} -b -V {1} -i {2}'.format(databaseHost, minReportedSeverityLevel, databaseScript)
+    dbCmd = f'sqlcmd -S {databaseHost} -b -V {minReportedSeverityLevel} -i {databaseScript}'
     retCode, outData, errData = run_os_command(['cmd', '/C', dbCmd])
     if not retCode == 0:
-      err = 'Running database create script failed. Error output: {0} Output: {1} Exiting.'.format(errData, outData)
+      err = f'Running database create script failed. Error output: {errData} Output: {outData} Exiting.'
       raise FatalException(retCode, err)
     print_info_msg("sqlcmd output:")
     print_info_msg(outData)
diff --git a/ambari-server/src/main/python/ambari_server/properties.py b/ambari-server/src/main/python/ambari_server/properties.py
index 539363af055..7cea03fcdb9 100644
--- a/ambari-server/src/main/python/ambari_server/properties.py
+++ b/ambari-server/src/main/python/ambari_server/properties.py
@@ -170,7 +170,7 @@ def __getattr__(self, name):
     if hasattr(self._props, name):
       return getattr(self._props, name)
     else:
-      raise NotImplementedError("The method '{}' is not implemented.".format(name))
+      raise NotImplementedError(f"The method '{name}' is not implemented.")
 
   def __contains__(self, key):
     return key in self._props
diff --git a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
index c9fc48be44d..c8722bd637e 100644
--- a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
+++ b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
@@ -84,7 +84,7 @@ def _iter_update_directory_archive(self, subdirs_list):
   def _update_resources_subdir_archive(self, subdir):
     archive_root = os.path.join(self.resources_dir, subdir)
-    self.dbg_out("Updating archive for {0} dir at {1}...".format(subdir, archive_root))
+    self.dbg_out(f"Updating archive for {subdir} dir at {archive_root}...")
 
     # update the directories so that the .hash is generated
     self.update_directory_archive(archive_root)
@@ -94,25 +94,25 @@ def update_directory_archives(self):
     Please see AMBARI-4481 for more details
     """
     # archive stacks
-    self.dbg_out("Updating archives for stack dirs at {0}...".format(self.stacks_root))
+    self.dbg_out(f"Updating archives for stack dirs at {self.stacks_root}...")
     valid_stacks = self.list_stacks(self.stacks_root)
-    self.dbg_out("Stacks: {0}".format(pprint.pformat(valid_stacks)))
+    self.dbg_out(f"Stacks: {pprint.pformat(valid_stacks)}")
     # Iterate over stack directories
     self._iter_update_directory_archive(valid_stacks)
 
     # archive common services
     common_services_root = os.path.join(self.resources_dir, self.COMMON_SERVICES_DIR)
-    self.dbg_out("Updating archives for common services dirs at {0}...".format(common_services_root))
+    self.dbg_out(f"Updating archives for common services dirs at {common_services_root}...")
     valid_common_services = self.list_common_services(common_services_root)
-    self.dbg_out("Common Services: {0}".format(pprint.pformat(valid_common_services)))
+    self.dbg_out(f"Common Services: {pprint.pformat(valid_common_services)}")
     # Iterate over common services directories
     self._iter_update_directory_archive(valid_common_services)
 
     # archive extensions
     extensions_root = os.path.join(self.resources_dir, self.EXTENSIONS_DIR)
-    self.dbg_out("Updating archives for extensions dirs at {0}...".format(extensions_root))
+    self.dbg_out(f"Updating archives for extensions dirs at {extensions_root}...")
     valid_extensions = self.list_extensions(extensions_root)
-    self.dbg_out("Extensions: {0}".format(pprint.pformat(valid_extensions)))
+    self.dbg_out(f"Extensions: {pprint.pformat(valid_extensions)}")
     # Iterate over extension directories
     self._iter_update_directory_archive(valid_extensions)
@@ -131,7 +131,7 @@ def update_directory_archives(self):
   def _list_metainfo_dirs(self, root_dir):
     valid_items = []
     # Format:
-    glob_pattern = "{0}/*/*".format(root_dir)
+    glob_pattern = f"{root_dir}/*/*"
     dirs = glob.glob(glob_pattern)
     for directory in dirs:
       metainfo_file = os.path.join(directory, self.METAINFO_XML)
@@ -146,7 +146,7 @@ def list_stacks(self, root_dir):
     try:
       return self._list_metainfo_dirs(root_dir)
     except Exception as err:
-      raise KeeperException("Can not list stacks: {0}".format(str(err)))
+      raise KeeperException(f"Can not list stacks: {str(err)}")
 
   def list_common_services(self, root_dir):
     """
@@ -155,7 +155,7 @@ def list_common_services(self, root_dir):
     try:
       return self._list_metainfo_dirs(root_dir)
     except Exception as err:
-      raise KeeperException("Can not list common services: {0}".format(str(err)))
+      raise KeeperException(f"Can not list common services: {str(err)}")
 
   def list_extensions(self, root_dir):
     """
@@ -164,7 +164,7 @@ def list_extensions(self, root_dir):
     try:
       return self._list_metainfo_dirs(root_dir)
     except Exception as err:
-      raise KeeperException("Can not list extensions: {0}".format(str(err)))
+      raise KeeperException(f"Can not list extensions: {str(err)}")
 
   def update_directory_archive(self, directory):
     """
@@ -185,7 +185,7 @@ def update_directory_archive(self, directory):
         self.zip_directory(directory, skip_empty_directory)
     # Skip generation of .hash file is directory is empty
     if (skip_empty_directory and (not os.path.exists(directory) or not os.listdir(directory))):
-      self.dbg_out("Empty directory. Skipping generation of hash file for {0}".format(directory))
+      self.dbg_out(f"Empty directory. Skipping generation of hash file for {directory}")
     else:
       self.write_hash_sum(directory, cur_hash)
     pass
@@ -209,7 +209,7 @@ def count_hash_sum(self, directory):
           file_list.append(full_path)
       file_list.sort()
      for path in file_list:
-        self.dbg_out("Counting hash of {0}".format(path))
+        self.dbg_out(f"Counting hash of {path}")
        with open(path, 'rb') as fh:
          while True:
            data = fh.read(self.BUFFER)
@@ -218,8 +218,7 @@ def count_hash_sum(self, directory):
            sha1.update(data)
      return sha1.hexdigest()
    except Exception as err:
-      raise KeeperException("Can not calculate directory "
-                            "hash: {0}".format(str(err)))
+      raise KeeperException(f"Can not calculate directory hash: {str(err)}")
 
   def read_hash_sum(self, directory):
@@ -233,8 +232,7 @@ def read_hash_sum(self, directory):
        with open(hash_file) as fh:
          return fh.readline().strip()
      except Exception as err:
-        raise KeeperException("Can not read file {0} : {1}".format(hash_file,
-                                                                   str(err)))
+        raise KeeperException(f"Can not read file {hash_file} : {str(err)}")
    else:
      return None
@@ -250,19 +248,18 @@ def write_hash_sum(self, directory, new_hash):
        fh.write(new_hash)
      os.chmod(hash_file, 0o644)
    except Exception as err:
-      raise KeeperException("Can not write to file {0} : {1}".format(hash_file,
-                                                                     str(err)))
+      raise KeeperException(f"Can not write to file {hash_file} : {str(err)}")
 
   def zip_directory(self, directory, skip_if_empty = False):
     """
     Packs entire directory into zip file. Hash file is also packaged
     into archive
     """
-    self.dbg_out("creating archive for directory {0}".format(directory))
+    self.dbg_out(f"creating archive for directory {directory}")
     try:
       if skip_if_empty:
         if not os.path.exists(directory) or not os.listdir(directory):
-          self.dbg_out("Empty directory. Skipping archive creation for {0}".format(directory))
+          self.dbg_out(f"Empty directory. Skipping archive creation for {directory}")
           return
 
       zip_file_path = os.path.join(directory, self.ARCHIVE_NAME)
@@ -274,8 +271,7 @@ def zip_directory(self, directory, skip_if_empty = False):
           if not self.is_ignored(filename):
             absname = os.path.abspath(os.path.join(root, filename))
             arcname = absname[len(abs_src) + 1:]
-            self.dbg_out('zipping %s as %s' % (os.path.join(root, filename),
-                                               arcname))
+            self.dbg_out(f'zipping {os.path.join(root, filename)} as {arcname}')
             zf.write(absname, arcname)
       zf.close()
       os.chmod(zip_file_path, 0o755)
@@ -294,7 +290,7 @@ def is_ignored(self, filename):
   def dbg_out(self, text):
     if self.DEBUG:
-      sys.stderr.write("{0}\n".format(text))
+      sys.stderr.write(f"{text}\n")
     if not self.DEBUG and self.verbose:
       print(text)
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index cec3fd71003..29965a7eb5d 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -233,7 +233,7 @@ def get_conf_dir():
     return conf_dir
   except KeyError:
     default_conf_dir = AmbariPath.get("/etc/ambari-server/conf")
-    print_info_msg("{0} is not set, using default {1}".format(AMBARI_CONF_VAR, default_conf_dir))
+    print_info_msg(f"{AMBARI_CONF_VAR} is not set, using default {default_conf_dir}")
     return default_conf_dir
 
 def find_properties_file():
@@ -244,7 +244,7 @@ def find_properties_file():
     print_error_msg (err)
     raise FatalException(1, err)
   else:
-    print_info_msg('Loading properties from {0}'.format(conf_file))
+    print_info_msg(f'Loading properties from {conf_file}')
   return conf_file
 
 # Load ambari properties and return dict with values
@@ -259,7 +259,7 @@ def get_ambari_properties():
     with open(conf_file) as hfR:
       properties.load(hfR)
   except (Exception) as e:
-    print_error_msg ('Could not read "%s": %s' % (conf_file, str(e)))
+    print_error_msg (f'Could not read "{conf_file}": {str(e)}')
     return -1
 
   # Try to replace $ROOT with the value from the OS environment.
@@ -276,7 +276,7 @@ def get_ambari_properties():
       properties.__dict__[k] = v.replace("$ROOT", root)
       properties._props[k] = v.replace("$ROOT", root)
   except (Exception) as e:
-    print_error_msg('Could not replace %s in "%s": %s' %(conf_file, root_env, str(e)))
+    print_error_msg(f'Could not replace {conf_file} in "{root_env}": {str(e)}')
   return properties
 
 class ServerDatabaseType(object):
@@ -442,10 +442,10 @@ def check_if_directories_writable(self, directories):
         os.makedirs(directory, 0o755)
       except Exception as ex:
         # permission denied here is expected when ambari runs as non-root
-        print_error_msg("Could not create {0}. Reason: {1}".format(directory, str(ex)))
+        print_error_msg(f"Could not create {directory}. Reason: {str(ex)}")
 
     if not os.path.isdir(directory) or not os.access(directory, os.W_OK):
-      raise FatalException(-1, "Unable to access {0} directory. Confirm the directory is created and is writable by Ambari Server user account '{1}'".format(directory, getpass.getuser()))
+      raise FatalException(-1, f"Unable to access {directory} directory. Confirm the directory is created and is writable by Ambari Server user account '{getpass.getuser()}'")
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ServerConfigDefaultsWindows(ServerConfigDefaults):
@@ -846,14 +846,14 @@ def update_database_name_property(upgrade=False):
     if properties == -1:
       err = "Error getting ambari properties"
       raise FatalException(-1, err)
-    print_warning_msg("{0} property isn't set in {1} . Setting it to default value - {2}".format(JDBC_DATABASE_NAME_PROPERTY, AMBARI_PROPERTIES_FILE, configDefaults.DEFAULT_DB_NAME))
+    print_warning_msg(f"{JDBC_DATABASE_NAME_PROPERTY} property isn't set in {AMBARI_PROPERTIES_FILE} . Setting it to default value - {configDefaults.DEFAULT_DB_NAME}")
     properties.process_pair(JDBC_DATABASE_NAME_PROPERTY, configDefaults.DEFAULT_DB_NAME)
     conf_file = find_properties_file()
     try:
       with open(conf_file, "w") as hfW:
        properties.store(hfW)
     except Exception as e:
-      err = 'Could not write ambari config file "%s": %s' % (conf_file, e)
+      err = f'Could not write ambari config file "{conf_file}": {e}'
       raise FatalException(-1, err)
@@ -923,9 +923,9 @@ def read_passwd_for_alias(alias, masterKey="", options=None):
     command = SECURITY_PROVIDER_GET_CMD.format(get_java_exe_path(),
       serverClassPath.get_full_ambari_classpath_escaped_for_shell(), alias, tempFilePath, masterKey)
     (retcode, stdout, stderr) = run_os_command(command)
-    print_info_msg("Return code from credential provider get passwd: {0}".format(str(retcode)))
+    print_info_msg(f"Return code from credential provider get passwd: {str(retcode)}")
     if retcode != 0:
-      print_error_msg ('ERROR: Unable to read password from store. alias = {0}'.format(alias))
+      print_error_msg (f'ERROR: Unable to read password from store. alias = {alias}')
     else:
       with open(tempFilePath, 'r') as hfRTemp:
         passwd = hfRTemp.read()
@@ -963,7 +963,7 @@ def save_passwd_for_alias(alias, passwd, masterKey=""):
     command = SECURITY_PROVIDER_PUT_CMD.format(get_java_exe_path(),
       serverClassPath.get_full_ambari_classpath_escaped_for_shell(), alias, passwd, masterKey)
     (retcode, stdout, stderr) = run_os_command(command)
-    print_info_msg("Return code from credential provider save passwd: {0}".format(str(retcode)))
+    print_info_msg(f"Return code from credential provider save passwd: {str(retcode)}")
     return retcode
   else:
     print_error_msg("Alias or password is unreadable.")
@@ -993,7 +993,7 @@ def remove_password_file(filename):
     try:
       os.remove(passFilePath)
     except Exception as e:
-      print_warning_msg('Unable to remove password file: {0}'.format(str(e)))
+      print_warning_msg(f'Unable to remove password file: {str(e)}')
       return 1
   pass
   return 0
@@ -1129,9 +1129,9 @@ def update_krb_jaas_login_properties():
       # restore original file, destination arg for rename func shouldn't exists
       os.remove(conf_file)
       os.rename(prev_conf_file, conf_file)
-      print_warning_msg("Original file %s kept" % AMBARI_KRB_JAAS_LOGIN_FILE)
+      print_warning_msg(f"Original file {AMBARI_KRB_JAAS_LOGIN_FILE} kept")
     except OSError as e:
-      print_error_msg ("Couldn't move %s file: %s" % (prev_conf_file, str(e)))
+      print_error_msg (f"Couldn't move {prev_conf_file} file: {str(e)}")
       return -1
 
   return 0
@@ -1150,9 +1150,9 @@ def update_ambari_env():
       if env_file is not None:
         os.remove(env_file)
         os.rename(prev_env_file, env_file)
-        print(("INFO: Original file %s kept") % (AMBARI_ENV_FILE))
+        print(f"INFO: Original file {AMBARI_ENV_FILE} kept")
     except OSError as e:
-      print_error_msg ( "Couldn't move %s file: %s" % (prev_env_file, str(e)))
+      print_error_msg ( f"Couldn't move {prev_env_file} file: {str(e)}")
       return -1
 
   return 0
@@ -1192,12 +1192,12 @@ def update_ambari_properties():
 
   # Previous config file does not exist
   if (not prev_conf_file) or (prev_conf_file is None):
-    print_warning_msg("Can not find %s file from previous version, skipping import of settings" % configDefaults.AMBARI_PROPERTIES_BACKUP_FILE)
+    print_warning_msg(f"Can not find {configDefaults.AMBARI_PROPERTIES_BACKUP_FILE} file from previous version, skipping import of settings")
     return 0
 
   # ambari.properties file does not exists
   if conf_file is None:
-    print_error_msg("Can't find %s file" % AMBARI_PROPERTIES_FILE)
+    print_error_msg(f"Can't find {AMBARI_PROPERTIES_FILE} file")
     return -1
 
   with open(prev_conf_file) as hfOld:
@@ -1205,7 +1205,7 @@ def update_ambari_properties():
       old_properties = Properties()
       old_properties.load(hfOld)
     except Exception as e:
-      print_error_msg ('Could not read "%s": %s' % (prev_conf_file, str(e)))
+      print_error_msg (f'Could not read "{prev_conf_file}": {str(e)}')
       return -1
 
   try:
@@ -1239,7 +1239,7 @@ def update_ambari_properties():
       new_properties.store(hfW)
 
   except Exception as e:
-    print_error_msg ('Could not write "%s": %s' % (conf_file, str(e)))
+    print_error_msg (f'Could not write "{conf_file}": {str(e)}')
    return -1
 
   timestamp = datetime.datetime.now()
@@ -1248,7 +1248,7 @@ def update_ambari_properties():
   try:
     os.rename(prev_conf_file, new_conf_file)
   except Exception as e:
-    print_error_msg ('Could not rename "%s" to "%s": %s' % (prev_conf_file, new_conf_file, str(e)))
+    print_error_msg (f'Could not rename "{prev_conf_file}" to "{new_conf_file}": {str(e)}')
     #Not critical, move on
 
   return 0
@@ -1264,7 +1264,7 @@ def update_properties(propertyMap):
     with open(conf_file, 'r') as file:
       properties.load(file)
   except (Exception) as e:
-    print_error_msg('Could not read "%s": %s' % (conf_file, e))
+    print_error_msg(f'Could not read "{conf_file}": {e}')
     return -1
 
   for key in propertyMap.keys():
@@ -1302,14 +1302,14 @@ def write_property(key, value):
     with open(conf_file, "r") as hfR:
       properties.load(hfR)
   except Exception as e:
-    print_error_msg('Could not read ambari config file "%s": %s' % (conf_file, e))
+    print_error_msg(f'Could not read ambari config file "{conf_file}": {e}')
     return -1
   properties.process_pair(key, value)
   try:
     with open(conf_file, 'w') as hfW:
       properties.store(hfW)
   except Exception as e:
-    print_error_msg('Could not write ambari config file "%s": %s' % (conf_file, e))
+    print_error_msg(f'Could not write ambari config file "{conf_file}": {e}')
     return -1
   return 0
@@ -1455,20 +1455,20 @@ def find_jdk():
   if jdkPath:
     if validate_jdk(jdkPath):
       return jdkPath
-  print("INFO: Looking for available JDKs at {0}".format(configDefaults.JDK_INSTALL_DIR))
+  print(f"INFO: Looking for available JDKs at {configDefaults.JDK_INSTALL_DIR}")
   jdks = glob.glob(os.path.join(configDefaults.JDK_INSTALL_DIR, configDefaults.JDK_SEARCH_PATTERN))
   #[fbarca] Use the newest JDK
   jdks.sort(reverse=True)
-  print_info_msg("Found: {0}".format(str(jdks)))
+  print_info_msg(f"Found: {str(jdks)}")
   if len(jdks) == 0:
     return
   for jdkPath in jdks:
-    print("INFO: Trying to use JDK {0}".format(jdkPath))
+    print(f"INFO: Trying to use JDK {jdkPath}")
     if validate_jdk(jdkPath):
-      print("INFO: Selected JDK {0}".format(jdkPath))
+      print(f"INFO: Selected JDK {jdkPath}")
       return jdkPath
     else:
-      print_error_msg ("JDK {0} is invalid".format(jdkPath))
+      print_error_msg (f"JDK {jdkPath} is invalid")
   return
 
 def get_java_exe_path():
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py b/ambari-server/src/main/python/ambari_server/serverSetup.py
index 967bfecb43f..d84a48cb3dd 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -166,7 +166,7 @@ def check_selinux():
       raise FatalException(1, None)
     return 0
   except OSError:
-    print_warning_msg("Could not run {0}: OK".format(GET_SE_LINUX_ST_CMD))
+    print_warning_msg(f"Could not run {GET_SE_LINUX_ST_CMD}: OK")
   return 0
@@ -227,10 +227,10 @@ def do_checks(self):
 
       adjust_directory_permissions(self.user)
     except OSError as e:
-      print_error_msg("Failed: %s" % str(e))
+      print_error_msg(f"Failed: {str(e)}")
       return 4
     except Exception as e:
-      print_error_msg("Unexpected error %s" % str(e))
+      print_error_msg(f"Unexpected error {str(e)}")
       return 1
     return 0
@@ -252,7 +252,7 @@ def __init__(self, options):
 
   def _create_custom_user(self):
     user = get_validated_string_input(
-      "Enter user account for ambari-server service ({0}):".format(self.user),
+      f"Enter user account for ambari-server service ({self.user}):",
       self.user, None,
       "Invalid username.",
       False
@@ -264,7 +264,7 @@ def __init__(self, options):
     if get_silent():
       password = self.password
     else:
-      password = get_validated_string_input("Enter password for user {0}:".format(user), "", None, "Password", True, False)
+      password = get_validated_string_input(f"Enter password for user {user}:", "", None, "Password", True, False)
 
     from ambari_commons.os_windows import UserHelper
@@ -280,7 +280,7 @@ def _create_custom_user(self):
                      "skipping user creation".format(user))
     elif status == UserHelper.ACTION_FAILED:  # fail
-      print_warning_msg("Can't create user {0}. Failed with message {1}".format(user, message))
+      print_warning_msg(f"Can't create user {user}. Failed with message {message}")
       return UserHelper.ACTION_FAILED
 
     self.password = password
@@ -289,12 +289,12 @@ def _create_custom_user(self):
     #This is unconditional
     status, message = uh.add_user_privilege('SeServiceLogonRight')
     if status == UserHelper.ACTION_FAILED:
-      print_warning_msg("Can't add SeServiceLogonRight to user {0}. Failed with message {1}".format(user, message))
+      print_warning_msg(f"Can't add SeServiceLogonRight to user {user}. Failed with message {message}")
       return UserHelper.ACTION_FAILED
 
     status, message = uh.add_user_privilege('SeBatchLogonRight')
     if status == UserHelper.ACTION_FAILED:
-      print_warning_msg("Can't add SeBatchLogonRight to user {0}. Failed with message {1}".format(user, message))
+      print_warning_msg(f"Can't add SeBatchLogonRight to user {user}. Failed with message {message}")
       return UserHelper.ACTION_FAILED
 
     print_info_msg("User configuration is done.")
@@ -322,19 +322,18 @@ def __init__(self, options):
 
   def _create_custom_user(self):
     user = get_validated_string_input(
-      "Enter user account for ambari-server daemon ({0}):".format(self.user),
+      f"Enter user account for ambari-server daemon ({self.user}):",
       self.user,
       "^[a-z_][a-z0-9_-]{1,31}$",
       "Invalid username.",
       False
     )
 
-    print_info_msg("Trying to create user {0}".format(user))
+    print_info_msg(f"Trying to create user {user}")
     command = self.NR_USERADD_CMD.format(user, self.NR_USER_COMMENT)
     retcode, out, err = run_os_command(command)
     if retcode == 9:  # 9 = username already in use
-      print_info_msg("User {0} already exists, "
-                     "skipping user creation".format(user))
+      print_info_msg(f"User {user} already exists, skipping user creation")
 
     elif retcode != 0:  # fail
       print_warning_msg("Can't create user {0}. Command {1} "
@@ -520,7 +519,7 @@ def download_and_install_jdk(self, args, properties, ambariOnly = False):
       elif properties[JDK_DOWNLOAD_SUPPORTED_PROPERTY].upper() == "FALSE":
         print("ERROR: Oracle JDK is not found in {1}. JDK download is not supported in this distribution. Please download Oracle JDK " \
               "archive ({0}) manually from Oracle site, place it into {1} and re-run this script.".format(jdk_cfg.dest_file, dest_file))
-        print("NOTE: If you have already downloaded the file, please verify if the name is exactly same as {0}.".format(jdk_cfg.dest_file))
+        print(f"NOTE: If you have already downloaded the file, please verify if the name is exactly same as {jdk_cfg.dest_file}.")
         print('Exiting...')
         sys.exit(1)
       else:
@@ -544,7 +543,7 @@ def download_and_install_jdk(self, args, properties, ambariOnly = False):
       try:
         (retcode, out, java_home_dir) = self._install_jdk(dest_file, jdk_cfg)
       except Exception as e:
-        print("Installation of JDK has failed: %s\n" % str(e))
+        print(f"Installation of JDK has failed: {str(e)}\n")
         file_exists = os.path.isfile(dest_file)
         if file_exists:
           ok = get_YN_input("JDK found at " + dest_file + ". "
@@ -563,7 +562,7 @@ def download_and_install_jdk(self, args, properties, ambariOnly = False):
           try:
             (retcode, out) = self._install_jdk(dest_file, jdk_cfg)
           except Exception as e:
-            print("Installation of JDK was failed: %s\n" % str(e))
+            print(f"Installation of JDK was failed: {str(e)}\n")
             err = "Unable to install JDK. Please remove JDK, file found at " + \
                   dest_file + " and re-run Ambari Server setup"
             raise FatalException(1, err)
@@ -595,7 +594,7 @@ def download_and_unpack_jce_policy(self, properties, ambariOnly = False):
       print(err_msg_stdout)
       print_error_msg("Failed to download JCE policy files:")
       if e.reason is not None:
-        print_error_msg("\nREASON: {0}".format(e.reason))
+        print_error_msg(f"\nREASON: {e.reason}")
         # TODO: We don't fail installation if _download_jce_policy fails. Is it OK?
 
     print('Installing JCE policy...')
@@ -607,7 +606,7 @@ def download_and_unpack_jce_policy(self, properties, ambariOnly = False):
       print(err_msg_stdout)
       print_error_msg("Failed to install JCE policy files:")
       if e.reason is not None:
-        print_error_msg("\nREASON: {0}".format(e.reason))
+        print_error_msg(f"\nREASON: {e.reason}")
         # TODO: We don't fail installation if _download_jce_policy fails. Is it OK?
 
   @staticmethod
@@ -725,7 +724,7 @@ def __init__(self):
   def _install_jdk(self, java_inst_file, jdk_cfg):
     jdk_inst_dir = jdk_cfg.inst_dir
-    print("Installing JDK to {0}".format(jdk_inst_dir))
+    print(f"Installing JDK to {jdk_inst_dir}")
 
     if not os.path.exists(jdk_inst_dir):
       os.makedirs(jdk_inst_dir)
@@ -752,14 +751,14 @@ def _install_jdk(self, java_inst_file, jdk_cfg):
 
     if retcode == 1603:
       # JDK already installed
-      print("JDK already installed in {0}".format(jdk_inst_dir))
+      print(f"JDK already installed in {jdk_inst_dir}")
       retcode = 0
     else:
       if retcode != 0:
-        err = "Installation of JDK returned exit code %s" % retcode
+        err = f"Installation of JDK returned exit code {retcode}"
         raise FatalException(retcode, err)
 
-      print("Successfully installed JDK to {0}".format(jdk_inst_dir))
+      print(f"Successfully installed JDK to {jdk_inst_dir}")
 
     # Don't forget to adjust the JAVA_HOME env var
@@ -768,11 +767,11 @@ def _install_jdk(self, java_inst_file, jdk_cfg):
   def _ensure_java_home_env_var_is_set(self, java_home_dir):
     if JAVA_HOME not in os.environ or os.environ[JAVA_HOME] != java_home_dir:
       java_home_dir_unesc = compress_backslashes(java_home_dir)
-      retcode, out, err = run_os_command("SETX {0} {1} /M".format(JAVA_HOME, java_home_dir_unesc))
+      retcode, out, err = run_os_command(f"SETX {JAVA_HOME} {java_home_dir_unesc} /M")
       if retcode != 0:
         print_warning_msg("SETX output: " + out)
         print_warning_msg("SETX error output: " + err)
-        err = "Setting JAVA_HOME failed. Exit code={0}".format(retcode)
+        err = f"Setting JAVA_HOME failed. Exit code={retcode}"
         raise FatalException(1, err)
 
       os.environ[JAVA_HOME] = java_home_dir
@@ -804,7 +803,7 @@ def __init__(self):
   def _install_jdk(self, java_inst_file, jdk_cfg):
     jdk_inst_dir = jdk_cfg.inst_dir
-    print("Installing JDK to {0}".format(jdk_inst_dir))
+    print(f"Installing JDK to {jdk_inst_dir}")
 
     retcode, out, err = run_os_command(self.CREATE_JDK_DIR_CMD.format(jdk_inst_dir))
     retcode, out, err = run_os_command(self.CHMOD_JDK_DIR_CMD.format(jdk_inst_dir))
@@ -821,13 +820,13 @@ def _install_jdk(self, java_inst_file, jdk_cfg):
     os.chdir(savedPath)
 
     if retcode != 0:
-      err = "Installation of JDK returned exit code %s" % retcode
+      err = f"Installation of JDK returned exit code {retcode}"
       raise FatalException(retcode, err)
 
     jdk_version = re.search(jdk_cfg.reg_exp, out).group(1)
     java_home_dir = os.path.join(jdk_inst_dir, jdk_version)
 
-    print("Successfully installed JDK to {0}".format(jdk_inst_dir))
+    print(f"Successfully installed JDK to {jdk_inst_dir}")
     return (retcode, out, java_home_dir)
 
   def _ensure_java_home_env_var_is_set(self, java_home_dir):
@@ -854,7 +853,7 @@ def adjust_jce_permissions(self, jdk_path):
     (stdoutdata, stderrdata) = process.communicate()
 
     if process.returncode != 0:
-      print_warning_msg("Failed to change jce permissions. {0}\n{1}".format(stderrdata, stdoutdata))
+      print_warning_msg(f"Failed to change jce permissions. {stderrdata}\n{stdoutdata}")
 
 def download_and_install_jdk(options):
   properties = get_ambari_properties()
@@ -916,11 +915,11 @@ def _check_jdbc_options(options):
 
 def setup_jdbc(args):
   if not os.path.isfile(args.jdbc_driver):
-    err = "File {0} does not exist!".format(args.jdbc_driver)
+    err = f"File {args.jdbc_driver} does not exist!"
     raise FatalException(1, err)
 
   if args.jdbc_db not in JDBC_DB_OPTION_VALUES:
-    err = "Unsupported database name {0}. Please see help for more information.".format(args.jdbc_db)
+    err = f"Unsupported database name {args.jdbc_db}. Please see help for more information."
     raise FatalException(1, err)
 
   _cache_jdbc_driver(args)
@@ -960,7 +959,7 @@ def _cache_jdbc_driver(args):
 
   try:
     shutil.copy(args.jdbc_driver, dest)
-    print("Copying {0} to {1}".format(args.jdbc_driver, dest))
+    print(f"Copying {args.jdbc_driver} to {dest}")
   except Exception as e:
     err = "Cannot copy file {0} to {1} due to: {2} . Please check file " \
           "permissions and free disk space.".format(args.jdbc_driver, dest, str(e))
@@ -971,7 +970,7 @@ def _cache_jdbc_driver(args):
     if os.path.isfile(symlink_name):
       os.remove(symlink_name)
     os.symlink(dest, symlink_name)
-    print("Creating symlink {0} to {1}".format(dest, symlink_name))
+    print(f"Creating symlink {dest} to {symlink_name}")
   except Exception as e:
     err = "Cannot create symlink {0} to {1} due to: {2} . Please check file " \
           "permissions and free disk space.".format(dest, symlink_name, str(e))
@@ -1118,10 +1117,10 @@ def expand_jce_zip_file(jce_zip_path, jdk_security_path):
     try:
       f.close()
     except Exception as e:
-      err = "Fail during the extraction of {0}.".format(jce_zip_path)
+      err = f"Fail during the extraction of {jce_zip_path}."
       raise FatalException(1, err)
   else:
-    err = "The path {0} or {1} is invalid.".format(jdk_security_path, jce_zip_path)
+    err = f"The path {jdk_security_path} or {jce_zip_path} is invalid."
     raise FatalException(1, err)
 
   if unziped_jce_path:
@@ -1183,7 +1182,7 @@ def setup(options):
   try:
     download_and_install_jdk(options)
   except FatalException as e:
-    err = 'Downloading or installing JDK failed: {0}. Exiting.'.format(e)
+    err = f'Downloading or installing JDK failed: {e}. Exiting.'
    raise FatalException(e.code, err)
 
   print('Checking GPL software agreement...')
@@ -1213,7 +1212,7 @@ def setup(options):
 
   json_url = get_json_url_from_repo_file()
   if json_url:
-    print("Ambari repo file contains latest json url {0}, updating stacks repoinfos with it...".format(json_url))
+    print(f"Ambari repo file contains latest json url {json_url}, updating stacks repoinfos with it...")
     properties = get_ambari_properties()
     stack_root = get_stack_location(properties)
     update_latest_in_repoinfos_for_stacks(stack_root, json_url)
@@ -1231,7 +1230,7 @@ def setup(options):
 def setup_jce_policy(args):
   logger.info("Setup JCE policy for ambari-server.")
   if not os.path.exists(args[1]):
-    err = "Can not run 'setup-jce'. Invalid path {0}.".format(args[1])
+    err = f"Can not run 'setup-jce'. Invalid path {args[1]}."
     raise FatalException(1, err)
 
   properties = get_ambari_properties()
@@ -1244,7 +1243,7 @@ def setup_jce_policy(args):
     try:
       shutil.copy(args[1], resources_dir)
     except Exception as e:
-      err = "Fail while trying to copy {0} to {1}. {2}".format(args[1], resources_dir, e)
+      err = f"Fail while trying to copy {args[1]} to {resources_dir}. {e}"
       raise FatalException(1, err)
 
   jdk_path = properties.get_property(JAVA_HOME_PROPERTY)
@@ -1259,7 +1258,7 @@ def setup_jce_policy(args):
   try:
     JDKSetup.unpack_jce_policy(jdk_path, resources_dir, zip_name)
   except FatalException as e:
-    err = 'Installing JCE failed: {0}. Exiting.'.format(e)
+    err = f'Installing JCE failed: {e}. Exiting.'
     raise FatalException(e.code, err)
 
   update_properties(properties)
@@ -1285,23 +1284,23 @@ def check_ambari_java_version_is_valid(java_home, java_bin, min_version, properties):
     )
     (out, err) = process.communicate()
     if process.returncode != 0:
-      err = "Checking JDK version command returned with exit code %s" % process.returncode
+      err = f"Checking JDK version command returned with exit code {process.returncode}"
      raise FatalException(process.returncode, err)
    else:
      actual_jdk_version = int(out)
-      print('JDK version found: {0}'.format(actual_jdk_version))
+      print(f'JDK version found: {actual_jdk_version}')
      if actual_jdk_version < min_version:
-        print('Minimum JDK version is {0} for Ambari. Setup JDK again only for Ambari Server.'.format(min_version))
+        print(f'Minimum JDK version is {min_version} for Ambari. Setup JDK again only for Ambari Server.')
        properties.process_pair(STACK_JAVA_VERSION, out)
        result = False
      else:
-        print('Minimum JDK version is {0} for Ambari. Skipping to setup different JDK for Ambari Server.'.format(min_version))
+        print(f'Minimum JDK version is {min_version} for Ambari. Skipping to setup different JDK for Ambari Server.')
 
   except FatalException as e:
-    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    err = f'Running java version check command failed: {e}. Exiting.'
     raise FatalException(e.code, err)
   except Exception as e:
-    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    err = f'Running java version check command failed: {e}. Exiting.'
     raise FatalException(1, err)
   return result
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index d7f0ee1293e..81d185eb68f 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -163,7 +163,7 @@ def run_schema_upgrade(args):
 
   check_gpl_license_approved(upgrade_response)
 
-  print_info_msg("Return code from schema upgrade command, retcode = {0}".format(str(retcode)), True)
+  print_info_msg(f"Return code from schema upgrade command, retcode = {str(retcode)}", True)
   if stdout:
     print_info_msg("Console output from schema upgrade command:", True)
     print_info_msg(stdout, True)
@@ -207,16 +207,16 @@ def move_user_custom_actions():
   custom_actions_dir_path = os.path.join(resources_dir, 'custom_actions')
   custom_actions_scripts_dir_path = os.path.join(custom_actions_dir_path, 'scripts')
-  print_info_msg('Moving *.py files from %s to %s' % (custom_actions_dir_path, custom_actions_scripts_dir_path))
+  print_info_msg(f'Moving *.py files from {custom_actions_dir_path} to {custom_actions_scripts_dir_path}')
 
   try:
     for custom_action_file_name in os.listdir(custom_actions_dir_path):
       custom_action_file_path = os.path.join(custom_actions_dir_path, custom_action_file_name)
       if os.path.isfile(custom_action_file_path) and custom_action_file_path.endswith('.py'):
-        print_info_msg('Moving %s to %s' % (custom_action_file_path, custom_actions_scripts_dir_path))
+        print_info_msg(f'Moving {custom_action_file_path} to {custom_actions_scripts_dir_path}')
         shutil.move(custom_action_file_path, custom_actions_scripts_dir_path)
   except (OSError, shutil.Error) as e:
-    err = 'Upgrade failed. Can not move *.py files from %s to %s. ' % (custom_actions_dir_path, custom_actions_scripts_dir_path) + str(e)
+    err = f'Upgrade failed. Can not move *.py files from {custom_actions_dir_path} to {custom_actions_scripts_dir_path}. ' + str(e)
     print_error_msg(err)
     raise FatalException(1, err)
@@ -225,13 +225,13 @@ def upgrade(args):
   if not is_root():
     err = configDefaults.MESSAGE_ERROR_UPGRADE_NOT_ROOT
     raise FatalException(4, err)
-  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_PROPERTIES_FILE), True)
+  print_info_msg(f'Updating Ambari Server properties in {AMBARI_PROPERTIES_FILE} ...', True)
   retcode = update_ambari_properties()
   if not retcode == 0:
     err = AMBARI_PROPERTIES_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)
 
-  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_ENV_FILE), True)
+  print_info_msg(f'Updating Ambari Server properties in {AMBARI_ENV_FILE} ...', True)
   retcode = update_ambari_env()
   if not retcode == 0:
     err = AMBARI_ENV_FILE + ' file can\'t be updated. Exiting'
@@ -241,7 +241,7 @@ def upgrade(args):
   if retcode == -2:
     pass  # no changes done, let's be silent
   elif retcode == 0:
-    print_info_msg("File {0} updated.".format(AMBARI_KRB_JAAS_LOGIN_FILE), True)
+    print_info_msg(f"File {AMBARI_KRB_JAAS_LOGIN_FILE} updated.", True)
   elif not retcode == 0:
     err = AMBARI_KRB_JAAS_LOGIN_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)
@@ -261,7 +261,7 @@ def upgrade(args):
 
   retcode = run_schema_upgrade(args)
   if not retcode == 0:
-    print_error_msg("Ambari server upgrade failed. Please look at {0}, for more details.".format(configDefaults.SERVER_LOG_FILE))
+    print_error_msg(f"Ambari server upgrade failed. Please look at {configDefaults.SERVER_LOG_FILE}, for more details.")
     raise FatalException(11, 'Schema upgrade failed.')
 
   user = read_ambari_user()
@@ -318,7 +318,7 @@ def upgrade(args):
 
   json_url = get_json_url_from_repo_file()
   if json_url:
-    print("Ambari repo file contains latest json url {0}, updating stacks repoinfos with it...".format(json_url))
+    print(f"Ambari repo file contains latest json url {json_url}, updating stacks repoinfos with it...")
     properties = get_ambari_properties()
     stack_root = get_stack_location(properties)
     update_latest_in_repoinfos_for_stacks(stack_root, json_url)
@@ -370,10 +370,10 @@ def set_current(options):
     raise FatalException(1, "Failed to read properties file.")
 
   base_url = get_ambari_server_api_base(properties)
-  url = base_url + "clusters/{0}/stack_versions".format(finalize_options.cluster_name)
-  admin_auth = base64.encodebytes(('%s:%s' % (admin_login, admin_password)).encode()).decode().replace('\n', '')
+  url = base_url + f"clusters/{finalize_options.cluster_name}/stack_versions"
+  admin_auth = base64.encodebytes(f'{admin_login}:{admin_password}'.encode()).decode().replace('\n', '')
   request = urllib.request.Request(url)
-  request.add_header('Authorization', 'Basic %s' % admin_auth)
+  request.add_header('Authorization', f'Basic {admin_auth}')
   request.add_header('X-Requested-By', 'ambari')
 
   data = {
@@ -399,7 +399,7 @@ def set_current(options):
                                                                                               code, content)
       raise FatalException(1, err)
   except Exception as e:
-    err = 'Setting current version failed. Error details: %s' % e
+    err = f'Setting current version failed. Error details: {e}'
     raise FatalException(1, err)
 
   sys.stdout.write('\nCurrent version successfully updated to ' + finalize_options.desired_repo_version)
diff --git a/ambari-server/src/main/python/ambari_server/serverUtils.py b/ambari-server/src/main/python/ambari_server/serverUtils.py
index d48422eb848..3e2e485b190 100644
--- a/ambari-server/src/main/python/ambari_server/serverUtils.py
+++ b/ambari-server/src/main/python/ambari_server/serverUtils.py
@@ -57,7 +57,7 @@ def is_server_runing():
       pid = f.readline().strip()
 
       if not pid.isdigit():
-        err = "'%s' is incorrect PID value. %s is corrupt. Removing" % (pid, pid_file_path)
+        err = f"'{pid}' is incorrect PID value. {pid_file_path} is corrupt. Removing"
         f.close()
         run_os_command("rm -f " + pid_file_path)
         raise NonFatalException(err)
@@ -113,8 +113,7 @@ def refresh_stack_hash(properties):
                                                 verbose=get_verbose()))
     resource_files_keeper.perform_housekeeping()
   except KeeperException as ex:
-    msg = "Can not organize resource files at {0}: {1}".format(
-      resources_location, str(ex))
+    msg = f"Can not organize resource files at {resources_location}: {str(ex)}"
     raise FatalException(-1, msg)
 
@@ -142,7 +141,7 @@ def get_ambari_server_api_base(properties):
     api_port_prop = properties.get_property(SSL_API_PORT)
     if api_port_prop is not None:
       api_port = api_port_prop
-  return '{0}://{1}:{2!s}/api/v1/'.format(api_protocol, api_host, api_port)
+  return f'{api_protocol}://{api_host}:{api_port!s}/api/v1/'
 
 def get_ambari_admin_username_password_pair(options):
@@ -176,7 +175,7 @@ def get_cluster_name(properties, admin_login, admin_password):
     items = json_data['items']
     if len(items) > 0:
       cluster_name = items[0]['Clusters']['cluster_name']
-      print_info_msg('Found cluster name: %s' % cluster_name)
+      print_info_msg(f'Found cluster name: {cluster_name}')
 
   return cluster_name
 
@@ -192,9 +191,9 @@ def get_json_via_rest_api(properties, admin_login, admin_password, entry_point):
   :return: HTTP status, JSON data
   """
   url = get_ambari_server_api_base(properties) + entry_point
-  admin_auth = base64.encodebytes(('%s:%s' % (admin_login, admin_password)).encode()).decode().replace('\n', '')
+  admin_auth = base64.encodebytes(f'{admin_login}:{admin_password}'.encode()).decode().replace('\n', '')
   request = urllib.request.Request(url)
-  request.add_header('Authorization', 'Basic %s' % admin_auth)
+  request.add_header('Authorization', f'Basic {admin_auth}')
   request.add_header('X-Requested-By', 'ambari')
   request.get_method = lambda: 'GET'
 
@@ -204,7 +203,7 @@ def get_json_via_rest_api(properties, admin_login, admin_password, entry_point):
   response_status_code = response.getcode()
   json_data = None
   print_info_msg(
-    "Received HTTP %s while fetching information from Ambari's REST API" % response_status_code)
+    f"Received HTTP {response_status_code} while fetching information from Ambari's REST API")
   if response_status_code == 200:
     json_data = json.loads(response.read())
     if (get_debug_mode()):
@@ -215,9 +214,9 @@ def get_json_via_rest_api(properties, admin_login, admin_password, entry_point):
 
 def perform_changes_via_rest_api(properties, admin_login, admin_password, url_postfix, get_method, request_data=None):
   url = get_ambari_server_api_base(properties) + url_postfix
-  admin_auth = base64.encodebytes(('%s:%s' % (admin_login, admin_password)).encode()).decode().replace('\n', '')
+  admin_auth = base64.encodebytes(f'{admin_login}:{admin_password}'.encode()).decode().replace('\n', '')
   request = urllib.request.Request(url)
-  request.add_header('Authorization', 'Basic %s' % admin_auth)
+  request.add_header('Authorization', f'Basic {admin_auth}')
   request.add_header('X-Requested-By', 'ambari')
   if request_data is not None:
     request.data=json.dumps(request_data)
@@ -302,7 +301,7 @@ def eligible(service_info, is_sso_integration):
     return service_info['ldap_integration_supported']
 
 def get_eligible_services(properties, admin_login, admin_password, cluster_name, entry_point, service_qualifier):
-  print_info_msg("Fetching %s enabled services" % service_qualifier)
+  print_info_msg(f"Fetching {service_qualifier} enabled services")
 
   safe_cluster_name = urllib.parse.quote(cluster_name)
 
@@ -314,9 +313,9 @@ def get_eligible_services(properties, admin_login, admin_password, cluster_name,
     services = [item['ServiceInfo']['service_name'] for item in json_data['items'] if eligible(item['ServiceInfo'], 'SSO' == service_qualifier)]
 
   if len(services) > 0:
-    print_info_msg('Found %s enabled services: %s' % (service_qualifier, ', '.join(services)))
+    print_info_msg(f"Found {service_qualifier} enabled services: {', '.join(services)}")
   else:
-    print_info_msg('No %s enabled services were found' % service_qualifier)
+    print_info_msg(f'No {service_qualifier} enabled services were found')
 
   return services
diff --git a/ambari-server/src/main/python/ambari_server/setupHttps.py b/ambari-server/src/main/python/ambari_server/setupHttps.py
index a63a61b6e8a..5cc8a139b76 100644
--- a/ambari-server/src/main/python/ambari_server/setupHttps.py
+++ b/ambari-server/src/main/python/ambari_server/setupHttps.py
@@ -89,7 +89,7 @@ def get_and_persist_truststore_path(properties, options):
     while not truststore_path:
       truststore_path = get_validated_string_input(
-        "Path to TrustStore file {0}:".format(get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)),
+        f"Path to TrustStore file {get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)}:",
         SSL_TRUSTSTORE_PATH_DEFAULT, ".*", False, False, answer = options.trust_store_path)
 
     if truststore_path:
@@ -102,7 +102,7 @@ def get_and_persist_truststore_type(properties, options):
   if not truststore_type:
     SSL_TRUSTSTORE_TYPE_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
     truststore_type = get_validated_string_input(
-      "TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)),
+      f"TrustStore type [jks/jceks/pkcs12] {get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)}:",
       SSL_TRUSTSTORE_TYPE_DEFAULT, "^(jks|jceks|pkcs12)?$", "Wrong type", False, answer = options.trust_store_type)
 
   if truststore_type:
diff --git a/ambari-server/src/main/python/ambari_server/setupMpacks.py b/ambari-server/src/main/python/ambari_server/setupMpacks.py
index 4319f84b8ab..9af5f200d72 100755
--- a/ambari-server/src/main/python/ambari_server/setupMpacks.py
+++ b/ambari-server/src/main/python/ambari_server/setupMpacks.py
@@ -129,7 +129,7 @@ def download_mpack(mpack_path):
   archive_filename = os.path.basename(mpack_path)
   tmp_archive_path = os.path.join(tmpdir, archive_filename)
 
-  print_info_msg("Download management pack to temp location {0}".format(tmp_archive_path))
+  print_info_msg(f"Download management pack to temp location {tmp_archive_path}")
   if os.path.exists(tmp_archive_path):
     os.remove(tmp_archive_path)
   if os.path.exists(mpack_path):
@@ -154,7 +154,7 @@ def expand_mpack(archive_path):
 
   # Expand management pack in temp directory
   tmp_root_dir = os.path.join(tmpdir, archive_root_dir)
-  print_info_msg("Expand management pack at temp location {0}".format(tmp_root_dir))
+  print_info_msg(f"Expand management pack at temp location {tmp_root_dir}")
   if os.path.exists(tmp_root_dir):
     sudo.rmtree(tmp_root_dir)
 
@@ -222,7 +222,7 @@ def _get_mpack_name_version(mpack_path):
   # Read mpack metadata
   mpack_metadata = read_mpack_metadata(tmp_root_dir)
   if not mpack_metadata:
-    raise FatalException(-1, 'Malformed management pack {0}. Metadata file missing!'.format(mpack_path))
+    raise FatalException(-1, f'Malformed management pack {mpack_path}. Metadata file missing!')
 
   return (mpack_metadata.name, mpack_metadata.version)
@@ -270,12 +270,12 @@ def remove_symlinks(stack_location, extension_location, service_definitions_location,
     for name in files:
       file = os.path.join(root, name)
       if os.path.islink(file) and staged_mpack_dir in os.path.realpath(file):
-        print_info_msg("Removing symlink {0}".format(file))
+        print_info_msg(f"Removing symlink {file}")
         sudo.unlink(file)
     for name in dirs:
       dir = os.path.join(root, name)
       if os.path.islink(dir) and staged_mpack_dir in os.path.realpath(dir):
-        print_info_msg("Removing symlink {0}".format(dir))
+        print_info_msg(f"Removing symlink {dir}")
         sudo.unlink(dir)
 
 def run_mpack_install_checker(options, mpack_stacks):
@@ -451,7 +451,7 @@ def create_dashboard_symlinks(src_service_dir, service_name, dashboard_location,
   dest_grafana_dashboards_dir = os.path.join(dashboard_location, GRAFANA_DASHBOARDS_DIRNAME)
   dest_service_dashboards_link = os.path.join(dest_grafana_dashboards_dir, service_name)
   if os.path.exists(dest_service_dashboards_link):
-    message = "Grafana dashboards already exist for service {0}.".format(service_name)
+    message = f"Grafana dashboards already exist for service {service_name}."
     print_warning_msg(message)
   else:
     create_symlink_using_path(src_grafana_dashboards_dir, dest_service_dashboards_link, options.force)
@@ -461,7 +461,7 @@ def create_dashboard_symlinks(src_service_dir, service_name, dashboard_location,
   if os.path.exists(src_metrics_file):
     dest_metrics_dir = os.path.join(dashboard_location, SERVICE_METRICS_DIRNAME)
     if os.path.exists(os.path.join(dest_metrics_dir, service_metrics_filename)):
-      message = "Service metrics already exist for service {0}.".format(service_name)
+      message = f"Service metrics already exist for service {service_name}."
       print_warning_msg(message)
     else:
       create_symlink(src_metrics_dir, dest_metrics_dir, service_metrics_filename, options.force)
@@ -577,7 +577,7 @@ def search_mpacks(mpack_name, max_mpack_version=None):
     if os.path.isdir(staged_mpack_dir):
       staged_mpack_metadata = read_mpack_metadata(staged_mpack_dir)
       if not staged_mpack_metadata:
-        print_error_msg("Skipping malformed management pack in directory {0}.".format(staged_mpack_dir))
+        print_error_msg(f"Skipping malformed management pack in directory {staged_mpack_dir}.")
         continue
       staged_mpack_name = staged_mpack_metadata.name
       staged_mpack_version = staged_mpack_metadata.version
@@ -603,7 +603,7 @@ def _uninstall_mpack(mpack_name, mpack_version):
   :param mpack_name: Management pack name
   :param mpack_version: Management pack version
   """
-  print_info_msg("Uninstalling management pack {0}-{1}".format(mpack_name, mpack_version))
+  print_info_msg(f"Uninstalling management pack {mpack_name}-{mpack_version}")
   # Get ambari mpack properties
   stack_location, extension_location, service_definitions_location, mpacks_staging_location, dashboard_location = get_mpack_properties()
   found = False
@@ -622,15 +622,15 @@ def _uninstall_mpack(mpack_name, mpack_version):
       staged_mpack_name = staged_mpack_metadata.name
       staged_mpack_version = staged_mpack_metadata.version
       if mpack_name == staged_mpack_name and compare_versions(staged_mpack_version, mpack_version, format=True) == 0:
-        print_info_msg("Removing management pack staging location {0}".format(staged_mpack_dir))
+        print_info_msg(f"Removing management pack staging location {staged_mpack_dir}")
         sudo.rmtree(staged_mpack_dir)
         remove_symlinks(stack_location, extension_location, service_definitions_location, dashboard_location, staged_mpack_dir)
         found = True
         break
   if not found:
-
print_error_msg("Management pack {0}-{1} is not installed!".format(mpack_name, mpack_version)) + print_error_msg(f"Management pack {mpack_name}-{mpack_version} is not installed!") else: - print_info_msg("Management pack {0}-{1} successfully uninstalled!".format(mpack_name, mpack_version)) + print_info_msg(f"Management pack {mpack_name}-{mpack_version} successfully uninstalled!") def validate_mpack_prerequisites(mpack_metadata): """ @@ -688,7 +688,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False): print_error_msg("Management pack not specified!") raise FatalException(-1, 'Management pack not specified!') - print_info_msg("Installing management pack {0}".format(mpack_path)) + print_info_msg(f"Installing management pack {mpack_path}") # Download management pack to a temp location tmp_archive_path = download_mpack(mpack_path) @@ -702,7 +702,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False): # Read mpack metadata mpack_metadata = read_mpack_metadata(tmp_root_dir) if not mpack_metadata: - raise FatalException(-1, 'Malformed management pack {0}. Metadata file missing!'.format(mpack_path)) + raise FatalException(-1, f'Malformed management pack {mpack_path}. Metadata file missing!') # Validate management pack prerequisites # Skip validation in replay mode @@ -767,10 +767,10 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False): mpack_name, mpack_version, mpack_staging_dir)) if os.path.exists(mpack_staging_dir): if options.force: - print_info_msg("Force removing previously installed management pack from {0}".format(mpack_staging_dir)) + print_info_msg(f"Force removing previously installed management pack from {mpack_staging_dir}") sudo.rmtree(mpack_staging_dir) else: - error_msg = "Management pack {0}-{1} already installed!".format(mpack_name, mpack_version) + error_msg = f"Management pack {mpack_name}-{mpack_version} already installed!" print_error_msg(error_msg) raise FatalException(-1, error_msg) shutil.move(tmp_root_dir, mpack_staging_dir) @@ -797,7 +797,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False): elif artifact.type == STACK_ADDON_SERVICE_DEFINITIONS_ARTIFACT_NAME: process_stack_addon_service_definitions_artifact(artifact, artifact_source_dir, options) else: - print_info_msg("Unknown artifact {0} of type {1}".format(artifact_name, artifact_type)) + print_info_msg(f"Unknown artifact {artifact_name} of type {artifact_type}") ambari_user = read_ambari_user() @@ -809,17 +809,17 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False): mod = pack[1] user = pack[2].format(ambari_user) recursive = pack[3] - logger.info("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive)) + logger.info(f"Setting file permissions: {file} {mod} {user} {recursive}") set_file_permissions(file, mod, user, recursive) for pack in change_ownership_list: path = pack[0] user = pack[1].format(ambari_user) recursive = pack[2] - logger.info("Changing ownership: {0} {1} {2}".format(path, user, recursive)) + logger.info(f"Changing ownership: {path} {user} {recursive}") change_owner(path, user, recursive) - print_info_msg("Management pack {0}-{1} successfully installed! Please restart ambari-server.".format(mpack_name, mpack_version)) + print_info_msg(f"Management pack {mpack_name}-{mpack_version} successfully installed! 
Please restart ambari-server.") return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path # TODO @@ -830,7 +830,7 @@ def _execute_hook(mpack_metadata, hook_name, base_dir): if hook_name == hook.name: hook_script = os.path.join(base_dir, hook.script) if os.path.exists(hook_script): - print_info_msg("Executing {0} hook script : {1}".format(hook_name, hook_script)) + print_info_msg(f"Executing {hook_name} hook script : {hook_script}") if hook.type == PYTHON_HOOK_TYPE: command = ["/usr/bin/ambari-python-wrap", hook_script] elif hook.type == SHELL_HOOK_TYPE: @@ -872,7 +872,7 @@ def add_replay_log(mpack_command, mpack_archive_path, purge, purge_list, force, replay_log_file = get_replay_log_file() log = { 'mpack_command' : mpack_command, 'mpack_path' : mpack_archive_path, 'purge' : purge, 'purge_list': purge_list, 'force' : force, 'verbose' : verbose } with open(replay_log_file, "a") as replay_log: - replay_log.write("{0}\n".format(log)) + replay_log.write(f"{log}\n") def remove_replay_logs(mpack_name): replay_log_file = get_replay_log_file() @@ -887,7 +887,7 @@ def remove_replay_logs(mpack_name): logs.append(log) with open(replay_log_file, "w") as replay_log: for log in logs: - replay_log.write("{0}\n".format(log)) + replay_log.write(f"{log}\n") def install_mpack(options, replay_mode=False): """ @@ -927,7 +927,7 @@ def uninstall_mpack(options, replay_mode=False): _uninstall_mpacks(mpack_name) - print_info_msg("Management pack {0} successfully uninstalled!".format(mpack_name)) + print_info_msg(f"Management pack {mpack_name} successfully uninstalled!") if not replay_mode: remove_replay_logs(mpack_name) @@ -950,7 +950,7 @@ def upgrade_mpack(options, replay_mode=False): print_error_msg("No management packs found that can be upgraded!") raise FatalException(-1, 'No management packs found that can be upgraded!') - print_info_msg("Upgrading management pack {0}".format(mpack_path)) + print_info_msg(f"Upgrading management pack {mpack_path}") # Force install new management pack version options.force = True @@ -962,7 +962,7 @@ def upgrade_mpack(options, replay_mode=False): # Execute post upgrade hook _execute_hook(mpack_metadata, AFTER_UPGRADE_HOOK_NAME, mpack_staging_dir) - print_info_msg("Management pack {0}-{1} successfully upgraded!".format(mpack_name, mpack_version)) + print_info_msg(f"Management pack {mpack_name}-{mpack_version} successfully upgraded!") if not replay_mode: add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, False, [], options.force, options.verbose) @@ -987,7 +987,7 @@ def replay_mpack_logs(): elif replay_options.mpack_command == UPGRADE_MPACK_ACTION: upgrade_mpack(replay_options, replay_mode=True) else: - error_msg = "Invalid mpack command {0} in mpack replay log {1}!".format(replay_options.mpack_command, replay_log_file) + error_msg = f"Invalid mpack command {replay_options.mpack_command} in mpack replay log {replay_log_file}!" 
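# Editor's note, not part of the patch: a minimal, self-contained sketch of the
# conversion this series of hunks performs -- replacing "%" and str.format()
# templates with f-strings. The names below (mpack_name, mpack_version,
# LDAP_TYPES) are placeholders for illustration, not the Ambari definitions.
mpack_name, mpack_version = "HDF", "3.1.0"
old_style = "Management pack {0}-{1} successfully uninstalled!".format(mpack_name, mpack_version)
new_style = f"Management pack {mpack_name}-{mpack_version} successfully uninstalled!"
assert old_style == new_style  # behaviour is unchanged; only the template style differs

# Quotes nested inside an f-string expression have to differ from the outer
# quotes, which is why hunks such as REGEX_LDAP_TYPE switch quoting styles:
LDAP_TYPES = ["AD", "IPA", "Generic"]
assert f"^({'|'.join(LDAP_TYPES)})?$" == "^({})?$".format("|".join(LDAP_TYPES))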
print_error_msg(error_msg) raise FatalException(-1, error_msg) else: diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py index 370f04272e7..5f3b7c6c571 100644 --- a/ambari-server/src/main/python/ambari_server/setupSecurity.py +++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py @@ -76,7 +76,7 @@ REGEX_TRUE_FALSE = "^(true|false)?$" REGEX_SKIP_CONVERT = "^(skip|convert)?$" REGEX_REFERRAL = "^(follow|ignore)?$" -REGEX_LDAP_TYPE = "^({})?$".format("|".join(LDAP_TYPES)) +REGEX_LDAP_TYPE = f"^({'|'.join(LDAP_TYPES)})?$" REGEX_ANYTHING = ".*" LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \ "org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \ @@ -153,7 +153,7 @@ def adjust_directory_permissions(ambari_user): properties = get_ambari_properties() bootstrap_dir = os.path.abspath(get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY)) - print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir)) + print_info_msg(f"Cleaning bootstrap directory ({bootstrap_dir}) contents...") if os.path.exists(bootstrap_dir): shutil.rmtree(bootstrap_dir) #Ignore the non-existent dir error @@ -162,7 +162,7 @@ def adjust_directory_permissions(ambari_user): try: os.makedirs(bootstrap_dir) except Exception as ex: - print_warning_msg("Failed recreating the bootstrap directory: {0}".format(str(ex))) + print_warning_msg(f"Failed recreating the bootstrap directory: {str(ex)}") pass else: print_warning_msg("Bootstrap directory lingering around after 5s. Unable to complete the cleanup.") @@ -217,14 +217,14 @@ def adjust_directory_permissions(ambari_user): mod = pack[1] user = pack[2].format(ambari_user) recursive = pack[3] - print_info_msg("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive)) + print_info_msg(f"Setting file permissions: {file} {mod} {user} {recursive}") set_file_permissions(file, mod, user, recursive) for pack in configDefaults.NR_CHANGE_OWNERSHIP_LIST: path = pack[0] user = pack[1].format(ambari_user) recursive = pack[2] - print_info_msg("Changing ownership: {0} {1} {2}".format(path, user, recursive)) + print_info_msg(f"Changing ownership: {path} {user} {recursive}") change_owner(path, user, recursive) def configure_ldap_password(ldap_manager_password_option, interactive_mode): @@ -305,9 +305,9 @@ def get_ldap_property_from_db(properties, admin_login, admin_password, property_ def get_ldap_properties_from_db(properties, admin_login, admin_password): ldap_properties = None url = get_ambari_server_api_base(properties) + SETUP_LDAP_CONFIG_URL - admin_auth = base64.b64encode('{}:{}'.format(admin_login, admin_password).encode()).decode().replace('\n', '') + admin_auth = base64.b64encode(f'{admin_login}:{admin_password}'.encode()).decode().replace('\n', '') request = urllib.request.Request(url) - request.add_header('Authorization', 'Basic %s' % admin_auth) + request.add_header('Authorization', f'Basic {admin_auth}') request.add_header('X-Requested-By', 'ambari') request.get_method = lambda: 'GET' request_in_progress = True @@ -339,10 +339,10 @@ def get_ldap_properties_from_db(properties, admin_login, admin_password): if e.code == 404: sys.stdout.write(' No configuration.') return None - err = 'Error while fetching LDAP configuration. Error details: %s' % e + err = f'Error while fetching LDAP configuration. Error details: {e}' raise FatalException(1, err) except Exception as e: - err = 'Error while fetching LDAP configuration. 
Error details: %s' % e + err = f'Error while fetching LDAP configuration. Error details: {e}' raise FatalException(1, err) return ldap_properties @@ -396,9 +396,9 @@ def sync_ldap(options): raise FatalException(1, err) url = get_ambari_server_api_base(properties) + SERVER_API_LDAP_URL - admin_auth = base64.encodebytes(('%s:%s' % (admin_login, admin_password)).encode()).decode().replace('\n', '') + admin_auth = base64.encodebytes(f'{admin_login}:{admin_password}'.encode()).decode().replace('\n', '') request = urllib.request.Request(url) - request.add_header('Authorization', 'Basic %s' % admin_auth) + request.add_header('Authorization', f'Basic {admin_auth}') request.add_header('X-Requested-By', 'ambari') if ldap_sync_options.ldap_sync_all: @@ -434,7 +434,7 @@ def sync_ldap(options): try: response = urllib.request.urlopen(request, context=get_ssl_context(properties)) except Exception as e: - err = 'Sync event creation failed. Error details: %s' % e + err = f'Sync event creation failed. Error details: {e}' raise FatalException(1, err) response_status_code = response.getcode() @@ -445,7 +445,7 @@ def sync_ldap(options): url = response_body['resources'][0]['href'] request = urllib.request.Request(url) - request.add_header('Authorization', 'Basic %s' % admin_auth) + request.add_header('Authorization', f'Basic {admin_auth}') request.add_header('X-Requested-By', 'ambari') body = [{"LDAP":{"synced_groups":"*","synced_users":"*"}}] request.data=json.dumps(body) @@ -460,7 +460,7 @@ def sync_ldap(options): response = urllib.request.urlopen(request, context=get_ssl_context(properties)) except Exception as e: request_in_progress = False - err = 'Sync event check failed. Error details: %s' % e + err = f'Sync event check failed. Error details: {e}' raise FatalException(1, err) response_status_code = response.getcode() @@ -476,9 +476,9 @@ def sync_ldap(options): print('\n\nCompleted LDAP Sync.') print('Summary:') for principal_type, summary in sync_info['summary'].items(): - print(' {0}:'.format(principal_type)) + print(f' {principal_type}:') for action, amount in summary.items(): - print(' {0} = {1!s}'.format(action, amount)) + print(f' {action} = {amount!s}') request_in_progress = False else: time.sleep(1) @@ -635,7 +635,7 @@ def setup_master_key(masterKeyFile, options, properties, resetKey): print_info_msg("Deleting master key file at location: " + str( masterKeyFile)) except Exception as e: - print('ERROR: Could not remove master key file. %s' % e) + print(f'ERROR: Could not remove master key file. 
{e}') # Blow up the credential store made with previous key, if any store_file = get_credential_store_location(properties) if os.path.exists(store_file): @@ -803,7 +803,7 @@ def should_query_ldap_type(ldap_property_list_reqd): return False def query_ldap_type(ldap_type_option): - return get_validated_string_input("Please select the type of LDAP you want to use [{}]({}):".format("/".join(LDAP_TYPES), LDAP_GENERIC), + return get_validated_string_input(f"Please select the type of LDAP you want to use [{'/'.join(LDAP_TYPES)}]({LDAP_GENERIC}):", LDAP_GENERIC, REGEX_LDAP_TYPE, "Please enter one of the followings '{}'!".format("', '".join(LDAP_TYPES)), @@ -903,7 +903,7 @@ def setup_ldap(options): ts_password = None if ldaps: - disable_endpoint_identification = get_validated_string_input("Disable endpoint identification during SSL handshake [true/false] ({0}): ".format(disable_endpoint_identification_default), + disable_endpoint_identification = get_validated_string_input(f"Disable endpoint identification during SSL handshake [true/false] ({disable_endpoint_identification_default}): ", disable_endpoint_identification_default, REGEX_TRUE_FALSE, "Invalid characters in the input!", False, allowEmpty=True, answer=options.ldap_sync_disable_endpoint_identification) if interactive_mode else options.ldap_sync_disable_endpoint_identification @@ -920,7 +920,7 @@ def setup_ldap(options): format(truststore_default), truststore_set) if interactive_mode else None if custom_trust_store: - ts_type = get_validated_string_input("TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(ssl_truststore_type_default)), + ts_type = get_validated_string_input(f"TrustStore type [jks/jceks/pkcs12] {get_prompt_default(ssl_truststore_type_default)}:", ssl_truststore_type_default, "^(jks|jceks|pkcs12)?$", "Wrong type", False, answer=options.trust_store_type) if interactive_mode else options.trust_store_type ts_path = None while True: @@ -959,21 +959,21 @@ def setup_ldap(options): print('=' * 20) for property in ldap_property_list_reqd: if property.prop_name in ldap_property_value_map: - print("%s %s" % (property.get_prompt_text(ldap_type), ldap_property_value_map[property.prop_name])) + print(f"{property.get_prompt_text(ldap_type)} {ldap_property_value_map[property.prop_name]}") for property in ldap_property_list_opt: if property in ldap_property_value_map: if property not in ldap_property_list_passwords: - print("%s: %s" % (property, ldap_property_value_map[property])) + print(f"{property}: {ldap_property_value_map[property]}") else: - print("%s: %s" % (property, BLIND_PASSWORD)) + print(f"{property}: {BLIND_PASSWORD}") for property in ldap_property_list_opt: if property in ldap_property_values_in_ambari_properties: if property not in ldap_property_list_passwords: - print("%s: %s" % (property, ldap_property_values_in_ambari_properties[property])) + print(f"{property}: {ldap_property_values_in_ambari_properties[property]}") else: - print("%s: %s" % (property, BLIND_PASSWORD)) + print(f"{property}: {BLIND_PASSWORD}") save_settings = True if options.ldap_save_settings is not None else get_YN_input("Save settings [y/n] (y)? ", True) @@ -1183,7 +1183,7 @@ def migrate_ldap_pam(args): def populate_ambari_requires_ldap(options, properties): if options.ldap_enabled_ambari is None: enabled = get_boolean_from_dictionary(properties, AMBARI_LDAP_AUTH_ENABLED, False) - enabled = get_YN_input("Use LDAP authentication for Ambari [y/n] ({0})? 
".format('y' if enabled else 'n'), enabled) + enabled = get_YN_input(f"Use LDAP authentication for Ambari [y/n] ({'y' if enabled else 'n'})? ", enabled) else: enabled = 'true' == options.ldap_enabled_ambari @@ -1195,17 +1195,17 @@ def populate_service_management(options, properties, ambari_properties, admin_lo if options.ldap_enabled_services is None: if options.ldap_manage_services is None: manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False) - manage_services = get_YN_input("Manage LDAP configurations for eligible services [y/n] ({0})? ".format('y' if manage_services else 'n'), manage_services) + manage_services = get_YN_input(f"Manage LDAP configurations for eligible services [y/n] ({'y' if manage_services else 'n'})? ", manage_services) else: manage_services = 'true' == options.ldap_manage_services stored_manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False) - print("Manage LDAP configurations for eligible services [y/n] ({0})? {1}".format('y' if stored_manage_services else 'n', 'y' if manage_services else 'n')) + print(f"Manage LDAP configurations for eligible services [y/n] ({'y' if stored_manage_services else 'n'})? {'y' if manage_services else 'n'}") if manage_services: enabled_services = get_value_from_dictionary(properties, LDAP_ENABLED_SERVICES, "").upper().split(',') all = WILDCARD_FOR_ALL_SERVICES in enabled_services - configure_for_all_services = get_YN_input(" Manage LDAP for all services [y/n] ({0})? ".format('y' if all else 'n'), all) + configure_for_all_services = get_YN_input(f" Manage LDAP for all services [y/n] ({'y' if all else 'n'})? ", all) if configure_for_all_services: services = WILDCARD_FOR_ALL_SERVICES else: @@ -1219,7 +1219,7 @@ def populate_service_management(options, properties, ambari_properties, admin_lo for service in eligible_services: enabled = service.upper() in enabled_services - question = " Manage LDAP for {0} [y/n] ({1})? ".format(service, 'y' if enabled else 'n') + question = f" Manage LDAP for {service} [y/n] ({'y' if enabled else 'n'})? 
" if get_YN_input(question, enabled): service_list.append(service) diff --git a/ambari-server/src/main/python/ambari_server/setupSso.py b/ambari-server/src/main/python/ambari_server/setupSso.py index 95153b692dd..bb1e02cadee 100644 --- a/ambari-server/src/main/python/ambari_server/setupSso.py +++ b/ambari-server/src/main/python/ambari_server/setupSso.py @@ -83,7 +83,7 @@ def validate_options(options): def populate_sso_provider_url(options, properties): if not options.sso_provider_url: provider_url = get_value_from_dictionary(properties, SSO_PROVIDER_URL, SSO_PROVIDER_URL_DEFAULT) - provider_url = get_validated_string_input("Provider URL ({0}): ".format(provider_url), provider_url, REGEX_URL, + provider_url = get_validated_string_input(f"Provider URL ({provider_url}): ", provider_url, REGEX_URL, "Invalid provider URL", False) else: provider_url = options.sso_provider_url @@ -108,7 +108,7 @@ def populate_sso_public_cert(options, properties): def populate_jwt_cookie_name(options, properties): if not options.sso_jwt_cookie_name and (not options.sso_provider_url or not options.sso_public_cert_file): cookie_name = get_value_from_dictionary(properties, JWT_COOKIE_NAME, JWT_COOKIE_NAME_DEFAULT) - cookie_name = get_validated_string_input("JWT Cookie name ({0}): ".format(cookie_name), cookie_name, REGEX_ANYTHING, + cookie_name = get_validated_string_input(f"JWT Cookie name ({cookie_name}): ", cookie_name, REGEX_ANYTHING, "Invalid cookie name", False) else: cookie_name = options.sso_jwt_cookie_name if options.sso_jwt_cookie_name else JWT_COOKIE_NAME_DEFAULT @@ -119,7 +119,7 @@ def populate_jwt_cookie_name(options, properties): def populate_jwt_audiences(options, properties): if options.sso_jwt_audience_list is None and (not options.sso_provider_url or not options.sso_public_cert_file): audiences = get_value_from_dictionary(properties, JWT_AUDIENCES, JWT_AUDIENCES_DEFAULT) - audiences = get_validated_string_input("JWT audiences list (comma-separated), empty for any ({0}): ".format(audiences), audiences, + audiences = get_validated_string_input(f"JWT audiences list (comma-separated), empty for any ({audiences}): ", audiences, REGEX_ANYTHING, "Invalid value", False) else: audiences = options.sso_jwt_audience_list if options.sso_jwt_audience_list else JWT_AUDIENCES_DEFAULT @@ -129,7 +129,7 @@ def populate_jwt_audiences(options, properties): def populate_ambari_requires_sso(options, properties): if options.sso_enabled_ambari is None: enabled = get_boolean_from_dictionary(properties, AMBARI_SSO_AUTH_ENABLED, False) - enabled = get_YN_input("Use SSO for Ambari [y/n] ({0})? ".format('y' if enabled else 'n'), enabled) + enabled = get_YN_input(f"Use SSO for Ambari [y/n] ({'y' if enabled else 'n'})? ", enabled) else: enabled = 'true' == options.sso_enabled_ambari @@ -139,7 +139,7 @@ def populate_service_management(options, properties, ambari_properties, admin_lo if not options.sso_enabled_services: if not options.sso_manage_services: manage_services = get_boolean_from_dictionary(properties, SSO_MANAGE_SERVICES, False) - manage_services = get_YN_input("Manage SSO configurations for eligible services [y/n] ({0})? ".format('y' if manage_services else 'n'), manage_services) + manage_services = get_YN_input(f"Manage SSO configurations for eligible services [y/n] ({'y' if manage_services else 'n'})? 
", manage_services) else: manage_services = 'true' == options.sso_manage_services @@ -152,7 +152,7 @@ def populate_service_management(options, properties, ambari_properties, admin_lo enabled_services = get_value_from_dictionary(properties, SSO_ENABLED_SERVICES, "").upper().split(',') all = "*" in enabled_services - configure_for_all_services = get_YN_input(" Use SSO for all services [y/n] ({0})? ".format('y' if all else 'n'), all) + configure_for_all_services = get_YN_input(f" Use SSO for all services [y/n] ({'y' if all else 'n'})? ", all) if configure_for_all_services: services = WILDCARD_FOR_ALL_SERVICES else: @@ -166,7 +166,7 @@ def populate_service_management(options, properties, ambari_properties, admin_lo for service in eligible_services: enabled = service.upper() in enabled_services - question = " Use SSO for {0} [y/n] ({1})? ".format(service, 'y' if enabled else 'n') + question = f" Use SSO for {service} [y/n] ({'y' if enabled else 'n'})? " if get_YN_input(question, enabled): service_list.append(service) @@ -253,7 +253,7 @@ def setup_sso(options): sso_status = "disabled" else: sso_status = "not configured" - sys.stdout.write("\nSSO is currently %s\n" % sso_status) + sys.stdout.write(f"\nSSO is currently {sso_status}\n") if sso_status == "enabled": enable_sso = not get_YN_input("Do you want to disable SSO authentication [y/n] (n)? ", False) diff --git a/ambari-server/src/main/python/ambari_server/setupTrustedProxy.py b/ambari-server/src/main/python/ambari_server/setupTrustedProxy.py index 4b259348c0f..2e87ae6f7ab 100644 --- a/ambari-server/src/main/python/ambari_server/setupTrustedProxy.py +++ b/ambari-server/src/main/python/ambari_server/setupTrustedProxy.py @@ -61,7 +61,7 @@ def get_trusted_proxy_properties(ambari_properties, admin_login, admin_password) def populate_tproxy_configuration_property(properties, tproxy_user_name, property_name, question_text_qualifier): resolved_property_name = property_name.format(tproxy_user_name) resolved_property_value = get_value_from_dictionary(properties, resolved_property_name, WILDCARD_FOR_ALL) - resolved_property_value = get_validated_string_input("Allowed {0} for {1} ({2})? ".format(question_text_qualifier, tproxy_user_name, resolved_property_value), resolved_property_value, REGEX_ANYTHING, "Invalid input", False) + resolved_property_value = get_validated_string_input(f"Allowed {question_text_qualifier} for {tproxy_user_name} ({resolved_property_value})? ", resolved_property_value, REGEX_ANYTHING, "Invalid input", False) properties[resolved_property_name] = resolved_property_value @@ -108,7 +108,7 @@ def validate_options(options): if options.tproxy_configuration_file_path and options.tproxy_configuration_file_path is not None: if not os.path.isfile(options.tproxy_configuration_file_path): - errors.append("--tproxy-configuration-file-path is set to a non-existing file: {}".format(options.tproxy_configuration_file_path)) + errors.append(f"--tproxy-configuration-file-path is set to a non-existing file: {options.tproxy_configuration_file_path}") if len(errors) > 0: error_msg = "The following errors occurred while processing your request: {0}" @@ -141,7 +141,7 @@ def setup_trusted_proxy(options): tproxy_status = "disabled" else: tproxy_status = "not configured" - print_info_msg("\nTrusted Proxy support is currently %s\n" % tproxy_status) + print_info_msg(f"\nTrusted Proxy support is currently {tproxy_status}\n") if tproxy_status == "enabled": enable_tproxy = not get_YN_input("Do you want to disable Trusted Proxy support [y/n] (n)? 
", False) diff --git a/ambari-server/src/main/python/ambari_server/userInput.py b/ambari-server/src/main/python/ambari_server/userInput.py index 29f916e097a..b814d232a3f 100644 --- a/ambari-server/src/main/python/ambari_server/userInput.py +++ b/ambari-server/src/main/python/ambari_server/userInput.py @@ -134,7 +134,7 @@ def get_validated_filepath_input(prompt, description, default = None, answer = N def get_multi_line_input(prompt, end_line=""): full_prompt = prompt if end_line: - full_prompt += " ([{0}] to finish input):".format(end_line) + full_prompt += f" ([{end_line}] to finish input):" else: full_prompt += " (empty line to finish input):".format(end_line) diff --git a/ambari-server/src/main/python/ambari_server/utils.py b/ambari-server/src/main/python/ambari_server/utils.py index 4b585f966ad..216cf375a4a 100644 --- a/ambari-server/src/main/python/ambari_server/utils.py +++ b/ambari-server/src/main/python/ambari_server/utils.py @@ -59,7 +59,7 @@ def get_pg_hba_init_files(): elif OSCheck.is_suse_family(): return '/etc/init.d/postgresql' else: - raise Exception("Unsupported OS family '{0}'".format(OSCheck.get_os_family())) + raise Exception(f"Unsupported OS family '{OSCheck.get_os_family()}'") # ToDo: move that function to common-functions @@ -113,7 +113,7 @@ def save_pid(pid, pidfile): """ try: pfile = open(pidfile, "w") - pfile.write("%s\n" % pid) + pfile.write(f"{pid}\n") except IOError as e: logger.error("Failed to write PID to " + pidfile + " due to " + str(e)) pass @@ -140,7 +140,7 @@ def save_main_pid_ex(pids, pidfile, exclude_list=[], skip_daemonize=False): pfile = open(pidfile, "w") for item in pids: if pid_exists(item["pid"]) and (item["exe"] not in exclude_list): - pfile.write("%s\n" % item["pid"]) + pfile.write(f"{item['pid']}\n") pid_saved = item["pid"] logger.info("Ambari server started with PID " + str(item["pid"])) if pid_exists(item["pid"]) and (item["exe"] in exclude_list) and not skip_daemonize: @@ -272,7 +272,7 @@ def get_postgre_hba_dir(OS_FAMILY): pg_hba_init_basename = os.path.basename(get_pg_hba_init_files()) # Get postgres_data location (default: /var/lib/pgsql/data) - cmd = "alias basename='echo {0}; true' ; alias exit=return; source {1} status &>/dev/null; echo $PGDATA".format(pg_hba_init_basename, get_pg_hba_init_files()) + cmd = f"alias basename='echo {pg_hba_init_basename}; true' ; alias exit=return; source {get_pg_hba_init_files()} status &>/dev/null; echo $PGDATA" p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py index ffd898b2ca5..ca9e1ee1969 100644 --- a/ambari-server/src/main/python/ambari_server_main.py +++ b/ambari-server/src/main/python/ambari_server_main.py @@ -165,7 +165,7 @@ def generate_child_process_param_list(ambari_user, java_exe, class_path, command_base = SERVER_START_CMD_DEBUG if debug_start else SERVER_START_CMD - ulimit_cmd = "%s %s" % (ULIMIT_CMD, str(get_ulimit_open_files(properties))) + ulimit_cmd = f"{ULIMIT_CMD} {str(get_ulimit_open_files(properties))}" command = command_base.format(java_exe, ambari_provider_module_option, jvm_args, @@ -189,7 +189,7 @@ def generate_child_process_param_list(ambari_user, java_exe, class_path, sh_shell=locate_file('sh', '/bin'), command=command, ambari_env_file=AMBARI_ENV_FILE) else: - cmd = "{ulimit_cmd} ; {command}".format(ulimit_cmd=ulimit_cmd, command=command) + cmd = f"{ulimit_cmd} ; {command}" param_list.append(cmd) return param_list diff --git 
a/ambari-server/src/main/python/azuredb_create_generator.py b/ambari-server/src/main/python/azuredb_create_generator.py index fddc77506e8..3f5669e434a 100755 --- a/ambari-server/src/main/python/azuredb_create_generator.py +++ b/ambari-server/src/main/python/azuredb_create_generator.py @@ -76,7 +76,7 @@ deletes = [] for table in inserts: if table not in tables: - deletes.append(" DELETE {0};".format(table)) + deletes.append(f" DELETE {table};") tables.add(table) deletes.reverse() delete_sql = "\n".join(deletes) diff --git a/ambari-server/src/main/python/bootstrap.py b/ambari-server/src/main/python/bootstrap.py index 13aed53801c..94515b86726 100755 --- a/ambari-server/src/main/python/bootstrap.py +++ b/ambari-server/src/main/python/bootstrap.py @@ -220,7 +220,7 @@ def _call(*args, **kwargs): return _call def try_to_execute(self, action): - last_retcode = {"exitstatus": 177, "log":"Try to execute '{0}'".format(str(action)), "errormsg":"Execute of '{0}' failed".format(str(action))} + last_retcode = {"exitstatus": 177, "log":f"Try to execute '{str(action)}'", "errormsg":f"Execute of '{str(action)}' failed"} try: retcode = action() if isinstance(retcode, int): @@ -289,7 +289,7 @@ def createTargetDir(self): self.host_log.write("==========================\n") self.host_log.write("Creating target directory...") command = os.path.join(self.shared_state.script_dir, self.CREATE_REMOTING_DIR_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, params="{0} {1}".format(self.host, self.getTempFolder())) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} {self.getTempFolder()}") retcode = psr.run() self.host_log.write("\n") return retcode @@ -300,7 +300,7 @@ def unzippingBootstrapArchive(self): self.host_log.write("==========================\n") self.host_log.write("Unzipping bootstrap archive...") command = os.path.join(self.shared_state.script_dir, self.UNZIP_REMOTING_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, params="{0} {1} {2}".format(self.host, zipFile, self.getTempFolder())) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} {zipFile} {self.getTempFolder()}") result = psr.run() self.host_log.write("\n") return result @@ -312,7 +312,7 @@ def copyBootstrapArchive(self): self.host_log.write("==========================\n") self.host_log.write("Copying bootstrap archive...") command = os.path.join(self.shared_state.script_dir, self.SEND_REMOTING_FILE_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, params="{0} {1} {2}".format(self.host, fileToCopy, target)) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} {fileToCopy} {target}") result = psr.run() self.host_log.write("\n") return result @@ -324,7 +324,7 @@ def copyChocolateyConfig(self): self.host_log.write("==========================\n") self.host_log.write("Copying chocolatey config file...") command = os.path.join(self.shared_state.script_dir, self.SEND_REMOTING_FILE_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, params="{0} {1} {2}".format(self.host, fileToCopy, target)) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} {fileToCopy} {target}") result = psr.run() self.host_log.write("\n") return result @@ -334,7 +334,7 @@ def configureChocolatey(self): self.host_log.write("Running configure chocolatey script...") tmpConfig = os.path.join(self.getTempFolder(), self.CHOCOLATEY_CONFIG_FILENAME) command = os.path.join(self.shared_state.script_dir, self.CONFIGURE_CHOCOLATEY_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, 
params="{0} {1}".format(self.host, tmpConfig)) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} {tmpConfig}") result = psr.run() self.host_log.write("\n") return result @@ -351,7 +351,7 @@ def runSetupAgent(self): self.host_log.write("==========================\n") self.host_log.write("Running setup agent script...") command = os.path.join(self.shared_state.script_dir, self.RUN_REMOTING_SCRIPT_NAME) - psr = PSR(command, self.host, self.host_log, params="{0} \"{1}\"".format(self.host, self.getRunSetupCommand(self.host))) + psr = PSR(command, self.host, self.host_log, params=f"{self.host} \"{self.getRunSetupCommand(self.host)}\"") retcode = psr.run() self.host_log.write("\n") return retcode @@ -429,7 +429,7 @@ def getRepoDir(self): elif OSCheck.is_ubuntu_family(): return "/etc/apt/sources.list.d" else: - raise Exception("Unsupported OS family '{0}'".format(OSCheck.get_os_family())) + raise Exception(f"Unsupported OS family '{OSCheck.get_os_family()}'") def getRepoFile(self): """ Ambari repo file for Ambari.""" @@ -508,12 +508,12 @@ def copyCommonFunctions(self): return result def getMoveRepoFileWithPasswordCommand(self, targetDir): - return "{sudo} -S mv ".format(sudo=AMBARI_SUDO) + str(self.getRemoteName(self.AMBARI_REPO_FILENAME)) \ + return f"{AMBARI_SUDO} -S mv " + str(self.getRemoteName(self.AMBARI_REPO_FILENAME)) \ + " " + os.path.join(str(targetDir), self.AMBARI_REPO_FILENAME) + \ " < " + str(self.getPasswordFile()) def getMoveRepoFileWithoutPasswordCommand(self, targetDir): - return "{sudo} mv ".format(sudo=AMBARI_SUDO) + str(self.getRemoteName(self.AMBARI_REPO_FILENAME)) \ + return f"{AMBARI_SUDO} mv " + str(self.getRemoteName(self.AMBARI_REPO_FILENAME)) \ + " " + os.path.join(str(targetDir), self.AMBARI_REPO_FILENAME) def getMoveRepoFileCommand(self, targetDir): @@ -527,7 +527,7 @@ def getAptUpdateCommand(self): (AMBARI_SUDO, "sources.list.d", self.AMBARI_REPO_FILENAME) def getRepoFileChmodCommand(self): - return "{0} chmod 644 {1}".format(AMBARI_SUDO, self.getRepoFile()) + return f"{AMBARI_SUDO} chmod 644 {self.getRepoFile()}" def copyNeededFiles(self): # get the params @@ -578,7 +578,7 @@ def copyNeededFiles(self): else: self.host_log.write("==========================\n") self.host_log.write("Copying required files...") - self.host_log.write("Ambari repo file not found: {0}".format(self.getRepoFile())) + self.host_log.write(f"Ambari repo file not found: {self.getRepoFile()}") retcode = -1 pass @@ -609,7 +609,7 @@ def getRunSetupWithPasswordCommand(self, expected_hostname): version = self.getAmbariVersion() port = self.getAmbariPort() passwordFile = self.getPasswordFile() - return "{sudo} -S python3 ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + str(expected_hostname) + \ + return f"{AMBARI_SUDO} -S python3 " + str(setupFile) + " " + str(expected_hostname) + \ " " + str(passphrase) + " " + str(server)+ " " + quote_bash_args(str(user_run_as)) + " " + str(version) + \ " " + str(port) + " < " + str(passwordFile) @@ -620,7 +620,7 @@ def getRunSetupWithoutPasswordCommand(self, expected_hostname): user_run_as = self.shared_state.user_run_as version=self.getAmbariVersion() port=self.getAmbariPort() - return "{sudo} python3 ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + str(expected_hostname) + \ + return f"{AMBARI_SUDO} python3 " + str(setupFile) + " " + str(expected_hostname) + \ " " + str(passphrase) + " " + str(server)+ " " + quote_bash_args(str(user_run_as)) + " " + str(version) + \ " " + str(port) diff --git a/ambari-server/src/main/python/setupAgent.py 
b/ambari-server/src/main/python/setupAgent.py index ebc4a2e9c74..e90c88a1f82 100755 --- a/ambari-server/src/main/python/setupAgent.py +++ b/ambari-server/src/main/python/setupAgent.py @@ -355,7 +355,7 @@ def run_setup(argv=None): if (not retcode["exitstatus"] == 0): return retcode else: - return {"exitstatus": 2, "log": "Ambari repo file not found: {0}".format(ambari_repo_file)} + return {"exitstatus": 2, "log": f"Ambari repo file not found: {ambari_repo_file}"} pass elif retcode["exitstatus"] == 1: if retcode["log"] != None and retcode["log"] != "" and retcode["log"][0].strip() != "": @@ -368,7 +368,7 @@ def run_setup(argv=None): logmessage = "Desired version ("+projectVersion+") of ambari-agent package is not available." ambari_repo_file = get_ambari_repo_file_full_name() if not os.path.exists(ambari_repo_file): - logmessage = logmessage + " " + "Ambari repo file not found: {0}".format(ambari_repo_file) + logmessage = logmessage + " " + f"Ambari repo file not found: {ambari_repo_file}" return {"exitstatus": retcode["exitstatus"], "log": logmessage} pass else: diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/package/scripts/command_commons.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/package/scripts/command_commons.py index 235a10c7d7b..89bc47ca281 100755 --- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/package/scripts/command_commons.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/package/scripts/command_commons.py @@ -319,7 +319,7 @@ def create_command(command): """ Create hdfs command. Append kinit to the command if required. """ - kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, params.infra_solr_kerberos_keytab, params.infra_solr_kerberos_principal) if params.security_enabled else "" + kinit_cmd = f"{kinit_path_local} -kt {params.infra_solr_kerberos_keytab} {params.infra_solr_kerberos_principal};" if params.security_enabled else "" return kinit_cmd + command def execute_commad(command): @@ -332,7 +332,7 @@ def move_hdfs_folder(source_dir, target_dir): cmd=create_command(format('hdfs dfs -mv {source_dir} {target_dir}')) returncode, stdout = execute_commad(cmd) if returncode: - raise Exception("Unable to move HDFS dir '{0}' to '{1}' (return code: {2})".format(source_dir, target_dir, str(returncode))) + raise Exception(f"Unable to move HDFS dir '{source_dir}' to '{target_dir}' (return code: {str(returncode)})") return stdout.strip() def check_hdfs_folder_exists(hdfs_dir): diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/ams.py index 197687faf98..961332c7cb4 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/ams.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/ams.py @@ -136,7 +136,7 @@ def ams(name=None): target = compress_backslashes(glob.glob(os.path.expandvars(target))[0]) if not os.path.exists(real_link): #TODO check the symlink destination too. Broken in Python 2.x on Windows. - Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target)) + Execute(f'cmd /c mklink "{real_link}" "{target}"') pass elif name == 'monitor': @@ -156,7 +156,7 @@ def ams(name=None): target = compress_backslashes(glob.glob(os.path.expandvars(target))[0]) if not os.path.exists(real_link): #TODO check the symlink destination too. 
Broken in Python 2.x on Windows. - Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target)) + Execute(f'cmd /c mklink "{real_link}" "{target}"') Directory(params.ams_monitor_conf_dir, owner=params.ams_user, diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana.py index d8f48e651cc..3682608d6c3 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana.py @@ -51,7 +51,7 @@ def start(self, env, upgrade_type=None): if not sudo.path_exists(pidfile): Logger.warning("Pid file doesn't exist after starting of the component.") else: - Logger.info("Grafana Server has started with pid: {0}".format(sudo.read_file(pidfile).strip())) + Logger.info(f"Grafana Server has started with pid: {sudo.read_file(pidfile).strip()}") from metrics_grafana_util import create_ams_datasource, create_ams_dashboards, create_grafana_admin_pwd diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana_util.py index 5b4dd928f9f..8e2bd90c897 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana_util.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/metrics_grafana_util.py @@ -62,14 +62,14 @@ def perform_grafana_get_call(url, server): ssl_version=Script.get_force_https_protocol_value() ) - userAndPass = b64encode('{0}:{1}'.format(server.user, server.password).encode()).decode() - headers = { 'Authorization' : 'Basic %s' % userAndPass } + userAndPass = b64encode(f'{server.user}:{server.password}'.encode()).decode() + headers = { 'Authorization' : f'Basic {userAndPass}' } - Logger.info("Connecting (GET) to %s:%s%s" % (server.host, server.port, url)) + Logger.info(f"Connecting (GET) to {server.host}:{server.port}{url}") conn.request("GET", url, headers = headers) response = conn.getresponse() - Logger.info("Http response: %s %s" % (response.status, response.reason)) + Logger.info(f"Http response: {response.status} {response.reason}") break except (http.client.HTTPException, socket.error) as ex: if i < params.grafana_connect_attempts - 1: @@ -78,7 +78,7 @@ def perform_grafana_get_call(url, server): time.sleep(params.grafana_connect_retry_delay) continue else: - raise Fail("Ambari Metrics Grafana update failed due to: %s" % str(ex)) + raise Fail(f"Ambari Metrics Grafana update failed due to: {str(ex)}") pass return response @@ -88,9 +88,9 @@ def perform_grafana_put_call(url, id, payload, server): response = None data = None - userAndPass = b64encode('{0}:{1}'.format(server.user, server.password).encode()).decode() + userAndPass = b64encode(f'{server.user}:{server.password}'.encode()).decode() headers = {"Content-Type": "application/json", - 'Authorization' : 'Basic %s' % userAndPass } + 'Authorization' : f'Basic {userAndPass}' } grafana_https_enabled = server.protocol.lower() == 'https' ca_certs = None @@ -109,7 +109,7 @@ def perform_grafana_put_call(url, id, payload, server): conn.request("PUT", url + "/" + str(id), payload, headers) response = conn.getresponse() data = response.read() - Logger.info("Http data: %s" % data) + 
Logger.info(f"Http data: {data}") conn.close() break except (http.client.HTTPException, socket.error) as ex: @@ -119,7 +119,7 @@ def perform_grafana_put_call(url, id, payload, server): time.sleep(params.grafana_connect_retry_delay) continue else: - raise Fail("Ambari Metrics Grafana update failed due to: %s" % str(ex)) + raise Fail(f"Ambari Metrics Grafana update failed due to: {str(ex)}") pass return (response, data) @@ -129,10 +129,10 @@ def perform_grafana_post_call(url, payload, server): response = None data = None - userAndPass = b64encode('{0}:{1}'.format(server.user, server.password).encode()).decode() - Logger.debug('POST payload: %s' % payload) + userAndPass = b64encode(f'{server.user}:{server.password}'.encode()).decode() + Logger.debug(f'POST payload: {payload}') headers = {"Content-Type": "application/json", "Content-Length" : len(payload), - 'Authorization' : 'Basic %s' % userAndPass} + 'Authorization' : f'Basic {userAndPass}'} grafana_https_enabled = server.protocol.lower() == 'https' ca_certs = None @@ -141,7 +141,7 @@ def perform_grafana_post_call(url, payload, server): for i in range(0, params.grafana_connect_attempts): try: - Logger.info("Connecting (POST) to %s:%s%s" % (server.host, server.port, url)) + Logger.info(f"Connecting (POST) to {server.host}:{server.port}{url}") conn = network.get_http_connection( server.host, int(server.port), @@ -152,7 +152,7 @@ def perform_grafana_post_call(url, payload, server): conn.request("POST", url, payload, headers) response = conn.getresponse() - Logger.info("Http response: %s %s" % (response.status, response.reason)) + Logger.info(f"Http response: {response.status} {response.reason}") if response.status == 401: #Intermittent error thrown from Grafana if i < params.grafana_connect_attempts - 1: Logger.info("Connection to Grafana failed. Next retry in %s seconds." 
@@ -160,7 +160,7 @@ def perform_grafana_post_call(url, payload, server): time.sleep(params.grafana_connect_retry_delay) continue data = response.read() - Logger.info("Http data: %s" % data) + Logger.info(f"Http data: {data}") conn.close() break except (http.client.HTTPException, socket.error) as ex: @@ -170,7 +170,7 @@ def perform_grafana_post_call(url, payload, server): time.sleep(params.grafana_connect_retry_delay) continue else: - raise Fail("Ambari Metrics Grafana update failed due to: %s" % str(ex)) + raise Fail(f"Ambari Metrics Grafana update failed due to: {str(ex)}") pass return (response, data) @@ -194,14 +194,14 @@ def perform_grafana_delete_call(url, server): ssl_version=Script.get_force_https_protocol_value() ) - userAndPass = b64encode('{0}:{1}'.format(server.user, server.password).encode()).decode() - headers = { 'Authorization' : 'Basic %s' % userAndPass } + userAndPass = b64encode(f'{server.user}:{server.password}'.encode()).decode() + headers = { 'Authorization' : f'Basic {userAndPass}' } - Logger.info("Connecting (DELETE) to %s:%s%s" % (server.host, server.port, url)) + Logger.info(f"Connecting (DELETE) to {server.host}:{server.port}{url}") conn.request("DELETE", url, headers = headers) response = conn.getresponse() - Logger.info("Http response: %s %s" % (response.status, response.reason)) + Logger.info(f"Http response: {response.status} {response.reason}") break except (http.client.HTTPException, socket.error) as ex: if i < params.grafana_connect_attempts - 1: @@ -210,7 +210,7 @@ def perform_grafana_delete_call(url, server): time.sleep(params.grafana_connect_retry_delay) continue else: - raise Fail("Ambari Metrics Grafana update failed due to: %s" % str(ex)) + raise Fail(f"Ambari Metrics Grafana update failed due to: {str(ex)}") pass return response @@ -302,7 +302,7 @@ def create_ams_datasource(): if host in results: if results[host].status == SUCCESS: new_datasource_host = host - Logger.info("Found working collector on host %s" % new_datasource_host) + Logger.info(f"Found working collector on host {new_datasource_host}") break else: Logger.warning(results[host].result) @@ -311,7 +311,7 @@ def create_ams_datasource(): Logger.warning("All metric collectors are unavailable. Will use random collector as datasource host.") new_datasource_host = params.metric_collector_host - Logger.info("New datasource host will be %s" % new_datasource_host) + Logger.info(f"New datasource host will be {new_datasource_host}") ams_datasource_json = Template('metrics_grafana_datasource.json.j2', ams_datasource_name=METRICS_GRAFANA_DATASOURCE_NAME, ams_datasource_host=new_datasource_host).get_content() @@ -344,12 +344,12 @@ def create_ams_datasource(): if datasource_type == new_datasource_type: Logger.info("Grafana datasource type validation succeeded.") else: - Logger.info("Grafana datasource type validation failed. Old type = %s, New type = %s" % (datasource_type, new_datasource_type)) + Logger.info(f"Grafana datasource type validation failed. Old type = {datasource_type}, New type = {new_datasource_type}") update_datasource = True if update_datasource: # Metrics datasource present, but collector host is wrong or the datasource type is outdated. 
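# Editor's note, not part of the patch: the serverUtils/setupSecurity/Grafana hunks
# above all build an HTTP Basic auth header from "user:password" via base64. A
# standard-library-only sketch of that pattern; the credentials and URL below are
# placeholders, not values taken from the Ambari code.
import base64
import urllib.request

user, password = "admin", "secret"
# encodebytes() appends a trailing newline (hence the .replace('\n', '') in the
# patched code); b64encode() does not, and both give the same header value here.
auth = base64.b64encode(f"{user}:{password}".encode()).decode()
request = urllib.request.Request("http://localhost:8080/api/v1/clusters")
request.add_header("Authorization", f"Basic {auth}")
request.add_header("X-Requested-By", "ambari")
# urllib.request.urlopen(request) would then issue the call, as the patched helpers do.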
datasource_id = datasources_json[i]["id"] - Logger.info("Updating datasource, id = %s" % datasource_id) + Logger.info(f"Updating datasource, id = {datasource_id}") (response, data) = perform_grafana_put_call(GRAFANA_DATASOURCE_URL, datasource_id, ams_datasource_json, server) @@ -373,7 +373,7 @@ def create_ams_datasource(): if not create_datasource: return else: - Logger.info("Generating datasource:\n%s" % ams_datasource_json) + Logger.info(f"Generating datasource:\n{ams_datasource_json}") (response, data) = perform_grafana_post_call(GRAFANA_DATASOURCE_URL, ams_datasource_json, server) @@ -402,7 +402,7 @@ def create_ams_dashboards(): dashboard_files = params.get_grafana_dashboard_defs() version = params.get_ambari_version() - Logger.info("Checking dashboards to update for Ambari version : %s" % version) + Logger.info(f"Checking dashboards to update for Ambari version : {version}") # Friendly representation of dashboard Dashboard = namedtuple('Dashboard', ['uri', 'id', 'title', 'tags']) @@ -433,7 +433,7 @@ def create_ams_dashboards(): (GRAFANA_SEARCH_BUILTIN_DASHBOARDS, response.status, response.reason, response.read())) return - Logger.debug('Dashboard definitions found = %s' % str(dashboard_files)) + Logger.debug(f'Dashboard definitions found = {str(dashboard_files)}') if dashboard_files: for dashboard_file in dashboard_files: @@ -441,7 +441,7 @@ def create_ams_dashboards(): with open(dashboard_file, 'r') as file: dashboard_def = json.load(file) except Exception as e: - Logger.error('Unable to load dashboard json file %s' % dashboard_file) + Logger.error(f'Unable to load dashboard json file {dashboard_file}') Logger.error(str(e)) continue @@ -487,12 +487,12 @@ def create_ams_dashboards(): (response, data) = perform_grafana_post_call(GRAFANA_DASHBOARDS_URL, paylaod, server) if response and response.status == 200: - Logger.info("Dashboard created successfully.\n %s" % str(data)) + Logger.info(f"Dashboard created successfully.\n {str(data)}") else: - Logger.error("Failed creating dashboard: %s" % dashboard_def['title']) + Logger.error(f"Failed creating dashboard: {dashboard_def['title']}") pass else: - Logger.info('No update needed for dashboard = %s' % dashboard_def['title']) + Logger.info(f"No update needed for dashboard = {dashboard_def['title']}") pass pass diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/params.py index b1502d5ef36..85de36589ce 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/params.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/params.py @@ -147,7 +147,7 @@ def get_ambari_version(): f.close() except Exception as e: Logger.info('Unable to determine ambari version from version file.') - Logger.debug('Exception: %s' % str(e)) + Logger.debug(f'Exception: {str(e)}') # No hostname script identified in the ambari agent conf pass pass @@ -356,8 +356,8 @@ def get_ambari_version(): ams_monitor_principal = config['configurations']['ams-hbase-security-site']['ams.monitor.principal'] if ams_monitor_keytab and ams_monitor_principal: - monitor_kinit_cmd = '%s -kt %s %s' % (kinit_path_local, ams_monitor_keytab, ams_monitor_principal.replace('_HOST',_hostname_lowercase)) - klist_cmd = '%s' % klist_path_local + monitor_kinit_cmd = f"{kinit_path_local} -kt {ams_monitor_keytab} {ams_monitor_principal.replace('_HOST', _hostname_lowercase)}" + 
klist_cmd = f'{klist_path_local}' #Ambari metrics log4j settings ams_hbase_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_log_maxfilesize',256) diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/service_check.py index 68738657333..eea31ceef8e 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/service_check.py @@ -81,8 +81,7 @@ def service_check_for_single_host(self, metric_collector_host, params): if params.metric_collector_https_enabled: protocol = "https" port = str(params.metric_collector_port) - uri = '{0}://{1}:{2}{3}'.format( - protocol, metric_collector_host, port, self.AMS_METRICS_POST_URL) + uri = f'{protocol}://{metric_collector_host}:{port}{self.AMS_METRICS_POST_URL}' call_curl_krb_request(tmp_dir, params.smoke_user_keytab, params.smoke_user_princ, uri, params.kinit_path_local, params.smoke_user, self.AMS_CONNECT_TIMEOUT, method, metric_json, header, tries = self.AMS_CONNECT_TRIES) @@ -125,14 +124,14 @@ def service_check_for_single_host(self, metric_collector_host, params): ) conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters) response = conn.getresponse() - Logger.info("Http response for host %s : %s %s" % (metric_collector_host, response.status, response.reason)) + Logger.info(f"Http response for host {metric_collector_host} : {response.status} {response.reason}") data = response.read() - Logger.info("Http data: %s" % data) + Logger.info(f"Http data: {data}") conn.close() if response.status == 200: - Logger.info("Metrics were retrieved from host %s" % metric_collector_host) + Logger.info(f"Metrics were retrieved from host {metric_collector_host}") else: raise Fail("Metrics were not retrieved from host %s. GET request status: %s %s \n%s" % (metric_collector_host, response.status, response.reason, data)) @@ -146,7 +145,7 @@ def floats_eq(f1, f2, delta): if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"] and floats_eq(metrics_data["metrics"][str(current_time)], random_value1, 0.0000001) and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)): - Logger.info("Values %s and %s were found in the response from host %s." % (metric_collector_host, random_value1, current_time)) + Logger.info(f"Values {metric_collector_host} and {random_value1} were found in the response from host {current_time}.") values_are_present = True break pass @@ -157,13 +156,13 @@ def floats_eq(f1, f2, delta): % (self.AMS_READ_TIMEOUT)) time.sleep(self.AMS_READ_TIMEOUT) else: - raise Fail("Values %s and %s were not found in the response." % (random_value1, current_time)) + raise Fail(f"Values {random_value1} and {current_time} were not found in the response.") else: break pass except Fail as ex: - Logger.warning("Ambari Metrics service check failed on collector host %s. Reason : %s" % (metric_collector_host, str(ex))) - raise Fail("Ambari Metrics service check failed on collector host %s. Reason : %s" % (metric_collector_host, str(ex))) + Logger.warning(f"Ambari Metrics service check failed on collector host {metric_collector_host}. Reason : {str(ex)}") + raise Fail(f"Ambari Metrics service check failed on collector host {metric_collector_host}. 
Reason : {str(ex)}") @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) def service_check(self, env): @@ -194,11 +193,11 @@ def is_spnego_enabled(params): def call_curl_krb_request(tmp_dir, user_keytab, user_princ, uri, kinit_path, user, connection_timeout, method='GET', metric_json='', header='', tries = 1, current_time = 0, random_value = 0): if method == 'POST': - Logger.info("Generated metrics for %s:\n%s" % (uri, metric_json)) + Logger.info(f"Generated metrics for {uri}:\n{metric_json}") for i in range(0, tries): try: - Logger.info("Connecting (%s) to %s" % (method, uri)); + Logger.info(f"Connecting ({method}) to {uri}"); response = None errmsg = None @@ -215,16 +214,16 @@ def call_curl_krb_request(tmp_dir, user_keytab, user_princ, uri, kinit_path, use % (uri, connection_timeout)) continue else: - raise Fail("Unable to {0} metrics on: {1}. Exception: {2}".format(method, uri, str(exception))) + raise Fail(f"Unable to {method} metrics on: {uri}. Exception: {str(exception)}") finally: if not response: - Logger.error("Unable to {0} metrics on: {1}. Error: {2}".format(method, uri, errmsg)) + Logger.error(f"Unable to {method} metrics on: {uri}. Error: {errmsg}") else: - Logger.info("%s response from %s: %s, errmsg: %s" % (method, uri, response, errmsg)); + Logger.info(f"{method} response from {uri}: {response}, errmsg: {errmsg}"); try: response.close() except: - Logger.debug("Unable to close {0} connection to {1}".format(method, uri)) + Logger.debug(f"Unable to close {method} connection to {uri}") if method == 'GET': data_json = json.loads(response) @@ -237,18 +236,17 @@ def floats_eq(f1, f2, delta): if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"] and floats_eq(metrics_data["metrics"][str(current_time)], random_value, 0.0000001) and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)): - Logger.info("Values %s and %s were found in the response from %s." % (uri, random_value, current_time)) + Logger.info(f"Values {uri} and {random_value} were found in the response from {current_time}.") values_are_present = True break pass if not values_are_present: if i < tries - 1: #range/xrange returns items from start to end-1 - Logger.info("Values weren't stored yet. Retrying in %s seconds." - % (tries)) + Logger.info(f"Values weren't stored yet. Retrying in {tries} seconds.") time.sleep(connection_timeout) else: - raise Fail("Values %s and %s were not found in the response." 
% (random_value, current_time)) + raise Fail(f"Values {random_value} and {current_time} were not found in the response.") else: break pass @@ -259,7 +257,7 @@ def post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, metri metric_json, headers, ca_certs, tries = 1, connect_timeout = 10): for i in range(0, tries): try: - Logger.info("Generated metrics for host %s :\n%s" % (metric_collector_host, metric_json)) + Logger.info(f"Generated metrics for host {metric_collector_host} :\n{metric_json}") Logger.info("Connecting (POST) to %s:%s%s" % (metric_collector_host, metric_collector_port, @@ -274,7 +272,7 @@ def post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, metri conn.request("POST", ams_metrics_post_url, metric_json, headers) response = conn.getresponse() - Logger.info("Http response for host %s: %s %s" % (metric_collector_host, response.status, response.reason)) + Logger.info(f"Http response for host {metric_collector_host}: {response.status} {response.reason}") except (http.client.HTTPException, socket.error) as ex: if i < tries - 1: #range/xrange returns items from start to end-1 time.sleep(connect_timeout) @@ -285,7 +283,7 @@ def post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, metri raise Fail("Metrics were not saved. Connection failed.") data = response.read() - Logger.info("Http data: %s" % data) + Logger.info(f"Http data: {data}") conn.close() if response.status == 200: @@ -295,8 +293,7 @@ def post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, metri Logger.info("Metrics were not saved.") if i < tries - 1: #range/xrange returns items from start to end-1 time.sleep(tries) - Logger.info("Next retry in %s seconds." - % (tries)) + Logger.info(f"Next retry in {tries} seconds.") else: raise Fail("Metrics were not saved. 
POST request status: %s %s \n%s" % (response.status, response.reason, data)) diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/split_points.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/split_points.py index ea539d14ef1..ca02feeae68 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/split_points.py +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/package/scripts/split_points.py @@ -101,7 +101,7 @@ def initialize_region_counts(self): memstore_flush_size = format_Xmx_size_to_bytes(self.ams_hbase_site['hbase.hregion.memstore.flush.size']) max_inmemory_regions = (memstore_max_mem / memstore_flush_size) - other_region_static_count - print('max_inmemory_regions: %s' % max_inmemory_regions) + print(f'max_inmemory_regions: {max_inmemory_regions}') if max_inmemory_regions > 2: # Lets say total = 25, so we have 20 regions to allocate between @@ -123,7 +123,7 @@ def initialize_ordered_set_of_metrics(self): self.gatherMetrics(metrics, self.customServiceMetricsDir) self.metrics = sorted(metrics) - print('metrics length: %s' % len(self.metrics)) + print(f'metrics length: {len(self.metrics)}') def gatherMetrics(self, metrics, dir): @@ -137,7 +137,7 @@ def gatherMetrics(self, metrics, dir): # Process for stack services selected at deploy time or all stack services if # services arg is not passed if self.services is None or file.rstrip(metric_filename_ext) in self.services: - print('Processing file: %s' % os.path.join(dir, file)) + print(f'Processing file: {os.path.join(dir, file)}') service_metrics = set() with open(os.path.join(dir, file), 'r') as f: for metric in f: @@ -222,7 +222,7 @@ def main(argv = None): sys.exit(2) print('--------- AMS Regions Split point finder ---------') - print('Services: %s' % services) + print(f'Services: {services}') mode = 'distributed' if 'hbase.rootdir' in ams_hbase_site and \ 'hdfs' in ams_hbase_site['hbase.rootdir'] else \ @@ -232,15 +232,15 @@ def main(argv = None): ams_hbase_site, ams_hbase_env, serviceMetricsDir, mode, services) result = split_point_finder.get_split_points() - print('Split points for precision table : %s' % len(result.precision)) - print('precision: %s' % str(result.precision)) - print('Split points for aggregate table : %s' % len(result.aggregate)) - print('aggregate: %s' % str(result.aggregate)) + print(f'Split points for precision table : {len(result.precision)}') + print(f'precision: {str(result.precision)}') + print(f'Split points for aggregate table : {len(result.aggregate)}') + print(f'aggregate: {str(result.aggregate)}') return 0 else: - print('Cannot find service metrics dir in %s' % scriptDir) + print(f'Cannot find service metrics dir in {scriptDir}') if __name__ == '__main__': main(sys.argv) diff --git a/ambari-server/src/main/resources/custom_actions/scripts/check_host.py b/ambari-server/src/main/resources/custom_actions/scripts/check_host.py index 1f5eac7acfd..ac2b59bc48e 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/check_host.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/check_host.py @@ -197,7 +197,7 @@ def actionexecute(self, env): error_message += "Check {0} was unsuccessful. 
Exit code: {1}.".format(check_name, \ structured_output[check_name]["exit_code"]) if "message" in structured_output[check_name]: - error_message += " Message: {0}".format(structured_output[check_name]["message"]) + error_message += f" Message: {structured_output[check_name]['message']}" error_message += "\n" Logger.info("Host checks completed.") diff --git a/ambari-server/src/main/resources/custom_actions/scripts/clear_repocache.py b/ambari-server/src/main/resources/custom_actions/scripts/clear_repocache.py index 50e3307370e..b18ac1b6970 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/clear_repocache.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/clear_repocache.py @@ -36,7 +36,7 @@ def actionexecute(self, env): if 0 == code: structured_output["clear_repocache"] = {"exit_code" : 0, "message": format("Repository cache successfully cleared!")} else: - structured_output["clear_repocache"] = {"exit_code": code, "message": "Failed to clear repository cache! {0}".format(str(output))} + structured_output["clear_repocache"] = {"exit_code": code, "message": f"Failed to clear repository cache! {str(output)}"} self.put_structured_out(structured_output) def get_clearcache_cmd(self): @@ -50,7 +50,7 @@ def get_clearcache_cmd(self): Logger.info("Clear repository cache for the Ubuntu OS family"); return ('/usr/bin/apt-get', 'update') else: - raise Exception("Unsupported OS family: '{0}' ".format(OSCheck.get_os_family())) + raise Exception(f"Unsupported OS family: '{OSCheck.get_os_family()}' ") if __name__ == "__main__": ClearRepoCache().execute() diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py index 75292b4f480..7839ff95fb6 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py @@ -103,11 +103,11 @@ def actionexecute(self, env): self.repository_version)) else: Logger.info( - "Will install packages for repository version {0}".format(self.repository_version)) + f"Will install packages for repository version {self.repository_version}") new_repo_files = Script.repository_util.create_repo_files() self.repo_files.update(new_repo_files) except Exception as err: - Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err))) + Logger.logger.exception(f"Cannot install repository files. Error: {str(err)}") num_errors += 1 # Build structured output with initial values @@ -143,13 +143,13 @@ def actionexecute(self, env): num_errors += 1 except Exception as err: num_errors += 1 - Logger.logger.exception("Could not install packages. Error: {0}".format(str(err))) + Logger.logger.exception(f"Could not install packages. Error: {str(err)}") try: lzo_utils.install_lzo_if_needed() except Exception as err: num_errors += 1 - Logger.logger.exception("Could not install LZO packages. Error: {0}".format(str(err))) + Logger.logger.exception(f"Could not install LZO packages. 
Error: {str(err)}") # Provide correct exit code if num_errors > 0: @@ -186,11 +186,11 @@ def _fix_default_links_for_current(self): if 0 == len(restricted_packages): Logger.info("There are no restricted conf-select packages for this installation") else: - Logger.info("Restricting conf-select packages to {0}".format(restricted_packages)) + Logger.info(f"Restricting conf-select packages to {restricted_packages}") for package_name, directories in conf_select.get_package_dirs().items(): - Logger.info("Attempting to fix the default conf links for {0}".format(package_name)) - Logger.info("The following directories will be fixed for {0}: {1}".format(package_name, str(directories))) + Logger.info(f"Attempting to fix the default conf links for {package_name}") + Logger.info(f"The following directories will be fixed for {package_name}: {str(directories)}") component_name = None for directory_struct in directories: @@ -200,7 +200,7 @@ def _fix_default_links_for_current(self): if component_name: stack_version = stack_select.get_stack_version_before_install(component_name) else: - Logger.warning("Unable to fix {0} since stack using outdated stack_packages.json".format(package_name)) + Logger.warning(f"Unable to fix {package_name} since stack using outdated stack_packages.json") return if 0 == len(restricted_packages) or package_name in restricted_packages: @@ -208,7 +208,7 @@ def _fix_default_links_for_current(self): conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories) else: Logger.warning( - "Unable to fix {0} since there is no known installed version for this component".format(package_name)) + f"Unable to fix {package_name} since there is no known installed version for this component") def _relink_configurations_with_conf_select(self, stack_id, stack_version): """ @@ -224,12 +224,12 @@ def _relink_configurations_with_conf_select(self, stack_id, stack_version): args = stack_id.upper().split('-') if len(args) != 2: - Logger.info("Unrecognized stack id {0}, cannot create config links".format(stack_id)) + Logger.info(f"Unrecognized stack id {stack_id}, cannot create config links") return target_stack_version = args[1] if not (target_stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, target_stack_version)): - Logger.info("Configuration symlinks are not needed for {0}".format(stack_version)) + Logger.info(f"Configuration symlinks are not needed for {stack_version}") return # After upgrading hdf-select package from HDF-2.X to HDF-3.Y, we need to create this symlink @@ -242,7 +242,7 @@ def _relink_configurations_with_conf_select(self, stack_id, stack_version): if 0 == len(restricted_packages): Logger.info("There are no restricted conf-select packages for this installation") else: - Logger.info("Restricting conf-select packages to {0}".format(restricted_packages)) + Logger.info(f"Restricting conf-select packages to {restricted_packages}") for package_name, directories in conf_select.get_package_dirs().items(): if 0 == len(restricted_packages) or package_name in restricted_packages: @@ -267,13 +267,13 @@ def compute_actual_version(self): self.put_structured_out(self.structured_output) Logger.info("Attempting to determine actual version with build number.") - Logger.info("Old versions: {0}".format(self.old_versions)) + Logger.info(f"Old versions: {self.old_versions}") new_versions = get_stack_versions(self.stack_root_folder) - Logger.info("New versions: {0}".format(new_versions)) + Logger.info(f"New versions: {new_versions}") deltas = set(new_versions) - 
set(self.old_versions) - Logger.info("Deltas: {0}".format(deltas)) + Logger.info(f"Deltas: {deltas}") # Get version without build number normalized_repo_version = self.repository_version.split('-')[0] @@ -297,7 +297,7 @@ def compute_actual_version(self): self.actual_version = self.actual_version.strip() self.structured_output['actual_version'] = self.actual_version self.put_structured_out(self.structured_output) - Logger.info("Found actual version {0} by searching for best possible match".format(self.actual_version)) + Logger.info(f"Found actual version {self.actual_version} by searching for best possible match") else: msg = "Could not determine actual version installed. Try reinstalling packages again." raise Fail(msg) @@ -309,13 +309,13 @@ def check_partial_install(self): :return: """ Logger.info("Installation of packages failed. Checking if installation was partially complete") - Logger.info("Old versions: {0}".format(self.old_versions)) + Logger.info(f"Old versions: {self.old_versions}") new_versions = get_stack_versions(self.stack_root_folder) - Logger.info("New versions: {0}".format(new_versions)) + Logger.info(f"New versions: {new_versions}") deltas = set(new_versions) - set(self.old_versions) - Logger.info("Deltas: {0}".format(deltas)) + Logger.info(f"Deltas: {deltas}") # Get version without build number normalized_repo_version = self.repository_version.split('-')[0] @@ -324,7 +324,7 @@ def check_partial_install(self): # Some packages were installed successfully. Log this version to REPO_VERSION_HISTORY_FILE partial_install_version = next(iter(deltas)).strip() write_actual_version_to_history_file(normalized_repo_version, partial_install_version) - Logger.info("Version {0} was partially installed. ".format(partial_install_version)) + Logger.info(f"Version {partial_install_version} was partially installed. 
") def find_best_fit_version(self, versions, repo_version): """ @@ -343,7 +343,7 @@ def find_best_fit_version(self, versions, repo_version): build_num_match = re.search("[\d\.]+-\d+", repo_version) if build_num_match and repo_version in versions: # If repo version has build number and is found in the list of versions, return it as the matching version - Logger.info("Best Fit Version: Resolved from repo version with valid build number: {0}".format(repo_version)) + Logger.info(f"Best Fit Version: Resolved from repo version with valid build number: {repo_version}") return repo_version # Get version without build number @@ -355,7 +355,7 @@ def find_best_fit_version(self, versions, repo_version): if len(match_versions) == 1: # Resolved without conflicts - Logger.info("Best Fit Version: Resolved from normalized repo version without conflicts: {0}".format(match_versions[0])) + Logger.info(f"Best Fit Version: Resolved from normalized repo version without conflicts: {match_versions[0]}") return match_versions[0] # Resolve conflicts using REPO_VERSION_HISTORY_FILE @@ -363,7 +363,7 @@ def find_best_fit_version(self, versions, repo_version): # Validate history version retrieved is valid if history_version in match_versions: - Logger.info("Best Fit Version: Resolved from normalized repo version using {0}: {1}".format(REPO_VERSION_HISTORY_FILE, history_version)) + Logger.info(f"Best Fit Version: Resolved from normalized repo version using {REPO_VERSION_HISTORY_FILE}: {history_version}") return history_version # No matching version @@ -435,7 +435,7 @@ def install_packages(self, package_list): self.repo_mgr.upgrade_package(name, installation_context) except Exception as err: ret_code = 1 - Logger.logger.error("Package Manager failed to install packages: {0}".format(str(err))) + Logger.logger.error(f"Package Manager failed to install packages: {str(err)}") # Remove already installed packages in case of fail if packages_were_checked and packages_installed_before: @@ -469,11 +469,11 @@ def install_packages(self, package_list): self.check_partial_install() except Fail as err: ret_code = 1 - Logger.logger.exception("Failure while computing actual version. Error: {0}".format(str(err))) + Logger.logger.exception(f"Failure while computing actual version. Error: {str(err)}") return ret_code def abort_handler(self, signum, frame): - Logger.error("Caught signal {0}, will handle it gracefully. Compute the actual version if possible before exiting.".format(signum)) + Logger.error(f"Caught signal {signum}, will handle it gracefully. 
Compute the actual version if possible before exiting.") self.check_partial_install() def filter_package_list(self, package_list): diff --git a/ambari-server/src/main/resources/custom_actions/scripts/remove_bits.py b/ambari-server/src/main/resources/custom_actions/scripts/remove_bits.py index 57b5fa78605..2c9955de314 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/remove_bits.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/remove_bits.py @@ -45,10 +45,10 @@ def remove_hdp_21(self, env): packages_to_remove = ["zookeeper", "hadoop", "hadoop-lzo", "hadoop-hdfs", "hadoop-libhdfs", "hadoop-yarn", "hadoop-client", "hadoop-mapreduce", "hive", "hive-hcatalog", "hive-jdbc", "hive-webhcat", "hcatalog", "webhcat-tar-hive", "webhcat-tar-pig", "oozie", "oozie-client", "pig", "sqoop", "tez" "falcon", "storm", "flume", "hbase", "phoenix"] packages_to_remove.reverse() - Logger.info("Packages to remove: {0}".format(" ".join(packages_to_remove))) + Logger.info(f"Packages to remove: {' '.join(packages_to_remove)}") for name in packages_to_remove: - Logger.info("Attempting to remove {0}".format(name)) + Logger.info(f"Attempting to remove {name}") Package(name, action="remove") if __name__ == "__main__": diff --git a/ambari-server/src/main/resources/custom_actions/scripts/remove_previous_stacks.py b/ambari-server/src/main/resources/custom_actions/scripts/remove_previous_stacks.py index 6b7371656a2..b1d06f53981 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/remove_previous_stacks.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/remove_previous_stacks.py @@ -56,19 +56,19 @@ def remove_stack_version(self, structured_output, version): Package(package, action="remove") self.remove_stack_folder(structured_output, version) structured_output["remove_previous_stacks"] = {"exit_code": 0, - "message": format("Stack version {0} successfully removed!".format(version))} + "message": format(f"Stack version {version} successfully removed!")} self.put_structured_out(structured_output) def remove_stack_folder(self, structured_output, version): if version and version != '' and stack_root and stack_root != '': - Logger.info("Removing {0}/{1}".format(stack_root, version)) + Logger.info(f"Removing {stack_root}/{version}") try: Execute(('rm', '-f', stack_root + version), sudo=True) finally: structured_output["remove_previous_stacks"] = {"exit_code": -1, - "message": "Failed to remove version {0}{1}".format(stack_root, version)} + "message": f"Failed to remove version {stack_root}{version}"} self.put_structured_out(structured_output) def get_packages_to_remove(self, version): @@ -80,7 +80,7 @@ def get_packages_to_remove(self, version): for package in all_installed_packages: if formated_version in package and self.stack_tool_package not in package: packages.append(package) - Logger.info("%s added to remove" % (package)) + Logger.info(f"{package} added to remove") return packages def check_no_symlink_to_version(self, structured_output, version): @@ -91,17 +91,17 @@ def check_no_symlink_to_version(self, structured_output, version): "message": "{0} contains symlink to version for remove! {1}".format( stack_root_current, version)} self.put_structured_out(structured_output) - raise Fail("{0} contains symlink to version for remove! {1}".format(stack_root_current, version)) + raise Fail(f"{stack_root_current} contains symlink to version for remove! 
{version}") def get_lower_versions(self, current_version): versions = get_stack_versions(stack_root) - Logger.info("available versions: {0}".format(str(versions))) + Logger.info(f"available versions: {str(versions)}") lover_versions = [] for version in versions: if self.compare(version, current_version) < 0 : lover_versions.append(version) - Logger.info("version %s added to remove" % (version)) + Logger.info(f"version {version} added to remove") return lover_versions def compare(self, version1, version2): diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py index 745b23407f3..dbb158cdd91 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py @@ -55,12 +55,12 @@ def __init__(self, t): def __str__(self): inner = [] if self.type: - inner.append("Type: %s" % str(self.type)) + inner.append(f"Type: {str(self.type)}") if self.script and self.function: - inner.append("Script: %s - Function: %s" % (str(self.script), str(self.function))) + inner.append(f"Script: {str(self.script)} - Function: {str(self.function)}") elif self.command: - inner.append("Command: %s" % str(self.command)) - return "Task. %s" % ", ".join(inner) + inner.append(f"Command: {str(self.command)}") + return f"Task. {', '.join(inner)}" def replace_variables(cmd, host_name, version): @@ -78,7 +78,7 @@ def resolve_ambari_config(): if os.path.exists(config_path): agent_config.read(config_path) else: - raise Exception("No config found at %s" % str(config_path)) + raise Exception(f"No config found at {str(config_path)}") except Exception as err: traceback.print_exc() Logger.warning(err) @@ -140,7 +140,7 @@ def actionexecute(self, env): script_path = os.path.join(base_dir, task.script) if not os.path.exists(script_path): - message = "Script %s does not exist" % str(script_path) + message = f"Script {str(script_path)} does not exist" raise Fail(message) # Notice that the script_path is now the fully qualified path, and the diff --git a/ambari-server/src/main/resources/custom_actions/scripts/stack_select_set_all.py b/ambari-server/src/main/resources/custom_actions/scripts/stack_select_set_all.py index 6043353d75a..6f884cbed1c 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/stack_select_set_all.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/stack_select_set_all.py @@ -72,7 +72,7 @@ def actionexecute(self, env): cmd = ('ambari-python-wrap', stack_selector_path, 'set', 'all', summary.associated_version) code, out = shell.call(cmd, sudo=True) if code != 0: - raise Exception("Command '{0}' exit code is nonzero".format(cmd)) + raise Exception(f"Command '{cmd}' exit code is nonzero") def is_host_skippable(stack_selector_path, associated_version): @@ -100,13 +100,13 @@ def is_host_skippable(stack_selector_path, associated_version): # check to see if the output is empty, indicating no versions installed if not out.strip(): - Logger.info("{0} has no stack versions installed".format(socket.gethostname())) + Logger.info(f"{socket.gethostname()} has no stack versions installed") return True # some pre-prepped systems may have a version, so there may be a version, so # add the extra check if it is available if not associated_version in out: - Logger.info("{0} is not found in the list of versions {1}".format(associated_version, out)) + Logger.info(f"{associated_version} is not found in the 
list of versions {out}") return True return False diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py index 357d56360e9..f290af1e415 100644 --- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py +++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py @@ -120,7 +120,7 @@ def _get_warnings_for_partition(parameters, disk_usage): # Check absolute disk space value if disk_usage.free < min_free_space: result_code = 'WARNING' - label += '. Total free space is less than {0}'.format(_get_formatted_size(min_free_space)) + label += f'. Total free space is less than {_get_formatted_size(min_free_space)}' return result_code, label @@ -160,7 +160,7 @@ def _get_disk_usage(path='/'): total = disk_stats.f_blocks * disk_stats.f_frsize used = (disk_stats.f_blocks - disk_stats.f_bfree) * disk_stats.f_frsize else: - raise NotImplementedError("{0} is not a supported platform for this alert".format(platform.platform())) + raise NotImplementedError(f"{platform.platform()} is not a supported platform for this alert") return DiskInfo(total=total, used=used, free=free, path=path) @@ -203,13 +203,13 @@ def _get_formatted_size(bytes): if bytes < 1000: return '%i' % bytes + ' B' elif 1000 <= bytes < 1000000: - return '%.1f' % (bytes / 1000.0) + ' KB' + return f'{bytes / 1000.0:.1f}' + ' KB' elif 1000000 <= bytes < 1000000000: - return '%.1f' % (bytes / 1000000.0) + ' MB' + return f'{bytes / 1000000.0:.1f}' + ' MB' elif 1000000000 <= bytes < 1000000000000: - return '%.1f' % (bytes / 1000000000.0) + ' GB' + return f'{bytes / 1000000000.0:.1f}' + ' GB' else: - return '%.1f' % (bytes / 1000000000000.0) + ' TB' + return f'{bytes / 1000000000000.0:.1f}' + ' TB' if __name__ == '__main__': print(_get_disk_usage(os.getcwd())) diff --git a/ambari-server/src/main/resources/host_scripts/alert_ulimit.py b/ambari-server/src/main/resources/host_scripts/alert_ulimit.py index 19abd7f1f9e..3db5c108b54 100644 --- a/ambari-server/src/main/resources/host_scripts/alert_ulimit.py +++ b/ambari-server/src/main/resources/host_scripts/alert_ulimit.py @@ -70,13 +70,13 @@ def _get_warnings_for_partition(parameters, soft_ulimit): return 'CRITICAL', ['Unable to determine ulimit for open files (-n)'] return_code = "OK" - label = "Ulimit for open files (-n) is {0}".format(soft_ulimit) + label = f"Ulimit for open files (-n) is {soft_ulimit}" if soft_ulimit >= critical_count: - label = "Ulimit for open files (-n) is {0} which is higher or equal than critical value of {1}".format(soft_ulimit, critical_count) + label = f"Ulimit for open files (-n) is {soft_ulimit} which is higher or equal than critical value of {critical_count}" return_code = 'CRITICAL' elif soft_ulimit >= warning_count: - label = "Ulimit for open files (-n) is {0} which is higher or equal than warning value of {1}".format(soft_ulimit, warning_count) + label = f"Ulimit for open files (-n) is {soft_ulimit} which is higher or equal than warning value of {warning_count}" return_code = 'WARNING' return return_code, label diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py index 6cd94805469..668f4f56637 100644 --- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py +++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py @@ -64,7 +64,7 @@ def execute(configurations={}, parameters={}, host_name=None): # Check required properties 
if STACK_TOOLS not in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)]) + return (RESULT_STATE_UNKNOWN, [f'{STACK_TOOLS} is a required parameter for the script']) stack_name = Script.get_stack_name() @@ -73,7 +73,7 @@ def execute(configurations={}, parameters={}, host_name=None): stack_tools_str = configurations[STACK_TOOLS] if stack_tools_str is None: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(STACK_TOOLS)]) + return (RESULT_STATE_UNKNOWN, [f'{STACK_TOOLS} is a required parameter for the script and the value is null']) distro_select = "unknown-distro-select" try: @@ -91,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None): (code, out, versions) = unsafe_get_stack_versions() if code == 0: - msg.append("{0} ".format(distro_select)) + msg.append(f"{distro_select} ") if versions is not None and type(versions) is list and len(versions) > 0: - msg.append("reported the following versions: {0}".format(", ".join(versions))) + msg.append(f"reported the following versions: {', '.join(versions)}") return (RESULT_STATE_OK, ["\n".join(msg)]) else: - msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir)) + msg.append(f"{distro_select} could not properly read {stack_root_dir}. Check this directory for unexpected contents.") if out is not None: msg.append(out) return (RESULT_STATE_CRITICAL, ["\n".join(msg)]) else: - msg.append("No stack root {0} to check.".format(stack_root_dir)) + msg.append(f"No stack root {stack_root_dir} to check.") return (RESULT_STATE_OK, ["\n".join(msg)]) except Exception as e: return (RESULT_STATE_CRITICAL, [e.message]) diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py index aa5c3c64723..fcc5878e6e3 100644 --- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py +++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py @@ -57,7 +57,7 @@ def get_stack_version(): tmpfile = tempfile.NamedTemporaryFile() out = None with open(tmpfile.name, 'r+') as file: - get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % ('hadoop-mapreduce-historyserver', tmpfile.name) + get_stack_version_cmd = f'/usr/bin/hdp-select status hadoop-mapreduce-historyserver > {tmpfile.name}' code, stdoutdata = shell.call(get_stack_version_cmd) out = file.read() pass @@ -70,7 +70,7 @@ def get_stack_version(): stack_version = matches[0] if matches and len(matches) > 0 else None if not stack_version: - Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out)) + Logger.error(f"Could not parse HDP version from output of hdp-select: {str(out)}") return 1 else: stack_version = options.hdp_version @@ -90,14 +90,14 @@ def get_stack_version(): (options, args) = parser.parse_args() if not os.path.exists(options.sql_driver_path): - Logger.error("SQL driver file {} does not exist".format(options.sql_driver_path)) + Logger.error(f"SQL driver file {options.sql_driver_path} does not exist") if os.path.exists(DEFAULT_SQL_DRIVER_PATH): - Logger.warning("Fallback to SQL driver {}".format(DEFAULT_SQL_DRIVER_PATH)) + Logger.warning(f"Fallback to SQL driver {DEFAULT_SQL_DRIVER_PATH}") options.sql_driver_path = DEFAULT_SQL_DRIVER_PATH else: sys.exit(1) - Logger.info("Using SQL driver from {}".format(options.sql_driver_path)) + Logger.info(f"Using SQL driver from 
{options.sql_driver_path}") sql_driver_filename = os.path.basename(options.sql_driver_path) # See if hdfs path prefix is provided on the command line. If yes, use that value, if no @@ -237,7 +237,7 @@ def copy_tarballs_to_hdfs(source, dest, component_user, file_owner, group_owner) component_tar_source_file, component_tar_destination_folder = source, dest if not os.path.exists(component_tar_source_file): - Logger.warning("Could not find file: %s" % str(component_tar_source_file)) + Logger.warning(f"Could not find file: {str(component_tar_source_file)}") return 1 file_name = os.path.basename(component_tar_source_file) @@ -296,7 +296,7 @@ def copy_zeppelin_dependencies_to_hdfs(file_pattern): if spark_deps_full_path and os.path.exists(spark_deps_full_path[0]): copy_tarballs_to_hdfs(spark_deps_full_path[0], hdfs_path_prefix+'/apps/zeppelin/', params.hdfs_user, 'zeppelin', 'zeppelin') else: - Logger.info('zeppelin-spark-dependencies not found at %s.' % file_pattern) + Logger.info(f'zeppelin-spark-dependencies not found at {file_pattern}.') def putCreatedHdfsResourcesToIgnore(env): if not 'hdfs_files' in env.config: @@ -314,7 +314,7 @@ def putCreatedHdfsResourcesToIgnore(env): fp.write(file_content) def putSQLDriverToOozieShared(): - params.HdfsResource(hdfs_path_prefix + '/user/oozie/share/lib/sqoop/{0}'.format(sql_driver_filename), + params.HdfsResource(hdfs_path_prefix + f'/user/oozie/share/lib/sqoop/{sql_driver_filename}', owner='hdfs', type='file', action=['create_on_execute'], mode=0o644, source=options.sql_driver_path) def create_yarn_service_tarball(): @@ -510,7 +510,7 @@ def create_yarn_service_tarball(): Logger.info("Completed tarball copy.") if not options.upgrade: - Logger.info("Executing stack-selector-tool for stack {0} ...".format(stack_version)) + Logger.info(f"Executing stack-selector-tool for stack {stack_version} ...") Execute( ('/usr/bin/hdp-select', 'set', 'all', stack_version), sudo = True diff --git a/ambari-server/src/main/resources/scripts/add_service_api.py b/ambari-server/src/main/resources/scripts/add_service_api.py index 321b8c6bd2d..3f3745532f3 100644 --- a/ambari-server/src/main/resources/scripts/add_service_api.py +++ b/ambari-server/src/main/resources/scripts/add_service_api.py @@ -58,7 +58,7 @@ } ################################################################# -SERVER_URL = "{protocol}://{hostname}:{port}".format(protocol=PROTOCOL, hostname=HOSTNAME, port=PORT) +SERVER_URL = f"{PROTOCOL}://{HOSTNAME}:{PORT}" def main(): # add service @@ -91,15 +91,15 @@ def main(): config[x['StackConfigurations']['property_name']] = x['StackConfigurations']['property_value'] for site_name, site_content in configs.items(): - code = call('/var/lib/ambari-server/resources/scripts/configs.sh get {hostname} {cluster_name} {site_name}'.format(hostname=HOSTNAME, cluster_name=CLUSTER_NAME, site_name=site_name))[0] + code = call(f'/var/lib/ambari-server/resources/scripts/configs.sh get {HOSTNAME} {CLUSTER_NAME} {site_name}')[0] if code: print("Adding new site: "+site_name) - checked_call('curl -i -H \'X-Requested-By:anything\' -X PUT -d \'{{"Clusters":{{"desired_configs":{{"type":"{site_name}","tag":"version1","properties":{site_content}}}}}}}\' -u admin:admin {server_url}/api/v1/clusters/{cluster_name}'.format(site_name=site_name, site_content=json.dumps(site_content), server_url=SERVER_URL, cluster_name=CLUSTER_NAME)) + checked_call(f'curl -i -H \'X-Requested-By:anything\' -X PUT -d 
\'{{"Clusters":{{"desired_configs":{{"type":"{site_name}","tag":"version1","properties":{json.dumps(site_content)}}}}}}}\' -u admin:admin {SERVER_URL}/api/v1/clusters/{CLUSTER_NAME}') else: timestamp = int(time.time()) print("Modifiying site: "+site_name+" version"+str(timestamp)) - checked_call('/var/lib/ambari-server/resources/scripts/configs.sh get {hostname} {cluster_name} {site_name} /tmp/current_site.json'.format(hostname=HOSTNAME, cluster_name=CLUSTER_NAME, site_name=site_name)) + checked_call(f'/var/lib/ambari-server/resources/scripts/configs.sh get {HOSTNAME} {CLUSTER_NAME} {site_name} /tmp/current_site.json') with open('/tmp/current_site.json', "r") as f: fcontent = f.read() @@ -108,12 +108,12 @@ def main(): for k,v in site_content.items(): d['properties'][k] = v - checked_call('curl -i -H \'X-Requested-By:anything\' -X PUT -d \'{{"Clusters":{{"desired_configs":{{"type":"{site_name}","tag":"version{timestamp}","properties":{site_content}}}}}}}\' -u admin:admin {server_url}/api/v1/clusters/{cluster_name}'.format(site_name=site_name, timestamp=timestamp, site_content=json.dumps(d['properties']), server_url=SERVER_URL, cluster_name=CLUSTER_NAME)) + checked_call(f"curl -i -H 'X-Requested-By:anything' -X PUT -d '{{\"Clusters\":{{\"desired_configs\":{{\"type\":\"{site_name}\",\"tag\":\"version{timestamp}\",\"properties\":{json.dumps(d['properties'])}}}}}}}' -u admin:admin {SERVER_URL}/api/v1/clusters/{CLUSTER_NAME}") for site_name, site_configs in CONFIGS_TO_CHANGE.items(): for config_name, config_value in site_configs.items(): print("Adding config "+config_name+"="+config_value+" to "+site_name) - checked_call('/var/lib/ambari-server/resources/scripts/configs.sh set {hostname} {cluster_name} {site_name} {config_name} {config_value}'.format(config_name=config_name, config_value=config_value, hostname=HOSTNAME, cluster_name=CLUSTER_NAME, site_name=site_name)) + checked_call(f'/var/lib/ambari-server/resources/scripts/configs.sh set {HOSTNAME} {CLUSTER_NAME} {site_name} {config_name} {config_value}') # install all new components diff --git a/ambari-server/src/main/resources/scripts/cluster_blueprint.py b/ambari-server/src/main/resources/scripts/cluster_blueprint.py index 48b07c1a75b..20b5911016e 100644 --- a/ambari-server/src/main/resources/scripts/cluster_blueprint.py +++ b/ambari-server/src/main/resources/scripts/cluster_blueprint.py @@ -126,8 +126,8 @@ def http_request(self, req): uri = req.get_full_url() user = USERNAME pw = PASSWORD - raw = "%s:%s" % (user, pw) - auth = 'Basic %s' % base64.b64encode(raw.encode()).decode().strip() + raw = f"{user}:{pw}" + auth = f'Basic {base64.b64encode(raw.encode()).decode().strip()}' req.add_unredirected_header('Authorization', auth) return req @@ -153,15 +153,15 @@ def importBlueprint(self, blueprintLocation, hostsLocation, clusterName): # Verify json data blueprint_json = json.loads(blueprint) - logger.debug("blueprint json: %s" % blueprint_json) + logger.debug(f"blueprint json: {blueprint_json}") blueprintInfo = blueprint_json.get("Blueprints") if not blueprintInfo: - raise Exception("Cannot read blueprint info from blueprint at %s" % blueprintLocation) + raise Exception(f"Cannot read blueprint info from blueprint at {blueprintLocation}") blueprint_name = blueprintInfo.get("blueprint_name") if not blueprint_name: - raise Exception("blueprint_name required inside Blueprints %s" % blueprintInfo) + raise Exception(f"blueprint_name required inside Blueprints {blueprintInfo}") hosts_json = None @@ -193,7 +193,7 @@ def importBlueprint(self, 
blueprintLocation, hostsLocation, clusterName): pass pass - logger.debug("host assignments json: %s" % hosts_json) + logger.debug(f"host assignments json: {hosts_json}") # Create blueprint blueprintCreateUrl = getUrl(BLUEPRINT_CREATE_URL.format(blueprint_name)) @@ -205,7 +205,7 @@ def importBlueprint(self, blueprintLocation, hostsLocation, clusterName): logger.info("Blueprint %s already exists, proceeding with host " "assignments." % blueprint_name) else: - logger.error("Unable to create blueprint from location %s" % blueprintLocation) + logger.error(f"Unable to create blueprint from location {blueprintLocation}") sys.exit(1) pass @@ -226,7 +226,7 @@ def buildHostAssignments(self, blueprintName, blueprintJson, masters, hostAssignments = '{{"blueprint":"{0}","host_groups":[{1}]}}' hostGroupHosts = '{{"name":"{0}","hosts":[{1}]}}' hosts = '{{"fqdn":"{0}"}},' - logger.debug("Blueprint: {0}, Masters: {1}, Slaves: {2}".format(blueprintName, masters, slaves)) + logger.debug(f"Blueprint: {blueprintName}, Masters: {masters}, Slaves: {slaves}") mastersUsed = 0 slavesUsed = 0 hostGroupsJson = '' @@ -356,7 +356,7 @@ def exportBlueprint(self, clusterName, exportFilePath): logger.info(resp) pass else: - logger.error("Unable to perform export operation on cluster, %s" % clusterName) + logger.error(f"Unable to perform export operation on cluster, {clusterName}") pass @@ -367,21 +367,21 @@ def performPostOperation(self, url, data): req.get_method = lambda: 'POST' try: - logger.info("POST request: %s" % req.get_full_url()) - logger.debug("Payload: %s " % data) + logger.info(f"POST request: {req.get_full_url()}") + logger.debug(f"Payload: {data} ") resp = self.urlOpener.open(req) if resp: - logger.info("Create response: %s" % resp.getcode()) + logger.info(f"Create response: {resp.getcode()}") retCode = str(resp.getcode()).strip() if retCode == "201" or retCode == "202": urlResp = resp.read() - logger.info("Response data: %s" % str(urlResp)) + logger.info(f"Response data: {str(urlResp)}") return retCode pass pass except urllib.error.HTTPError as e: logger.error("POST request failed.") - logger.error('HTTPError : %s' % e.read()) + logger.error(f'HTTPError : {e.read()}') if e.code == 409: return '409' pass @@ -404,9 +404,9 @@ def performGetOperation(self, url): resp = resp.read() data = json.loads(resp) else: - logger.error("Unable to get response from server, url = %s" % url) + logger.error(f"Unable to get response from server, url = {url}") except: - logger.error("Error reading response from server, url %s" % url) + logger.error(f"Error reading response from server, url {url}") return data @@ -494,7 +494,7 @@ def main(): elif options.action == "export": ambariBlueprint.exportBlueprint(options.cluster, options.blueprint) else: - raise Exception("Unsupported action %s" % options.action) + raise Exception(f"Unsupported action {options.action}") pass diff --git a/ambari-server/src/main/resources/scripts/configs.py b/ambari-server/src/main/resources/scripts/configs.py index 14c9faa1404..ba401b8a57f 100644 --- a/ambari-server/src/main/resources/scripts/configs.py +++ b/ambari-server/src/main/resources/scripts/configs.py @@ -79,10 +79,10 @@ class UsageException(Exception): def api_accessor(host, login, password, protocol, port, unsafe=None): def do_request(api_url, request_type=GET_REQUEST_TYPE, request_body=None): try: - url = '{0}://{1}:{2}{3}'.format(protocol, host, port, api_url) - admin_auth = base64.encodebytes(('%s:%s' % (login, password)).encode()).decode().replace('\n', '') + url = 
f'{protocol}://{host}:{port}{api_url}' + admin_auth = base64.encodebytes(f'{login}:{password}'.encode()).decode().replace('\n', '') request = urllib.request.Request(url) - request.add_header('Authorization', 'Basic %s' % admin_auth) + request.add_header('Authorization', f'Basic {admin_auth}') request.add_header('X-Requested-By', 'ambari') request.data=request_body request.get_method = lambda: request_type @@ -98,7 +98,7 @@ def do_request(api_url, request_type=GET_REQUEST_TYPE, request_body=None): response_body = response.read() response_body = response_body.decode('utf-8') if isinstance(response_body, bytes) else response_body except Exception as exc: - raise Exception('Problem with accessing api. Reason: {0}'.format(exc)) + raise Exception(f'Problem with accessing api. Reason: {exc}') return response_body return do_request @@ -108,7 +108,7 @@ def get_config_tag(cluster, config_type, accessor): desired_tags = json.loads(response) current_config_tag = desired_tags[CLUSTERS][DESIRED_CONFIGS][config_type][TAG] except Exception as exc: - raise Exception('"{0}" not found in server response. Response:\n{1}'.format(config_type, response)) + raise Exception(f'"{config_type}" not found in server response. Response:\n{response}') return current_config_tag def create_new_desired_config(cluster, config_type, properties, attributes, accessor, version_note): @@ -127,15 +127,15 @@ def create_new_desired_config(cluster, config_type, properties, attributes, acce new_config[CLUSTERS][DESIRED_CONFIGS][ATTRIBUTES] = attributes request_body = json.dumps(new_config) request_body = request_body.encode('utf-8') if isinstance(request_body, str) else request_body - new_file = 'doSet_{0}.json'.format(new_tag) - logger.info('### PUTting json into: {0}'.format(new_file)) + new_file = f'doSet_{new_tag}.json' + logger.info(f'### PUTting json into: {new_file}') output_to_file(new_file)(new_config) accessor(CLUSTERS_URL.format(cluster), PUT_REQUEST_TYPE, request_body) - logger.info('### NEW Site:{0}, Tag:{1}'.format(config_type, new_tag)) + logger.info(f'### NEW Site:{config_type}, Tag:{new_tag}') def get_current_config(cluster, config_type, accessor): config_tag = get_config_tag(cluster, config_type, accessor) - logger.info("### on (Site:{0}, Tag:{1})".format(config_type, config_tag)) + logger.info(f"### on (Site:{config_type}, Tag:{config_tag})") response = accessor(CONFIGURATION_URL.format(cluster, config_type, config_tag)) config_by_tag = json.loads(response, object_pairs_hook=OrderedDict) current_config = config_by_tag[ITEMS][0] @@ -171,13 +171,13 @@ def read_xml_data_to_map(path): if name != None: name_text = name.text if name.text else "" else: - logger.warn("No name is found for one of the properties in {0}, ignoring it".format(path)) + logger.warn(f"No name is found for one of the properties in {path}, ignoring it") continue if value != None: value_text = value.text if value.text else "" else: - logger.warn("No value is found for \"{0}\" in {1}, using empty string for it".format(name_text, path)) + logger.warn(f"No value is found for \"{name_text}\" in {path}, using empty string for it") value_text = "" if final != None: @@ -193,14 +193,14 @@ def update(cluster, config_type, accessor): with open(config_file) as in_file: file_content = in_file.read() except Exception as e: - raise Exception('Cannot find file "{0}" to PUT'.format(config_file)) + raise Exception(f'Cannot find file "{config_file}" to PUT') try: file_properties = json.loads(file_content) except Exception as e: - raise Exception('File "{0}" should be 
in the following JSON format ("properties_attributes" is optional):\n{1}'.format(config_file, FILE_FORMAT)) + raise Exception(f'File "{config_file}" should be in the following JSON format ("properties_attributes" is optional):\n{FILE_FORMAT}') new_properties = file_properties.get(PROPERTIES, {}) new_attributes = file_properties.get(ATTRIBUTES, {}) - logger.info('### PUTting file: "{0}"'.format(config_file)) + logger.info(f'### PUTting file: "{config_file}"') return new_properties, new_attributes return update @@ -240,14 +240,14 @@ def set_properties(cluster, config_type, args, accessor, version_note): elif ext == ".json": updater = update_from_file(config_file) else: - logger.error("File extension {0} is not supported".format(ext)) + logger.error(f"File extension {ext} is not supported") return -1 - logger.info('### from file {0}'.format(config_file)) + logger.info(f'### from file {config_file}') else: config_name = args[0] config_value = args[1] updater = update_specific_property(config_name, config_value) - logger.info('### new property - "{0}":"{1}"'.format(config_name, config_value)) + logger.info(f'### new property - "{config_name}":"{config_value}"') update_config(cluster, config_type, updater, accessor, version_note) return 0 @@ -258,7 +258,7 @@ def delete_properties(cluster, config_type, args, accessor, version_note): return -1 config_name = args[0] - logger.info('### on property "{0}"'.format(config_name)) + logger.info(f'### on property "{config_name}"') update_config(cluster, config_type, delete_specific_property(config_name), accessor, version_note) return 0 @@ -268,7 +268,7 @@ def get_properties(cluster, config_type, args, accessor): if len(args) > 0: filename = args[0] output = output_to_file(filename) - logger.info('### to file "{0}"'.format(filename)) + logger.info(f'### to file "{filename}"') else: output = output_to_console get_config(cluster, config_type, accessor, output) @@ -323,13 +323,13 @@ def main(): user = login_lines[0] password = login_lines[1] else: - logger.error("Incorrect content of {0} file. File should contain Ambari username and password separated by new line.".format(options.credentials_file)) + logger.error(f"Incorrect content of {options.credentials_file} file. File should contain Ambari username and password separated by new line.") return -1 except Exception as e: - logger.error("You don't have permissions to {0} file".format(options.credentials_file)) + logger.error(f"You don't have permissions to {options.credentials_file} file") return -1 else: - logger.error("File {0} doesn't exist or you don't have permissions.".format(options.credentials_file)) + logger.error(f"File {options.credentials_file} doesn't exist or you don't have permissions.") return -1 else: user = options.user @@ -373,7 +373,7 @@ def main(): action_args = [options.key] return delete_properties(cluster, config_type, action_args, accessor, version_note) else: - logger.error('Action "{0}" is not supported. Supported actions: "get", "set", "delete".'.format(action)) + logger.error(f'Action "{action}" is not supported. 
Supported actions: "get", "set", "delete".') return -1 if __name__ == "__main__": diff --git a/ambari-server/src/main/resources/scripts/export_ams_metrics.py b/ambari-server/src/main/resources/scripts/export_ams_metrics.py index f38d71ec0e1..bf24e29bb5e 100644 --- a/ambari-server/src/main/resources/scripts/export_ams_metrics.py +++ b/ambari-server/src/main/resources/scripts/export_ams_metrics.py @@ -99,7 +99,7 @@ def get_data_from_url(collector_uri): try: connection = urllib.request.urlopen(req) except Exception as e: - logger.error('Error on metrics GET request: %s' % collector_uri) + logger.error(f'Error on metrics GET request: {collector_uri}') logger.error(str(e)) # Validate json before dumping response_data = None @@ -107,7 +107,7 @@ def get_data_from_url(collector_uri): try: response_data = json.loads(connection.read()) except Exception as e: - logger.warn('Error parsing json data returned from URI: %s' % collector_uri) + logger.warn(f'Error parsing json data returned from URI: {collector_uri}') logger.debug(str(e)) return response_data @@ -166,14 +166,14 @@ def write_metrics_to_file(metrics, host=None): for metric in metrics: uri = Params.get_collector_uri(metric, host) - logger.info('Request URI: %s' % str(uri)) + logger.info(f'Request URI: {str(uri)}') metrics_json = Utils.get_data_from_url(uri) if metrics_json: if host: path = os.path.join(Params.OUT_DIR, host, metric) else: path = os.path.join(Params.OUT_DIR, metric) - logger.info('Writing metric file: %s' % path) + logger.info(f'Writing metric file: {path}') with open(path, 'w') as file: file.write(json.dumps(metrics_json)) @@ -185,7 +185,7 @@ def get_metrics_metadata(): app_metrics_metadata.append({"metricname": metric, "seriesStartTime": Params.START_TIME, "supportsAggregation": "false", "type": "UNDEFINED"}) else: app_metrics_metadata.append({"metricname": metric, "seriesStartTime": Params.START_TIME, "supportsAggregation": "false"}) - logger.debug("Adding {0} to metadata".format(metric)) + logger.debug(f"Adding {metric} to metadata") return {Params.AMS_APP_ID : app_metrics_metadata} @@ -374,9 +374,8 @@ def main(): 'You can use it to visualize information exported by the AMS thin client') d = datetime.datetime.now() - time_suffix = '{0}-{1}-{2}-{3}-{4}-{5}'.format(d.year, d.month, d.day, - d.hour, d.minute, d.second) - print('Time: %s' % time_suffix) + time_suffix = f'{d.year}-{d.month}-{d.day}-{d.hour}-{d.minute}-{d.second}' + print(f'Time: {time_suffix}') logfile = os.path.join('/tmp', 'ambari_metrics_export.out') @@ -387,7 +386,7 @@ def main(): parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="output verbosity.") parser.add_option("-l", "--logfile", dest="log_file", default=logfile, - metavar='FILE', help="Log file. [default: %s]" % logfile) + metavar='FILE', help=f"Log file. [default: {logfile}]") export_options_group = OptionGroup(parser, "Required options for action 'export'") #export metrics ----------------------------------------------------- @@ -408,7 +407,7 @@ def main(): export_options_group.add_option("-e", "--end_time", dest="end_time", help="End time in milliseconds since epoch or UTC timestamp in YYYY-MM-DDTHH:mm:ssZ format.") export_options_group.add_option("-o", "--output-dir", dest="output_dir", default=output_dir, - help="Output dir. [default: %s]" % output_dir) + help=f"Output dir. 
[default: {output_dir}]") parser.add_option_group(export_options_group) #start Flask server ----------------------------------------------------- diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py index edfddae0d68..0a4e3f57892 100755 --- a/ambari-server/src/main/resources/scripts/stack_advisor.py +++ b/ambari-server/src/main/resources/scripts/stack_advisor.py @@ -39,7 +39,7 @@ RECOMMEND_CONFIGURATIONS_FOR_KERBEROS, RECOMMEND_CONFIGURATION_DEPENDENCIES, VALIDATE_CONFIGURATIONS] -USAGE = "Usage: \nPossible actions are: {0}\n".format( str(ALL_ACTIONS) ) +USAGE = f"Usage: \nPossible actions are: {str(ALL_ACTIONS)}\n" SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) STACKS_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, '../stacks') @@ -63,7 +63,7 @@ def loadJson(path): return json.load(f) except Exception as err: traceback.print_exc() - raise StackAdvisorException("Error loading file at: {0}".format(path)) + raise StackAdvisorException(f"Error loading file at: {path}") def dumpJson(json_object, dump_file): @@ -72,7 +72,7 @@ def dumpJson(json_object, dump_file): json.dump(json_object, out, indent=1) except Exception as err: traceback.print_exc() - raise StackAdvisorException("Error writing to file {0} : {1}".format(dump_file, str(err))) + raise StackAdvisorException(f"Error writing to file {dump_file} : {str(err)}") def main(argv=None): @@ -169,10 +169,10 @@ def instantiateStackAdvisor(stackName, stackVersion, parentVersions): with open(path, 'r') as fp: stack_advisor = imp.load_module('stack_advisor_impl', fp, path, ('.py', 'rb', imp.PY_SOURCE)) className = STACK_ADVISOR_IMPL_CLASS_TEMPLATE.format(stackName, version.replace('.', '')) - print("StackAdvisor implementation for stack {0}, version {1} was loaded".format(stackName, version)) + print(f"StackAdvisor implementation for stack {stackName}, version {version} was loaded") except IOError: # file not found traceback.print_exc() - print("StackAdvisor implementation for stack {0}, version {1} was not found".format(stackName, version)) + print(f"StackAdvisor implementation for stack {stackName}, version {version} was not found") try: clazz = getattr(stack_advisor, className) @@ -189,10 +189,10 @@ def instantiateStackAdvisor(stackName, stackVersion, parentVersions): main(sys.argv) except StackAdvisorException as stack_exception: traceback.print_exc() - print("Error occured in stack advisor.\nError details: {0}".format(str(stack_exception))) + print(f"Error occured in stack advisor.\nError details: {str(stack_exception)}") sys.exit(1) except Exception as e: traceback.print_exc() - print("Error occured in stack advisor.\nError details: {0}".format(str(e))) + print(f"Error occured in stack advisor.\nError details: {str(e)}") sys.exit(2) diff --git a/ambari-server/src/main/resources/scripts/takeover_config_merge.py b/ambari-server/src/main/resources/scripts/takeover_config_merge.py index e94dca6a562..a6486ab2180 100644 --- a/ambari-server/src/main/resources/scripts/takeover_config_merge.py +++ b/ambari-server/src/main/resources/scripts/takeover_config_merge.py @@ -88,7 +88,7 @@ def read_data_to_map(self, path): if name != None: configurations[name] = str(value) except: - logger.error("Couldn't parse {0} file. Skipping ...".format(path)) + logger.error(f"Couldn't parse {path} file. 
Skipping ...") return None, None return configurations, None @@ -131,13 +131,13 @@ def read_data_to_map(self, path): if name != None: name_text = name.text if name.text else "" else: - logger.warn("No name is found for one of the properties in {0}, ignoring it".format(path)) + logger.warn(f"No name is found for one of the properties in {path}, ignoring it") continue if value != None: value_text = value.text if value.text else "" else: - logger.warn("No value is found for \"{0}\" in {1}, using empty string for it".format(name_text, path)) + logger.warn(f"No value is found for \"{name_text}\" in {path}, using empty string for it") value_text = "" if final != None: @@ -145,7 +145,7 @@ def read_data_to_map(self, path): properties_attributes[name_text] = final_text configurations[name_text] = value_text - logger.debug("Following configurations found in {0}:\n{1}".format(path, configurations)) + logger.debug(f"Following configurations found in {path}:\n{configurations}") return configurations, properties_attributes class ConfigMerge: @@ -185,7 +185,7 @@ def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, dir if ext in extensions: file_path = os.path.join(dirName, file) if ext in ConfigMerge.SUPPORTED_FILENAME_ENDINGS and not ConfigMerge.SUPPORTED_FILENAME_ENDINGS[ext] in root: - logger.warn("File {0} is not configurable by Ambari. Skipping...".format(file_path)) + logger.warn(f"File {file_path} is not configurable by Ambari. Skipping...") continue config_name = None @@ -199,7 +199,7 @@ def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, dir if not config_name: if file in ConfigMerge.NOT_MAPPED_FILES: if ConfigMerge.UNKNOWN_FILES_MAPPING_FILE: - logger.error("File {0} doesn't match any regex from {1}".format(file_path, ConfigMerge.UNKNOWN_FILES_MAPPING_FILE)) + logger.error(f"File {file_path} doesn't match any regex from {ConfigMerge.UNKNOWN_FILES_MAPPING_FILE}") else: logger.error("Cannot map {0} to Ambari config type. Please use -u option to specify config mapping for this file. 
\n" "For more information use --help option for script".format(file_path)) @@ -225,7 +225,7 @@ def merge_configurations(filepath_to_configurations): if not value in property_name_to_value_to_filepaths[configuration_name]: property_name_to_value_to_filepaths[configuration_name][value] = [] - logger.debug("Iterating over '{0}' with value '{1}' in file '{2}'".format(configuration_name, value, path)) + logger.debug(f"Iterating over '{configuration_name}' with value '{value}' in file '{path}'") property_name_to_value_to_filepaths[configuration_name][value].append(path) merged_configurations[configuration_name] = value @@ -272,10 +272,10 @@ def format_conflicts_output(property_name_to_value_to_filepaths): for value, filepaths in value_to_filepaths.items(): if not first_item: first_item = True - output += "\n\n=== {0} | {1} | {2} |\nHas conflicts with:\n\n".format(property_name,filepaths[0], value) + output += f"\n\n=== {property_name} | {filepaths[0]} | {value} |\nHas conflicts with:\n\n" continue for filepath in filepaths: - output += "| {0} | {1} | {2} |\n".format(property_name, filepath, value) + output += f"| {property_name} | {filepath} | {value} |\n" return output @@ -289,7 +289,7 @@ def perform_merge(self): configuration_type = os.path.splitext(filename)[0] for path_and_parser in paths_and_parsers: path, parser = path_and_parser - logger.debug("Read data from {0}".format(path)) + logger.debug(f"Read data from {path}") parsed_configurations_from_path, parsed_properties_attributes = parser.read_data_to_map(path) if parsed_configurations_from_path != None: filepath_to_configurations[path] = parsed_configurations_from_path @@ -311,7 +311,7 @@ def perform_merge(self): has_conflicts = True conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-conflicts.txt") logger.warn( - "You have configurations conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename)) + f"You have configurations conflicts for {configuration_type}. Please check {conflict_filename}") with open(conflict_filename, "w") as fp: fp.write(configuration_conflicts_output) @@ -319,7 +319,7 @@ def perform_merge(self): has_conflicts = True conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-attributes-conflicts.txt") logger.warn( - "You have property attribute conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename)) + f"You have property attribute conflicts for {configuration_type}. Please check {conflict_filename}") with open(conflict_filename, "w") as fp: fp.write(attribute_conflicts_output) @@ -328,7 +328,7 @@ def perform_merge(self): result_json_file = os.path.join(self.OUTPUT_DIR, "blueprint.json") - logger.info("Using '{0}' file as output for blueprint template".format(result_json_file)) + logger.info(f"Using '{result_json_file}' file as output for blueprint template") with open(result_json_file, 'w') as outfile: outfile.write(json.dumps(ConfigMerge.format_for_blueprint(result_configurations, result_property_attributes), sort_keys=True, indent=4, @@ -363,7 +363,7 @@ def perform_diff(self): if configuration_diff_output and configuration_diff_output != "": conflict_filename = os.path.join(ConfigMerge.OUTPUT_DIR, "file-diff.txt") logger.warn( - "You have file diff conflicts. Please check {0}".format(conflict_filename)) + f"You have file diff conflicts. 
Please check {conflict_filename}") with open(conflict_filename, "w") as fp: fp.write(configuration_diff_output) @@ -439,7 +439,7 @@ def get_missing_attributes(attributes, matches, file_path): conflicts = [] for key, value in attributes.items(): if not key in matches: - conflicts.append({key : "Final attribute is missing in {0} file".format(file_path)}) + conflicts.append({key : f"Final attribute is missing in {file_path} file"}) return conflicts @staticmethod @@ -447,7 +447,7 @@ def get_missing_properties(configurations, matches, file_path): conflicts = [] for key, value in configurations.items(): if not key in matches: - conflicts.append({key : "Property is missing in {0} file".format(file_path)}) + conflicts.append({key : f"Property is missing in {file_path} file"}) return conflicts @staticmethod @@ -455,7 +455,7 @@ def get_missing_files(config_file_paths, matches, input_dir): conflicts = [] for file_name in config_file_paths: if file_name not in matches: - conflicts.append({file_name : "Configurations file is missing for {0} directory".format(input_dir)}) + conflicts.append({file_name : f"Configurations file is missing for {input_dir} directory"}) return conflicts def main(): @@ -530,8 +530,8 @@ def main(): if options.action == "merge" : ConfigMerge.INPUT_DIR = options.inputDir file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.INPUT_DIR) - logger.info("Writing logs into '{0}' file".format(logegr_file_name)) - logger.debug("Following configuration files found:\n{0}".format(file_paths.items())) + logger.info(f"Writing logs into '{logegr_file_name}' file") + logger.debug(f"Following configuration files found:\n{file_paths.items()}") config_merge = ConfigMerge(config_files_map=file_paths) return config_merge.perform_merge() @@ -539,26 +539,26 @@ def main(): if options.leftInputDir and os.path.isdir(options.leftInputDir): ConfigMerge.LEFT_INPUT_DIR = options.leftInputDir else: - logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.leftInputDir)) + logger.error(f"Directory \"{options.leftInputDir}\" doesn't exist. Use option \"-h\" for details") return -1 if options.rightInputDir and os.path.isdir(options.rightInputDir): ConfigMerge.RIGHT_INPUT_DIR = options.rightInputDir else: - logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.rightInputDir)) + logger.error(f"Directory \"{options.rightInputDir}\" doesn't exist. Use option \"-h\" for details") return -1 - logger.info("Writing logs into '{0}' file".format(logegr_file_name)) + logger.info(f"Writing logs into '{logegr_file_name}' file") left_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.LEFT_INPUT_DIR) - logger.debug("Following configuration files found:\n{0} for left directory".format(left_file_paths.items())) + logger.debug(f"Following configuration files found:\n{left_file_paths.items()} for left directory") right_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.RIGHT_INPUT_DIR) - logger.debug("Following configuration files found:\n{0} for right directory".format(right_file_paths.items())) + logger.debug(f"Following configuration files found:\n{right_file_paths.items()} for right directory") config_merge = ConfigMerge(left_file_paths=left_file_paths , right_file_paths=right_file_paths) return config_merge.perform_diff() else: - logger.error("Action \"{0}\" doesn't supports by script. 
Use option \"-h\" for details".format(options.action)) + logger.error(f"Action \"{options.action}\" doesn't supports by script. Use option \"-h\" for details") return -1 if __name__ == "__main__": diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py index e6a79f512b1..0f807ba71c2 100644 --- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py @@ -58,7 +58,7 @@ def setup_stack_symlinks(struct_out_file): json_version = load_version(struct_out_file) if not json_version: - Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file)) + Logger.info(f"There is no advertised version for this component stored in {struct_out_file}") return # On parallel command execution this should be executed by a single process at a time. @@ -70,7 +70,7 @@ def setup_stack_symlinks(struct_out_file): def setup_config(): import params stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) + Logger.info(f"FS Type: {params.dfs_type}") is_hadoop_conf_dir_present = False if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir): @@ -133,7 +133,7 @@ def link_configs(struct_out_file): json_version = load_version(struct_out_file) if not json_version: - Logger.info("Could not load 'version' from {0}".format(struct_out_file)) + Logger.info(f"Could not load 'version' from {struct_out_file}") return # On parallel command execution this should be executed by a single process at a time. diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py index fc9966fc186..90e01540964 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py @@ -161,7 +161,7 @@ def get_uid(user, return_existing=False): service_env_str = str(service_env[0]) uid = params.config['configurations'][service_env_str][user_str] if len(service_env) > 1: - Logger.warning("Multiple values found for %s, using %s" % (user_str, uid)) + Logger.warning(f"Multiple values found for {user_str}, using {uid}") return uid else: if return_existing: @@ -180,7 +180,7 @@ def get_uid(user, return_existing=False): def setup_hadoop_env(): import params stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) + Logger.info(f"FS Type: {params.dfs_type}") if params.has_hdfs_clients or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS': if params.security_enabled: tc_owner = "root" diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py index 35396865e19..ca04cf30d5e 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py @@ -37,7 +37,7 @@ def _alter_repo(action, repo_dicts, repo_template): if 0 == len(repo_dicts): Logger.info("Repository list is empty. 
Ambari may not be managing the repositories.") else: - Logger.info("Initializing {0} repositories".format(str(len(repo_dicts)))) + Logger.info(f"Initializing {str(len(repo_dicts))} repositories") for repo in repo_dicts: if not 'baseUrl' in repo: diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py index ee1b6db6dec..f5eb3775767 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py +++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py @@ -50,7 +50,7 @@ def setup_extensions(): hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ] hadoop_custom_extensions_services.append("YARN") - hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root()) + hadoop_custom_extensions_local_dir = f"{Script.get_stack_root()}/current/ext/hadoop" if params.current_service in hadoop_custom_extensions_services: clean_extensions(hadoop_custom_extensions_local_dir) @@ -75,7 +75,7 @@ def setup_hbase_extensions(): hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user) hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root", DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version))) - hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root()) + hbase_custom_extensions_local_dir = f"{Script.get_stack_root()}/current/ext/hbase" impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER']; role = params.config.get('role','') @@ -95,7 +95,7 @@ def setup_extensions_hive(): hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user) hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version) - hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root()) + hive_custom_extensions_local_dir = f"{Script.get_stack_root()}/current/ext/hive" impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']; role = params.config.get('role','') @@ -149,10 +149,10 @@ def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_d # Execute command is not quoting correctly. 
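A pattern worth noting in the converted lines above (for example f"Initializing {str(len(repo_dicts))} repositories"): the explicit str() wrapper survives the mechanical conversion but is redundant, because f-string interpolation already applies str() to the value. A minimal standalone sketch, not part of this patch, illustrating the equivalence:

# Redundant str() inside an f-string versus plain interpolation;
# both lines print exactly the same text.
repo_count = 3
print(f"Initializing {str(repo_count)} repositories")  # as produced by the conversion
print(f"Initializing {repo_count} repositories")        # equivalent and simpler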
cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}") - only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir) + only_if_cmd = f"ls -d {extensions_tmp_dir}/*" Execute(cmd, only_if=only_if_cmd) - only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir) + only_if_local = f'ls -d "{local_target_dir}"' Execute(("chown", "-R", "root:root", local_target_dir), sudo=True, only_if=only_if_local) diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py index f7402827ef6..8d7f7dbc239 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py +++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py @@ -95,7 +95,7 @@ unlimited_key_jce_required = default("/componentLevelParams/unlimited_key_jce_required", False) jdk_name = default("/ambariLevelParams/jdk_name", None) java_home = default("/ambariLevelParams/java_home", None) -java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java" +java_exec = f"{java_home}/bin/java" if java_home is not None else "/bin/java" #users and groups has_hadoop_env = 'hadoop-env' in config['configurations'] diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py index 69c2de76440..812902ca31f 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py @@ -190,7 +190,7 @@ def create_microsoft_r_dir(): mode=0o777) params.HdfsResource(None, action="execute") except Exception as exception: - Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception))) + Logger.warning(f"Could not check the existence of {directory} on DFS while starting {params.current_service}, exception: {str(exception)}") def setup_unlimited_key_jce_policy(): """ @@ -248,15 +248,15 @@ def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_j jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}") java_security_dir = format("{custom_java_home}/jre/lib/security") - Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target)) + Logger.debug(f"Downloading the unlimited key JCE policy files from {jce_zip_source} to {jce_zip_target}.") Directory(params.artifact_dir, create_parents=True) File(jce_zip_target, content=DownloadSource(jce_zip_source)) - Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir)) + Logger.debug(f"Removing existing JCE policy JAR files: {java_security_dir}.") File(format("{java_security_dir}/US_export_policy.jar"), action="delete") File(format("{java_security_dir}/local_policy.jar"), action="delete") - Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir)) + Logger.debug(f"Unzipping the unlimited key JCE policy files from {jce_zip_target} into {java_security_dir}.") extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir) Execute(extract_cmd, only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"), diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/functions.py index d35ce60bb8d..4be04075dee 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/functions.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/functions.py @@ -48,7 +48,7 @@ def ensure_unit_for_memory(memory_size): if len(memory_size_unit) > 0: unit = memory_size_unit[0] if unit not in ['b', 'k', 'm', 'g', 't', 'p']: - raise Exception("Memory size unit error. %s - wrong unit" % unit) - return "%s%s" % (memory_size_values[0], unit) + raise Exception(f"Memory size unit error. {unit} - wrong unit") + return f"{memory_size_values[0]}{unit}" else: raise Exception('Memory size can not be calculated') diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_master.py index 1fe6aa79477..d4c4596e554 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_master.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_master.py @@ -148,7 +148,7 @@ def security_status(self, env): else: issues = [] for cf in result_issues: - issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf])) + issues.append(f"Configuration file {cf} did not pass the validation. Reason: {result_issues[cf]}") self.put_structured_out({"securityIssuesFound": ". ".join(issues)}) self.put_structured_out({"securityState": "UNSECURED"}) else: diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_regionserver.py index 0a291b92b87..8ac7914d58e 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_regionserver.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_regionserver.py @@ -200,7 +200,7 @@ def security_status(self, env): else: issues = [] for cf in result_issues: - issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf])) + issues.append(f"Configuration file {cf} did not pass the validation. Reason: {result_issues[cf]}") self.put_structured_out({"securityIssuesFound": ". 
".join(issues)}) self.put_structured_out({"securityState": "UNSECURED"}) else: diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_upgrade.py index 77a65ac721f..51e1a471a83 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/hbase_upgrade.py @@ -28,9 +28,9 @@ class HbaseMasterUpgrade(Script): def take_snapshot(self, env): import params - snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd) + snap_cmd = f"echo 'snapshot_all' | {params.hbase_cmd} shell" - exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd) + exec_cmd = f"{params.kinit_cmd} {snap_cmd}" Execute(exec_cmd, user=params.hbase_user) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/setup_ranger_hbase.py index 40e3fb17f3d..9f4c18f74d4 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/setup_ranger_hbase.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/setup_ranger_hbase.py @@ -63,7 +63,7 @@ def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"): ) params.HdfsResource(None, action="execute") except Exception as err: - Logger.exception("Audit directory creation in HDFS for HBASE Ranger plugin failed with error:\n{0}".format(err)) + Logger.exception(f"Audit directory creation in HDFS for HBASE Ranger plugin failed with error:\n{err}") api_version = 'v2' diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/upgrade.py index 7d04e3f1062..d11b8b91132 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/package/scripts/upgrade.py @@ -42,9 +42,9 @@ def post_regionserver(env): import params env.set_params(params) - check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd) + check_cmd = f"echo 'status \"simple\"' | {params.hbase_cmd} shell" - exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd) + exec_cmd = f"{params.kinit_cmd} {check_cmd}" is_regionserver_registered(exec_cmd, params.hbase_user, params.hostname, re.IGNORECASE) @@ -95,10 +95,10 @@ def is_regionserver_registered(cmd, user, hostname, regex_search_flags): match = re.search(bound_ip_address_to_match, out, regex_search_flags) except socket.error: # this is merely a backup, so just log that it failed - Logger.warning("Unable to lookup the IP address of {0}, reverse DNS lookup may not be working.".format(hostname)) + Logger.warning(f"Unable to lookup the IP address of {hostname}, reverse DNS lookup may not be working.") pass # failed with both a hostname and an IP address, so raise the Fail and let the function auto retry if not match: raise Fail( - "The RegionServer named {0} has not yet registered with the HBase Master".format(hostname)) + f"The RegionServer named {hostname} has not yet registered with the HBase Master") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/service_advisor.py 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/service_advisor.py index 0841333e999..7f927d426fc 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/service_advisor.py @@ -283,7 +283,7 @@ def recommendHBASEConfigurationsFromHDP22(self, configurations, clusterData, ser bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory hbase_bucketcache_size = bucketcache_offheap_memory hbase_bucketcache_percentage_in_combinedcache = float(bucketcache_offheap_memory) / hbase_bucketcache_size - hbase_bucketcache_percentage_in_combinedcache_str = "{0:.4f}".format(math.ceil(hbase_bucketcache_percentage_in_combinedcache * 10000) / 10000.0) + hbase_bucketcache_percentage_in_combinedcache_str = f"{math.ceil(hbase_bucketcache_percentage_in_combinedcache * 10000) / 10000.0:.4f}" # Set values in hbase-site putHbaseSiteProperty('hfile.block.cache.size', hfile_block_cache_size) @@ -513,9 +513,9 @@ def recommendHBASEConfigurationsForKerberos(self, configurations, clusterData, s putHbaseSiteProperty('hbase.master.ui.readonly', 'true') phoenix_query_server_hosts = self.getPhoenixQueryServerHosts(services, hosts) - self.logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts)) + self.logger.debug(f"Calculated Phoenix Query Server hosts: {str(phoenix_query_server_hosts)}") if phoenix_query_server_hosts: - self.logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts)) + self.logger.debug(f"Attempting to update hadoop.proxyuser.HTTP.hosts with {str(phoenix_query_server_hosts)}") # The PQS hosts we want to ensure are set new_value = ','.join(phoenix_query_server_hosts) # Update the proxyuser setting, deferring to out callback to merge results together @@ -720,16 +720,16 @@ def validateHBASEConfigurationsFromHDP22(self, properties, recommendedDefaults, if prop_name1 in hbase_site and not self.is_number(hbase_site[prop_name1]): validationItems.append({"config-name": prop_name1, "item": self.getWarnItem( - "{0} should be float value".format(prop_name1))}) + f"{prop_name1} should be float value")}) elif prop_name2 in hbase_site and not self.is_number(hbase_site[prop_name2]): validationItems.append({"config-name": prop_name2, "item": self.getWarnItem( - "{0} should be float value".format(prop_name2))}) + f"{prop_name2} should be float value")}) elif prop_name1 in hbase_site and prop_name2 in hbase_site and \ float(hbase_site[prop_name1]) + float(hbase_site[prop_name2]) > props_max_sum: validationItems.append({"config-name": prop_name1, "item": self.getWarnItem( - "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))}) + f"{prop_name1} and {prop_name2} sum should not exceed {props_max_sum}")}) # Validate bucket cache correct config prop_name = "hbase.bucketcache.ioengine" @@ -747,11 +747,11 @@ def validateHBASEConfigurationsFromHDP22(self, properties, recommendedDefaults, if prop_name1 in hbase_site and prop_name2 in hbase_site and hbase_site[prop_name1] and not hbase_site[prop_name2]: validationItems.append({"config-name": prop_name2, "item": self.getWarnItem( - "If bucketcache ioengine is enabled, {0} should be set".format(prop_name2))}) + f"If bucketcache ioengine is enabled, {prop_name2} should be set")}) if prop_name1 in hbase_site and prop_name3 in hbase_site and hbase_site[prop_name1] and not hbase_site[prop_name3]: 
validationItems.append({"config-name": prop_name3, "item": self.getWarnItem( - "If bucketcache ioengine is enabled, {0} should be set".format(prop_name3))}) + f"If bucketcache ioengine is enabled, {prop_name3} should be set")}) # Validate hbase.security.authentication. # Kerberos works only when security enabled. @@ -778,7 +778,7 @@ def validateHBASEEnvConfigurationsFromHDP22(self, properties, recommendedDefault if prop_name1 in hbase_site_properties and prop_name in hbase_env and hbase_site_properties[prop_name1] and hbase_site_properties[prop_name1] == "offheap" and not hbase_env[prop_name]: validationItems.append({"config-name": prop_name, "item": self.getWarnItem( - "If bucketcache ioengine is enabled, {0} should be set".format(prop_name))}) + f"If bucketcache ioengine is enabled, {prop_name} should be set")}) return self.toConfigurationValidationProblems(validationItems, "hbase-env") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_checkpoint_time.py index c7c3760518c..2d37a9d3732 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_checkpoint_time.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_checkpoint_time.py @@ -100,7 +100,7 @@ def execute(configurations={}, parameters={}, host_name=None): # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{HDFS_SITE_KEY} is a required parameter for the script']) if NN_HTTP_POLICY_KEY in configurations: http_policy = configurations[NN_HTTP_POLICY_KEY] @@ -165,12 +165,12 @@ def execute(configurations={}, parameters={}, host_name=None): uri = nn_address break if not uri: - return (RESULT_STATE_SKIPPED, ['NameNode on host {0} not found (namenode adresses = {1})'.format(host_name, ', '.join(nn_addresses))]) + return (RESULT_STATE_SKIPPED, [f"NameNode on host {host_name} not found (namenode adresses = {', '.join(nn_addresses)})"]) current_time = int(round(time.time() * 1000)) - last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri) - journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri) + last_checkpoint_time_qry = f"{scheme}://{uri}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem" + journal_transaction_info_qry = f"{scheme}://{uri}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo" # start out assuming an OK status label = None @@ -221,10 +221,10 @@ def execute(configurations={}, parameters={}, host_name=None): # Either too many uncommitted transactions or missed check-pointing for # long time decided by the thresholds if is_checkpoint_txn_critical or (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)): - logger.debug('Raising critical alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx)) + logger.debug(f'Raising critical alert: transaction_difference = {transaction_difference}, checkpoint_tx = {checkpoint_tx}') result_code = 'CRITICAL' elif is_checkpoint_txn_warning or (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)): - logger.debug('Raising warning alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx)) 
+ logger.debug(f'Raising warning alert: transaction_difference = {transaction_difference}, checkpoint_tx = {checkpoint_tx}') result_code = 'WARNING' except: diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py index b96acf4e862..6ba0f48a563 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py @@ -66,18 +66,18 @@ def execute(configurations={}, parameters={}, host_name=None): # Check required properties if DFS_DATA_DIR not in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)]) + return (RESULT_STATE_UNKNOWN, [f'{DFS_DATA_DIR} is a required parameter for the script']) dfs_data_dir = configurations[DFS_DATA_DIR] if dfs_data_dir is None: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)]) + return (RESULT_STATE_UNKNOWN, [f'{DFS_DATA_DIR} is a required parameter for the script and the value is null']) # This follows symlinks and will return False for a broken link (even in the middle of the linked list) data_dir_mount_file_exists = True if not os.path.exists(DATA_DIR_MOUNT_FILE): data_dir_mount_file_exists = False - warnings.append("{0} was not found.".format(DATA_DIR_MOUNT_FILE)) + warnings.append(f"{DATA_DIR_MOUNT_FILE} was not found.") normalized_data_dirs = set() # data dirs that have been normalized data_dirs_not_exist = set() # data dirs that do not exist diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_ha_namenode_health.py index 2f40172a3d2..ceb921963dc 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_ha_namenode_health.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_ha_namenode_health.py @@ -84,7 +84,7 @@ def execute(configurations={}, parameters={}, host_name=None): # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{HDFS_SITE_KEY} is a required parameter for the script']) if SMOKEUSER_KEY in configurations: smokeuser = configurations[SMOKEUSER_KEY] @@ -126,7 +126,7 @@ def execute(configurations={}, parameters={}, host_name=None): # look for dfs.ha.namenodes.foo nn_unique_ids_key = 'dfs.ha.namenodes.' 
+ name_service if not nn_unique_ids_key in hdfs_site: - return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)]) + return (RESULT_STATE_UNKNOWN, [f'Unable to find unique namenode alias key {nn_unique_ids_key}']) namenode_http_fragment = NAMENODE_HTTP_FRAGMENT jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*" diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_metrics_deviation.py index 32de2211843..88bc79726d6 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_metrics_deviation.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_metrics_deviation.py @@ -169,7 +169,7 @@ def execute(configurations={}, parameters={}, host_name=None): # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{HDFS_SITE_KEY} is a required parameter for the script']) if METRICS_COLLECTOR_VIP_HOST_KEY in configurations and METRICS_COLLECTOR_VIP_PORT_KEY in configurations: collector_host = configurations[METRICS_COLLECTOR_VIP_HOST_KEY].split(',')[0] @@ -177,7 +177,7 @@ def execute(configurations={}, parameters={}, host_name=None): else: # ams-site/timeline.metrics.service.webapp.address is required if not METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY} is a required parameter for the script']) else: collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":") if valid_collector_webapp_address(collector_webapp_address): @@ -190,7 +190,7 @@ def execute(configurations={}, parameters={}, host_name=None): namenode_service_rpc_address = None # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{HDFS_SITE_KEY} is a required parameter for the script']) hdfs_site = configurations[HDFS_SITE_KEY] @@ -201,7 +201,7 @@ def execute(configurations={}, parameters={}, host_name=None): if NAMESERVICE_KEY in configurations and app_id.lower() == 'namenode': # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]) + return (RESULT_STATE_UNKNOWN, [f'{HDFS_SITE_KEY} is a required parameter for the script']) if SMOKEUSER_KEY in configurations: smokeuser = configurations[SMOKEUSER_KEY] @@ -238,7 +238,7 @@ def execute(configurations={}, parameters={}, host_name=None): # look for dfs.ha.namenodes.foo nn_unique_ids_key = 'dfs.ha.namenodes.' 
+ name_service if not nn_unique_ids_key in hdfs_site: - return (RESULT_STATE_UNKNOWN, ['Unable to find unique NameNode alias key {0}'.format(nn_unique_ids_key)]) + return (RESULT_STATE_UNKNOWN, [f'Unable to find unique NameNode alias key {nn_unique_ids_key}']) namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}' jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*" @@ -282,7 +282,7 @@ def execute(configurations={}, parameters={}, host_name=None): active_namenodes.append(namenode) # Only check active NN - nn_service_rpc_address_key = 'dfs.namenode.servicerpc-address.{0}.{1}'.format(name_service, nn_unique_id) + nn_service_rpc_address_key = f'dfs.namenode.servicerpc-address.{name_service}.{nn_unique_id}' if nn_service_rpc_address_key in hdfs_site: namenode_service_rpc_address = hdfs_site[nn_service_rpc_address_key] pass @@ -362,7 +362,7 @@ def execute(configurations={}, parameters={}, host_name=None): # Filter out points below min threshold metrics = [metric for metric in metrics if metric > (minimum_value_threshold * minimum_value_multiplier)] if len(metrics) < 2: - return (RESULT_STATE_OK, ['There were no data points above the minimum threshold of {0} seconds'.format(minimum_value_threshold)]) + return (RESULT_STATE_OK, [f'There were no data points above the minimum threshold of {minimum_value_threshold} seconds']) mean_value = mean(metrics) stddev = sample_standard_deviation(metrics) @@ -375,13 +375,13 @@ def execute(configurations={}, parameters={}, host_name=None): # log the AMS request if logger.isEnabledFor(logging.DEBUG): - logger.debug(""" - AMS request parameters - {0} - AMS response - {1} - Mean - {2} - Standard deviation - {3} - Percentage standard deviation - {4} - """.format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent)) + logger.debug(f""" + AMS request parameters - {encoded_get_metrics_parameters} + AMS response - {data_json} + Mean - {mean_value} + Standard deviation - {stddev} + Percentage standard deviation - {deviation_percent} + """) mean_value_localized = locale.format("%.0f", mean_value, grouping=True) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_upgrade_finalized.py index a55b46cedfa..ab09648583c 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_upgrade_finalized.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/alerts/alert_upgrade_finalized.py @@ -75,7 +75,7 @@ def execute(configurations={}, parameters={}, host_name=None): # hdfs-site is required if not HDFS_SITE_KEY in configurations: - return 'SKIPPED', ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)] + return 'SKIPPED', [f'{HDFS_SITE_KEY} is a required parameter for the script'] if NN_HTTP_POLICY_KEY in configurations: http_policy = configurations[NN_HTTP_POLICY_KEY] @@ -114,9 +114,9 @@ def execute(configurations={}, parameters={}, host_name=None): break if not uri: return 'SKIPPED', [ - 'NameNode on host {0} not found (namenode adresses = {1})'.format(host_name, ', '.join(nn_addresses))] + f"NameNode on host {host_name} not found (namenode adresses = {', '.join(nn_addresses)})"] - upgrade_finalized_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme, uri) + upgrade_finalized_qry = f"{scheme}://{uri}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo" # 
start out assuming an OK status label = None diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/datanode_upgrade.py index 1a529d28e59..4ec9a1a2a10 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/datanode_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/datanode_upgrade.py @@ -52,7 +52,7 @@ def pre_rolling_upgrade_shutdown(hdfs_binary): if code != 0: # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it. if output is not None and re.search("Shutdown already in progress", output): - Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command)) + Logger.error(f"Due to a known issue in DataNode, the command {command} did not work, so will need to shutdown the datanode forcefully.") return False return True @@ -112,10 +112,10 @@ def _check_datanode_startup(hdfs_binary): hostname = params.hostname.lower() hostname_ip = socket.gethostbyname(params.hostname.lower()) if hostname in hdfs_output.lower() or hostname_ip in hdfs_output.lower(): - Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname)) + Logger.info(f"DataNode {params.hostname} reports that it has rejoined the cluster.") return else: - raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname)) + raise Fail(f"DataNode {params.hostname} was not found in the list of live DataNodes") # return_code is not 0, fail - raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code))) + raise Fail(f"Unable to determine if the DataNode has started after upgrade (result code {str(return_code)})") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py index 93bd8f0c9e4..71a1cfc1a95 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py @@ -178,7 +178,7 @@ def handle_new_line(self, line, is_stderr): if line.startswith('SUCCESS: Changed property'): self.reconfig_successful = True - Logger.info('[reconfig] %s' % (line)) + Logger.info(f'[reconfig] {line}') @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) def reconfig(componentName, componentAddress): diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_namenode.py index 26944fd3cd9..0895b96f243 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_namenode.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_namenode.py @@ -60,7 +60,7 @@ def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False, r sleep_minutes = int(sleep_seconds * retries / 60) - Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes)) + Logger.info(f"Waiting up to {sleep_minutes} minutes for the NameNode to leave Safemode...") if 
params.security_enabled and execute_kinit: kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") @@ -101,7 +101,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, # set up failover / secure zookeper ACLs, this feature is supported from HDP 2.6 ownwards set_up_zkfc_security(params) elif action == "start": - Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type))) + Logger.info(f"Called service {action} with upgrade_type: {str(upgrade_type)}") setup_ranger_hdfs(upgrade_type=upgrade_type) import params @@ -147,7 +147,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, options = "-rollingUpgrade downgrade" elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING: is_previous_image_dir = is_previous_fs_image() - Logger.info("Previous file system image dir present is {0}".format(str(is_previous_image_dir))) + Logger.info(f"Previous file system image dir present is {str(is_previous_image_dir)}") if params.upgrade_direction == Direction.UPGRADE: options = "-rollingUpgrade started" @@ -164,7 +164,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, Logger.info("The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. " "Assuming that the upgrade of NameNode has not occurred yet.".format(namenode_upgrade.get_upgrade_in_progress_marker())) - Logger.info("Options for start command are: {0}".format(options)) + Logger.info(f"Options for start command are: {options}") service( action="start", @@ -271,8 +271,8 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, #TODO: Replace with format_namenode() namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED") if not os.path.exists(namenode_format_marker): - hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd")) - Execute("%s namenode -format" % (hadoop_cmd), logoutput=True) + hadoop_cmd = f"cmd /C {os.path.join(params.hadoop_home, 'bin', 'hadoop.cmd')}" + Execute(f"{hadoop_cmd} namenode -format", logoutput=True) open(namenode_format_marker, 'a').close() Service(params.namenode_win_service_name, action=action) elif action == "stop": @@ -545,7 +545,7 @@ def bootstrap_standby_namenode(params, use_path=False): elif is_namenode_bootstrapped(params): # Once out of INITIAL_START phase bootstrap only if we couldnt bootstrap during cluster deployment return True - Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd)) + Logger.info(f"Boostrapping standby namenode: {bootstrap_cmd}") for i in range(iterations): Logger.info('Try %d out of %d' % (i+1, iterations)) code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user) @@ -560,7 +560,7 @@ def bootstrap_standby_namenode(params, use_path=False): else: Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code)) except Exception as ex: - Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex))) + Logger.error(f'Bootstrap standby namenode threw an exception. 
Reason {str(ex)}') if bootstrapped: for mark_dir in mark_dirs: Directory(mark_dir, diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_rebalance.py index c31041b329c..5a75684a19b 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_rebalance.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_rebalance.py @@ -114,7 +114,7 @@ def parseMemory(self, memorySize, multiplier_type): try: factor = self.MEMORY_SUFFIX.index(multiplier_type) except ValueError: - raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type)) + raise AmbariException(f"Failed to memory value [{memorySize} {multiplier_type}]") return float(memorySize) * (1024 ** factor) def toJson(self): diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/journalnode_upgrade.py index fd625dd7162..511ad1884eb 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/journalnode_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/journalnode_upgrade.py @@ -137,15 +137,15 @@ def ensure_jns_have_new_txn(nodelist, last_txn_id): if node in actual_txn_ids and actual_txn_ids[node] and actual_txn_ids[node] >= last_txn_id: continue - url = '%s://%s:%s' % (protocol, node, params.journalnode_port) + url = f'{protocol}://{node}:{params.journalnode_port}' data = utils.get_jmx_data(url, 'Journal-', 'LastWrittenTxId', params.https_only, params.security_enabled) if data: actual_txn_ids[node] = int(data) if actual_txn_ids[node] >= last_txn_id: - Logger.info("JournalNode %s has a higher transaction id: %s" % (node, str(data))) + Logger.info(f"JournalNode {node} has a higher transaction id: {str(data)}") jns_updated += 1 else: - Logger.info("JournalNode %s is still on transaction id: %s" % (node, str(data))) + Logger.info(f"JournalNode {node} is still on transaction id: {str(data)}") Logger.info("Sleeping for %d secs" % step_time_secs) time.sleep(step_time_secs) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode.py index 3e315188086..54563fb9f86 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode.py @@ -277,7 +277,7 @@ def rebalancehdfs(self, env): name_node_parameters = json.loads( params.name_node_params ) threshold = name_node_parameters['threshold'] - _print("Starting balancer with threshold = %s\n" % threshold) + _print(f"Starting balancer with threshold = {threshold}\n") rebalance_env = {'PATH': params.hadoop_bin_dir} @@ -303,7 +303,7 @@ def calculateCompletePercent(first, current): try: division_result = current.bytesLeftToMove/first.bytesLeftToMove except ZeroDivisionError: - Logger.warning("Division by zero. Bytes Left To Move = {0}. Return 1.0".format(first.bytesLeftToMove)) + Logger.warning(f"Division by zero. Bytes Left To Move = {first.bytesLeftToMove}. 
Return 1.0") return 1.0 return 1.0 - division_result @@ -319,7 +319,7 @@ def startRebalancingProcess(threshold, rebalance_env): basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator') command = ['ambari-python-wrap','hdfs-command.py'] - _print("Executing command %s\n" % command) + _print(f"Executing command {command}\n") parser = hdfs_rebalance.HdfsParser() @@ -327,7 +327,7 @@ def handle_new_line(line, is_stderr): if is_stderr: return - _print('[balancer] %s' % (line)) + _print(f'[balancer] {line}') pl = parser.parseLine(line) if pl: res = pl.toJson() @@ -335,7 +335,7 @@ def handle_new_line(line, is_stderr): self.put_structured_out(res) elif parser.state == 'PROCESS_FINISED' : - _print('[balancer] %s' % ('Process is finished' )) + _print(f'[balancer] Process is finished') self.put_structured_out({'completePercent' : 1}) return @@ -379,25 +379,25 @@ def rebalancehdfs(self, env): name_node_parameters = json.loads( params.name_node_params ) threshold = name_node_parameters['threshold'] - _print("Starting balancer with threshold = %s\n" % threshold) + _print(f"Starting balancer with threshold = {threshold}\n") def calculateCompletePercent(first, current): return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove def startRebalancingProcess(threshold): - rebalanceCommand = 'hdfs balancer -threshold %s' % threshold + rebalanceCommand = f'hdfs balancer -threshold {threshold}' return ['cmd', '/C', rebalanceCommand] command = startRebalancingProcess(threshold) basedir = os.path.join(env.config.basedir, 'scripts') - _print("Executing command %s\n" % command) + _print(f"Executing command {command}\n") parser = hdfs_rebalance.HdfsParser() returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_username, Script.get_password(params.hdfs_user), hdfs_domain) for line in stdout.split('\n'): - _print('[balancer] %s %s' % (str(datetime.now()), line )) + _print(f'[balancer] {str(datetime.now())} {line}') pl = parser.parseLine(line) if pl: res = pl.toJson() @@ -405,7 +405,7 @@ def startRebalancingProcess(threshold): self.put_structured_out(res) elif parser.state == 'PROCESS_FINISED' : - _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' )) + _print(f'[balancer] {str(datetime.now())} Process is finished') self.put_structured_out({'completePercent' : 1}) break diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_ha_state.py index a93075c5e20..494ebd07613 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_ha_state.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_ha_state.py @@ -120,11 +120,11 @@ def __init__(self): def __str__(self): return "Namenode HA State: {\n" + \ - ("IDs: %s\n" % ", ".join(self.nn_unique_ids)) + \ - ("Addresses: %s\n" % str(self.nn_unique_id_to_addresses)) + \ - ("States: %s\n" % str(self.namenode_state_to_hostnames)) + \ - ("Encrypted: %s\n" % str(self.encrypted)) + \ - ("Healthy: %s\n" % str(self.is_healthy())) + \ + f"IDs: {', '.join(self.nn_unique_ids)}\n" + \ + f"Addresses: {str(self.nn_unique_id_to_addresses)}\n" + \ + f"States: {str(self.namenode_state_to_hostnames)}\n" + \ + f"Encrypted: {str(self.encrypted)}\n" + \ + f"Healthy: {str(self.is_healthy())}\n" + \ "}" def is_encrypted(self): diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_upgrade.py index 251394ee877..45d58130b2d 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/namenode_upgrade.py @@ -81,11 +81,11 @@ def prepare_upgrade_enter_safe_mode(hdfs_binary): # Safe to call if already in Safe Mode desired_state = SafeMode.ON safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, params.dfs_ha_enabled, hdfs_binary) - Logger.info("Transition successful: {0}, original state: {1}".format(str(safemode_transition_successful), str(original_state))) + Logger.info(f"Transition successful: {str(safemode_transition_successful)}, original state: {str(original_state)}") if not safemode_transition_successful: - raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state)) + raise Fail(f"Could not transition to safemode state {str(desired_state)}. Please check logs to make sure namenode is up.") except Exception as e: - message = "Could not enter safemode. Error: {0}. As the HDFS user, call this command: {1}".format(str(e), safe_mode_enter_cmd) + message = f"Could not enter safemode. Error: {str(e)}. As the HDFS user, call this command: {safe_mode_enter_cmd}" Logger.error(message) raise Fail(message) @@ -115,7 +115,7 @@ def prepare_upgrade_backup_namenode_dir(): i = 0 failed_paths = [] nn_name_dirs = params.dfs_name_dir.split(',') - backup_destination_root_dir = "{0}/{1}".format(params.namenode_backup_dir, params.stack_version_unformatted) + backup_destination_root_dir = f"{params.namenode_backup_dir}/{params.stack_version_unformatted}" if len(nn_name_dirs) > 0: Logger.info("Backup the NameNode name directory's CURRENT folder.") for nn_dir in nn_name_dirs: @@ -123,7 +123,7 @@ def prepare_upgrade_backup_namenode_dir(): namenode_current_image = os.path.join(nn_dir, "current") unique = get_unique_id_and_date() + "_" + str(i) # Note that /tmp may not be writeable. - backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique) + backup_current_folder = f"{backup_destination_root_dir}/namenode_{unique}/" if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder): try: @@ -154,7 +154,7 @@ def prepare_upgrade_finalize_previous_upgrades(hdfs_binary): if out: expected_substring = "there is no rolling upgrade in progress" if expected_substring not in out.lower(): - Logger.warning('Finalize command did not contain substring: %s' % expected_substring) + Logger.warning(f'Finalize command did not contain substring: {expected_substring}') else: Logger.warning("Finalize command did not return any output.") except Exception as e: @@ -170,7 +170,7 @@ def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary): :return: Returns a tuple of (transition success, original state). 
If no change is needed, the indicator of success will be True """ - Logger.info("Prepare to transition into safemode state %s" % safemode_state) + Logger.info(f"Prepare to transition into safemode state {safemode_state}") import params original_state = SafeMode.UNKNOWN @@ -186,7 +186,7 @@ def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary): if code == 0 and out is not None: Logger.info(out) re_pattern = r"Safe mode is (\S*)" - Logger.info("Pattern to search: {0}".format(re_pattern)) + Logger.info(f"Pattern to search: {re_pattern}") m = re.search(re_pattern, out, re.IGNORECASE) if m and len(m.groups()) >= 1: original_state = m.group(1).upper() @@ -222,7 +222,7 @@ def prepare_rolling_upgrade(hdfs_binary): import params if not params.upgrade_direction or params.upgrade_direction not in [Direction.UPGRADE, Direction.DOWNGRADE]: - raise Fail("Could not retrieve upgrade direction: %s" % str(params.upgrade_direction)) + raise Fail(f"Could not retrieve upgrade direction: {str(params.upgrade_direction)}") Logger.info(format("Performing a(n) {params.upgrade_direction} of HDFS")) if params.security_enabled: @@ -235,7 +235,7 @@ def prepare_rolling_upgrade(hdfs_binary): desired_state = SafeMode.OFF safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, True, hdfs_binary) if not safemode_transition_successful: - raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state)) + raise Fail(f"Could not transition to safemode state {str(desired_state)}. Please check logs to make sure namenode is up.") dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary) prepare = dfsadmin_base_command + " -rollingUpgrade prepare" @@ -299,7 +299,7 @@ def create_upgrade_marker(): if not os.path.isfile(namenode_upgrade_in_progress_marker): File(namenode_upgrade_in_progress_marker) except: - Logger.warning("Unable to create NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker)) + Logger.warning(f"Unable to create NameNode upgrade marker file {namenode_upgrade_in_progress_marker}") def delete_upgrade_marker(): @@ -317,7 +317,7 @@ def delete_upgrade_marker(): if os.path.isfile(namenode_upgrade_in_progress_marker): File(namenode_upgrade_in_progress_marker, action='delete') except: - error_message = "Unable to remove NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker) + error_message = f"Unable to remove NameNode upgrade marker file {namenode_upgrade_in_progress_marker}" Logger.error(error_message) raise Fail(error_message) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/service_check.py index 7173ff54f73..e3b8081e112 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/service_check.py @@ -137,16 +137,16 @@ def service_check(self, env): tmp_file = dir + '/' + unique #commands for execution - hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd")) - create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir) - own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir) - test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir) - cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file) - create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, 
os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file) - test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file) - - hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd")) - safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe) + hadoop_cmd = f"cmd /C {os.path.join(params.hadoop_home, 'bin', 'hadoop.cmd')}" + create_dir_cmd = f"{hadoop_cmd} fs -mkdir {dir}" + own_dir = f"{hadoop_cmd} fs -chmod 777 {dir}" + test_dir_exists = f"{hadoop_cmd} fs -test -e {dir}" + cleanup_cmd = f"{hadoop_cmd} fs -rm {tmp_file}" + create_file_cmd = f"{hadoop_cmd} fs -put {os.path.join(params.hadoop_conf_dir, 'core-site.xml')} {tmp_file}" + test_cmd = f"{hadoop_cmd} fs -test -e {tmp_file}" + + hdfs_cmd = f"cmd /C {os.path.join(params.hadoop_home, 'bin', 'hdfs.cmd')}" + safemode_command = f"{hdfs_cmd} dfsadmin -safemode get | {params.grep_exe} OFF" Execute(safemode_command, logoutput=True, try_sleep=3, tries=20) Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py index 9bebc216637..d903e7e8b5e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py @@ -52,7 +52,7 @@ def safe_zkfc_op(action, env): :param action: start or stop :param env: environment """ - Logger.info("Performing action {0} on zkfc.".format(action)) + Logger.info(f"Performing action {action} on zkfc.") zkfc = None if action == "start": try: @@ -96,7 +96,7 @@ def initiate_safe_zkfc_failover(): Logger.info(format("Standby NameNode id: {standby_namenode_id}")) if unknown_namenodes: for unknown_namenode in unknown_namenodes: - Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0])) + Logger.info(f"NameNode HA state for {unknown_namenode[0]} is unknown") if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id: # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover) @@ -105,7 +105,7 @@ def initiate_safe_zkfc_failover(): failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}") check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby") - msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname) + msg = f"Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {params.hostname}." Logger.info(msg) code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True) Logger.info(format("Rolling Upgrade - failover command returned {code}")) @@ -133,7 +133,7 @@ def initiate_safe_zkfc_failover(): try_sleep=6, logoutput=True) else: - msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname) + msg = f"Rolling Upgrade - Skipping ZKFC failover on NameNode host {params.hostname}." 
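The message conversions in this patch all follow the same pattern: %-interpolation and str.format() calls are replaced by f-strings, which render identically for these simple substitutions, so the change is behavior-preserving. A minimal standalone sketch (the host name below is a hypothetical value, not taken from the patch):

# All three formatting styles produce the same string for a plain value.
hostname = "c7401.ambari.apache.org"  # hypothetical host name for illustration
via_percent = "Rolling Upgrade - Skipping ZKFC failover on NameNode host %s." % hostname
via_format = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(hostname)
via_fstring = f"Rolling Upgrade - Skipping ZKFC failover on NameNode host {hostname}."
assert via_percent == via_format == via_fstring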
Logger.info(msg) def kill_zkfc(zkfc_user): @@ -320,7 +320,7 @@ def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_ena nn_address = nn_address + "/" nn_address = nn_address + "jmx" - Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address)) + Logger.info(f"Retrieve modeler: {modeler_type}, metric: {metric} from JMX endpoint {nn_address}") if security_enabled: import params diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/zkfc_slave.py index 3042405e18b..6c08c346fbf 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/zkfc_slave.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/zkfc_slave.py @@ -156,7 +156,7 @@ def initialize_ha_zookeeper(params): try: iterations = 10 formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive" - Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd)) + Logger.info(f"Initialize HA state in ZooKeeper: {formatZK_cmd}") for i in range(iterations): Logger.info('Try %d out of %d' % (i+1, iterations)) code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user) @@ -169,7 +169,7 @@ def initialize_ha_zookeeper(params): else: Logger.warning('HA state initialization in ZooKeeper failed with %d error code. Will retry' % (code)) except Exception as ex: - Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex))) + Logger.error(f'HA state initialization in ZooKeeper threw an exception. Reason {str(ex)}') return False diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/service_advisor.py index 44fd5b3f4b7..0fd9626a25b 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/service_advisor.py @@ -192,7 +192,7 @@ def recommendConfigurationsFromHDP206(self, configurations, clusterData, service putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, "hdfs-site") totalAvailableRam = clusterData['totalAvailableRam'] - self.logger.info("Class: %s, Method: %s. Total Available Ram: %s" % (self.__class__.__name__, inspect.stack()[0][3], str(totalAvailableRam))) + self.logger.info(f"Class: {self.__class__.__name__}, Method: {inspect.stack()[0][3]}. 
Total Available Ram: {str(totalAvailableRam)}") putHDFSProperty('namenode_heapsize', max(int(totalAvailableRam / 2), 1024)) putHDFSProperty = self.putProperty(configurations, "hadoop-env", services) putHDFSProperty('namenode_opt_newsize', max(int(totalAvailableRam / 8), 128)) @@ -206,8 +206,8 @@ def recommendConfigurationsFromHDP206(self, configurations, clusterData, service nameServices = hdfsSiteProperties['dfs.internal.nameservices'] if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties: nameServices = hdfsSiteProperties['dfs.nameservices'] - if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties: - namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices] + if nameServices and f"dfs.ha.namenodes.{nameServices}" in hdfsSiteProperties: + namenodes = hdfsSiteProperties[f"dfs.ha.namenodes.{nameServices}"] if len(namenodes.split(',')) > 1: putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true") @@ -576,7 +576,7 @@ def sampleValidator(self, properties, recommendedDefaults, configurations, servi item = {"level": "ERROR|WARN", "message": "value"} ''' validationItems.append({"config-name": "my_config_property_name", - "item": self.getErrorItem("My custom message in method %s" % inspect.stack()[0][3])}) + "item": self.getErrorItem(f"My custom message in method {inspect.stack()[0][3]}")}) return self.toConfigurationValidationProblems(validationItems, "hadoop-env") def validateHDFSConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts): @@ -614,7 +614,7 @@ def validatorOneDataDirPerPartition(self, properties, propertyName, services, ho break if len(warnings) > 0: - return self.getWarnItem("cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {0}".format(", ".join(sorted(warnings)))) + return self.getWarnItem(f"cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {', '.join(sorted(warnings))}") return None @@ -680,8 +680,8 @@ def getAmbariProxyUsersForHDFSValidationItems(self, properties, services): if "HDFS" in servicesList: ambari_user = self.getAmbariUser(services) props = ( - "hadoop.proxyuser.{0}.hosts".format(ambari_user), - "hadoop.proxyuser.{0}.groups".format(ambari_user) + f"hadoop.proxyuser.{ambari_user}.hosts", + f"hadoop.proxyuser.{ambari_user}.groups" ) for prop in props: validationItems.append({"config-name": prop, "item": self.validatorNotEmpty(properties, prop)}) @@ -819,7 +819,7 @@ def validateHDFSConfigurationsFromHDP22(self, properties, recommendedDefaults, c if data_transfer_protection_value is not None: if data_transfer_protection_value not in VALID_TRANSFER_PROTECTION_VALUES: validationItems.append({"config-name": data_transfer_protection, "item": self.getWarnItem( - "Invalid property value: {0}. Valid values are {1}.".format(data_transfer_protection_value, VALID_TRANSFER_PROTECTION_VALUES) + f"Invalid property value: {data_transfer_protection_value}. Valid values are {VALID_TRANSFER_PROTECTION_VALUES}." 
)}) return self.toConfigurationValidationProblems(validationItems, "hdfs-site") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py index de03c4f4d58..e64aac26294 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py @@ -243,7 +243,7 @@ def execute(configurations={}, parameters={}, host_name=None): def shouldCheck(kinitcmd, smokeuser, host_name, webui_port, webui_use_ssl): """Should check node if: 1) non-HA setup or 2) active node in a HA setup""" - logger.debug("shouldCheck kinitcmd={}, smokeuser={}, host_name={}, webui_port={}, webui_use_ssl={}".format(kinitcmd, smokeuser, host_name, webui_port, webui_use_ssl)) + logger.debug(f"shouldCheck kinitcmd={kinitcmd}, smokeuser={smokeuser}, host_name={host_name}, webui_port={webui_port}, webui_use_ssl={webui_use_ssl}") if (kinitcmd): # prevent concurrent kinit @@ -255,10 +255,10 @@ def shouldCheck(kinitcmd, smokeuser, host_name, webui_port, webui_use_ssl): kinit_lock.release() protocol = "https" if webui_use_ssl else "http" - check_cmd = "curl -k {}://{}:{}/leader".format(protocol, host_name, webui_port) - logger.debug("cmd={}".format(check_cmd)) + check_cmd = f"curl -k {protocol}://{host_name}:{webui_port}/leader" + logger.debug(f"cmd={check_cmd}") code, out, err = shell.call(check_cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, quiet = True) - logger.debug("code={}, out={}, err={}".format(code, out, err)) + logger.debug(f"code={code}, out={out}, err={err}") if (code != 0): - raise Exception("shouldCheck failed with exit code {}".format(code)) + raise Exception(f"shouldCheck failed with exit code {code}") return out != 'false' # false means there is a HA setup and the server is in standby mode \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive.py index 3d09284ba54..1a6ad9046a3 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive.py @@ -54,7 +54,7 @@ def hive(name=None): # We should change configurations for client as well as for server. # The reason is that stale-configs are service-level, not component. 
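The shouldCheck helper converted above probes the HiveServer2 Interactive web UI /leader endpoint to decide whether this host should be alerted on; the literal string 'false' indicates a standby instance in an HA pair. A rough standalone sketch of the same check (subprocess stands in for resource_management's shell.call, and the simplified should_check below omits the kinit handling shown in the hunk):

import subprocess

def should_check(host_name, webui_port, webui_use_ssl):
    protocol = "https" if webui_use_ssl else "http"
    check_cmd = f"curl -k {protocol}://{host_name}:{webui_port}/leader"
    proc = subprocess.run(check_cmd, shell=True, capture_output=True, text=True)
    if proc.returncode != 0:
        raise Exception(f"shouldCheck failed with exit code {proc.returncode}")
    # 'false' means there is an HA setup and this server is currently in standby mode
    return proc.stdout.strip() != "false"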
- Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list)) + Logger.info(f"Directories to fill with configs: {str(params.hive_conf_dirs_list)}") for conf_dir in params.hive_conf_dirs_list: fill_conf_dir(conf_dir) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive_server_upgrade.py index aafbe23a4c9..075154ea615 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive_server_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/hive_server_upgrade.py @@ -130,7 +130,7 @@ def _get_current_hiveserver_version(): raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.') if return_code != 0: - raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code))) + raise Fail(f'Unable to determine the current HiveServer2 version because of a non-zero return code of {str(return_code)}') match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', output, re.MULTILINE) @@ -138,6 +138,6 @@ def _get_current_hiveserver_version(): current_hive_server_version = match.group(2) return current_hive_server_version else: - raise Fail('The extracted hiveserver2 version "{0}" does not matching any known pattern'.format(output)) + raise Fail(f'The extracted hiveserver2 version "{output}" does not matching any known pattern') diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py index 13f95f3fd58..3e027728bc3 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py @@ -160,8 +160,8 @@ # --- Tarballs --- # DON'T CHANGE THESE VARIABLE NAMES # Values don't change from those in copy_tarball.py -hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN) -hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN) +hive_tar_source = f"{STACK_ROOT_PATTERN}/{STACK_VERSION_PATTERN}/hive/hive.tar.gz" +hive_tar_dest_file = f"/{STACK_NAME_PATTERN}/apps/{STACK_VERSION_PATTERN}/hive/hive.tar.gz" tarballs_mode = 0o444 diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/service_check.py index 5bbf08acb61..8b6e8f6bd0c 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/service_check.py @@ -69,7 +69,7 @@ def service_check(self, env): def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port, ssl_keystore, ssl_password): import params env.set_params(params) - Logger.info("Server Address List : {0}, Port : {1}, SSL KeyStore : {2}".format(address_list, server_port, ssl_keystore)) + Logger.info(f"Server Address List : {address_list}, Port : {server_port}, SSL KeyStore : {ssl_keystore}") if not address_list: raise Fail("Can not find any "+server_component_name+" ,host. 
Please check configuration.") @@ -79,7 +79,7 @@ def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, start_time = time.time() end_time = start_time + SOCKET_WAIT_SECONDS - Logger.info("Waiting for the {0} to start...".format(server_component_name)) + Logger.info(f"Waiting for the {server_component_name} to start...") workable_server_available = False i = 0 @@ -93,10 +93,10 @@ def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, ssl_keystore=ssl_keystore, ssl_password=ssl_password, ldap_username=params.hive_ldap_user, ldap_password=params.hive_ldap_passwd, pam_username=params.hive_pam_username, pam_password=params.hive_pam_password) - Logger.info("Successfully connected to {0} on port {1}".format(address, server_port)) + Logger.info(f"Successfully connected to {address} on port {server_port}") workable_server_available = True except: - Logger.info("Connection to {0} on port {1} failed".format(address, server_port)) + Logger.info(f"Connection to {address} on port {server_port} failed") time.sleep(5) i += 1 @@ -145,7 +145,7 @@ def check_llap(self, env, kinit_cmd, address, port, key, hive_auth="NOSASL", tra exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin # beeline path - llap_cmd = "! beeline -u '%s'" % format(";".join(beeline_url)) + llap_cmd = f"! beeline -u '{format(';'.join(beeline_url))}'" # Append LLAP SQL script path llap_cmd += format(" --hiveconf \"hiveLlapServiceCheck={unique_id}\" -f {stack_root}/current/hive-server2/scripts/llap/sql/serviceCheckScript.sql") # Append grep patterns for detecting failure diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/setup_ranger_hive.py index aeeb8691bed..489e7545564 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/setup_ranger_hive.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/setup_ranger_hive.py @@ -60,7 +60,7 @@ def setup_ranger_hive(upgrade_type = None): ) params.HdfsResource(None, action="execute") except Exception as err: - Logger.exception("Audit directory creation in HDFS for HIVE Ranger plugin failed with error:\n{0}".format(err)) + Logger.exception(f"Audit directory creation in HDFS for HIVE Ranger plugin failed with error:\n{err}") api_version='v2' diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/service_advisor.py index 83b192fca06..da961678c69 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/service_advisor.py @@ -537,7 +537,7 @@ def recommendHiveConfigurationsFromHDP30(self, configurations, clusterData, serv metadata_port = services["configurations"]["application-properties"]["properties"]["atlas.server.https.port"] else: metadata_port = atlas_server_default_https_port - putHiveSiteProperty("atlas.rest.address", "{0}://{1}:{2}".format(scheme, atlas_rest_host, metadata_port)) + putHiveSiteProperty("atlas.rest.address", f"{scheme}://{atlas_rest_host}:{metadata_port}") else: putHiveSitePropertyAttribute("atlas.cluster.name", "delete", "true") putHiveSitePropertyAttribute("atlas.rest.address", "delete", "true") @@ -561,7 +561,7 @@ def 
recommendHiveConfigurationsFromHDP30(self, configurations, clusterData, serv if hive_tez_default_queue: putHiveInteractiveSiteProperty("hive.server2.tez.default.queues", hive_tez_default_queue) - self.logger.debug("Updated 'hive.server2.tez.default.queues' config : '{0}'".format(hive_tez_default_queue)) + self.logger.debug(f"Updated 'hive.server2.tez.default.queues' config : '{hive_tez_default_queue}'") else: self.logger.info("DBG: Setting 'num_llap_nodes' config's READ ONLY attribute as 'True'.") putHiveInteractiveEnvProperty("enable_hive_interactive", "false") @@ -617,7 +617,7 @@ def druid_host(self, component_name, config_type, services, hosts, default_host= if hosts and config_type in services['configurations']: host = hosts[0]['Hosts']['host_name'] port = services['configurations'][config_type]['properties']['druid.port'] - return "%s:%s" % (host, port) + return f"{host}:{port}" else: return default_host @@ -649,9 +649,9 @@ def setLlapDaemonQueuePropAttributes(self, services, configurations): "count(configurations['capacity-scheduler']['properties']['capacity-scheduler']) = " "{0}".format(len(capacity_scheduler_properties))) else: - self.logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str)) + self.logger.info(f"Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {cap_sched_props_as_str}") else: - self.logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str)) + self.logger.info(f"configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {cap_sched_props_as_str}.") # if "capacity_scheduler_properties" is empty, implies we may have "capacity-scheduler" configs as dictionary # in configurations, if "capacity-scheduler" changed in current invocation. @@ -660,7 +660,7 @@ def setLlapDaemonQueuePropAttributes(self, services, configurations): capacity_scheduler_properties = cap_sched_props_as_dict self.logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.") else: - self.logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict)) + self.logger.info(f"Read configurations['capacity-scheduler']['properties'] is : {cap_sched_props_as_dict}") else: self.logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.") @@ -677,7 +677,7 @@ def setLlapDaemonQueuePropAttributes(self, services, configurations): leafQueues = [{"label": str(queueName), "value": queueName} for queueName in leafQueueNames] leafQueues = sorted(leafQueues, key=lambda q: q["value"]) putHiveInteractiveSitePropertyAttribute("hive.llap.daemon.queue.name", "entries", leafQueues) - self.logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues)) + self.logger.info(f"'hive.llap.daemon.queue.name' config Property Attributes set to : {leafQueues}") else: self.logger.error("Problem retrieving YARN queues. 
Skipping updating HIVE Server Interactve " "'hive.server2.tez.default.queues' property attributes.") @@ -998,7 +998,7 @@ def validateHiveInteractiveSiteConfigurationsFromHDP30(self, properties, recomme validationItems.append({"config-name": "hive.server2.tez.sessions.per.default.queue","item": self.getWarnItem(errMsg3)}) if int(hsi_site["hive.llap.io.memory.size"]) > int(hsi_site["hive.llap.daemon.yarn.container.mb"]): - errorMessage = "In-Memory Cache per Daemon (value: {0}) may not be more then Memory per Daemon (value: {1})".format(hsi_site["hive.llap.io.memory.size"], hsi_site["hive.llap.daemon.yarn.container.mb"]) + errorMessage = f"In-Memory Cache per Daemon (value: {hsi_site['hive.llap.io.memory.size']}) may not be more then Memory per Daemon (value: {hsi_site['hive.llap.daemon.yarn.container.mb']})" validationItems.append({"config-name": "hive.llap.io.memory.size","item": self.getErrorItem(errorMessage)}) # Validate that "remaining available capacity" in cluster is at least 512 MB, after "llap" queue is selected, @@ -1039,7 +1039,7 @@ def get_yarn_nm_mem_in_mb(self, services, configurations): yarn_nm_mem_in_mb = float(yarn_site["yarn.nodemanager.resource.memory-mb"]) if yarn_nm_mem_in_mb <= 0.0: - self.logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb)) + self.logger.warning(f"'yarn.nodemanager.resource.memory-mb' current value : {yarn_nm_mem_in_mb}. Expected value : > 0") return yarn_nm_mem_in_mb @@ -1085,10 +1085,10 @@ def __getQueueAmFractionFromCapacityScheduler(self, capacity_scheduler_propertie for key in cap_sched_keys: if key.endswith("."+llap_daemon_selected_queue_name+".maximum-am-resource-percent"): llap_selected_queue_am_percent_key = key - self.logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key)) + self.logger.info(f"AM percent key got for '{llap_daemon_selected_queue_name}' queue is : '{llap_selected_queue_am_percent_key}'") break; if llap_selected_queue_am_percent_key is None: - self.logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name)) + self.logger.info(f"Returning default AM percent value : '0.1' for queue : {llap_daemon_selected_queue_name}") return 0.1 # Default value to use if we couldn't retrieve queue's corresponding AM Percent key. else: llap_selected_queue_am_percent = capacity_scheduler_properties.get(llap_selected_queue_am_percent_key) @@ -1101,7 +1101,7 @@ def __getSelectedQueueTotalCap(self, capacity_scheduler_properties, llap_daemon_ """ Calculates the total available capacity for the passed-in YARN queue of any level based on the percentages. """ - self.logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name)) + self.logger.info(f"Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{llap_daemon_selected_queue_name}'.") available_capacity = total_cluster_capacity queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name) if queue_cap_key: @@ -1111,13 +1111,13 @@ def __getSelectedQueueTotalCap(self, capacity_scheduler_properties, llap_daemon_ queue_path = queue_cap_key[24:] # Strip from beginning "yarn.scheduler.capacity." 
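Several of the converted messages above index into a dict inside the replacement field, which is why the inner quotes differ from the f-string's own quotes; prior to Python 3.12 an f-string may not reuse its enclosing quote character inside the braces. A small sketch with a hypothetical configuration dict:

hsi_site = {"hive.llap.io.memory.size": "4096"}  # hypothetical hive-interactive-site values
msg = f"In-Memory Cache per Daemon (value: {hsi_site['hive.llap.io.memory.size']})"  # inner quotes must differ
assert msg == "In-Memory Cache per Daemon (value: 4096)"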
queue_path = queue_path[0:-9] # Strip from end ".capacity" queues_list = queue_path.split(".") - self.logger.info("Queue list : {0}".format(queues_list)) + self.logger.info(f"Queue list : {queues_list}") if queues_list: for queue in queues_list: queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, queue) queue_cap_perc = float(capacity_scheduler_properties.get(queue_cap_key)) available_capacity = queue_cap_perc / 100 * available_capacity - self.logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity)) + self.logger.info(f"Total capacity available for queue {queue} is : {available_capacity}") # returns the capacity calculated for passed-in queue in "llap_daemon_selected_queue_name". return available_capacity @@ -1170,15 +1170,15 @@ def get_yarn_min_container_size(self, services, configurations): # Check if services["changed-configurations"] is empty and "yarn.scheduler.minimum-allocation-mb" is modified in current ST invocation. if not services["changed-configurations"] and yarn_site and yarn_min_allocation_property in yarn_site: yarn_min_container_size = yarn_site[yarn_min_allocation_property] - self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size)) + self.logger.info(f"DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {yarn_min_container_size}") # Check if "yarn.scheduler.minimum-allocation-mb" is input in services array. elif yarn_site_properties and yarn_min_allocation_property in yarn_site_properties: yarn_min_container_size = yarn_site_properties[yarn_min_allocation_property] - self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size)) + self.logger.info(f"DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {yarn_min_container_size}") if not yarn_min_container_size: - self.logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property)) + self.logger.error(f"{yarn_min_allocation_property} was not found in the configuration") return yarn_min_container_size @@ -1219,10 +1219,10 @@ def calculate_tez_am_container_size(self, services, total_cluster_capacity, is_c elif total_cluster_capacity > 98304: calculated_tez_am_resource_memory_mb = 4096 - self.logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb)) + self.logger.info(f"DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {calculated_tez_am_resource_memory_mb}") return float(calculated_tez_am_resource_memory_mb) else: - self.logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb)) + self.logger.info(f"DBG: Returning 'tez_am_resource_memory_mb' as : {tez_am_resource_memory_mb}") return float(tez_am_resource_memory_mb) def get_tez_am_resource_memory_mb(self, services): diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py index 972e9e5411a..4011078858e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py @@ -107,16 +107,16 @@ def stop(self, env, upgrade_type=None): try: pid = int(sudo.read_file(params.kafka_pid_file)) except: 
- Logger.info("Pid file {0} does not exist or does not contain a process id number".format(params.kafka_pid_file)) + Logger.info(f"Pid file {params.kafka_pid_file} does not exist or does not contain a process id number") return max_wait = 120 for i in range(max_wait): - Logger.info("Waiting for Kafka Broker stop, current pid: {0}, seconds: {1}s".format(pid, i + 1)) + Logger.info(f"Waiting for Kafka Broker stop, current pid: {pid}, seconds: {i + 1}s") try: sudo.kill(pid, signal.SIGTERM.value) except OSError as e: - Logger.info("Kafka Broker is not running, delete pid file: {0}".format(params.kafka_pid_file)) + Logger.info(f"Kafka Broker is not running, delete pid file: {params.kafka_pid_file}") File(params.kafka_pid_file, action = "delete") return @@ -128,7 +128,7 @@ def stop(self, env, upgrade_type=None): File(params.kafka_pid_file, action = "delete") return - raise Fail("Cannot stop Kafka Broker after {0} seconds".format(max_wait)) + raise Fail(f"Cannot stop Kafka Broker after {max_wait} seconds") def disable_security(self, env): @@ -140,7 +140,7 @@ def disable_security(self, env): Logger.info("The zookeeper.set.acl is false. Skipping reverting ACL") return Execute( - "{0} --zookeeper.connect {1} --zookeeper.acl=unsecure".format(params.kafka_security_migrator, params.zookeeper_connect), \ + f"{params.kafka_security_migrator} --zookeeper.connect {params.zookeeper_connect} --zookeeper.acl=unsecure", \ user=params.kafka_user, \ environment={ 'JAVA_HOME': params.java64_home }, \ logoutput=True, \ diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/service_check.py index 350e70ec00b..fb9e6c6d94b 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/service_check.py @@ -42,26 +42,26 @@ def service_check(self, env): topic_exists_cmd_code, topic_exists_cmd_out = shell.call(topic_exists_cmd, logoutput=True, quiet=False, user=params.kafka_user) if topic_exists_cmd_code > 0: - raise Fail("Error encountered when attempting to list topics: {0}".format(topic_exists_cmd_out)) + raise Fail(f"Error encountered when attempting to list topics: {topic_exists_cmd_out}") if not params.kafka_delete_topic_enable: - Logger.info('Kafka delete.topic.enable is not enabled. Skipping topic creation: %s' % topic) + Logger.info(f'Kafka delete.topic.enable is not enabled. 
Skipping topic creation: {topic}') return # run create topic command only if the topic doesn't exists if topic not in topic_exists_cmd_out: create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1") command = source_cmd + " ; " + create_topic_cmd - Logger.info("Running kafka create topic command: %s" % command) + Logger.info(f"Running kafka create topic command: {command}") call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user) under_rep_cmd = format("{kafka_home}/bin/kafka-topics.sh --describe --zookeeper {kafka_config[zookeeper.connect]} --under-replicated-partitions") under_rep_cmd_code, under_rep_cmd_out = shell.call(under_rep_cmd, logoutput=True, quiet=False, user=params.kafka_user) if under_rep_cmd_code > 0: - raise Fail("Error encountered when attempting find under replicated partitions: {0}".format(under_rep_cmd_out)) + raise Fail(f"Error encountered when attempting to find under replicated partitions: {under_rep_cmd_out}") elif len(under_rep_cmd_out) > 0 and "Topic" in under_rep_cmd_out: - Logger.warning("Under replicated partitions found: {0}".format(under_rep_cmd_out)) + Logger.warning(f"Under replicated partitions found: {under_rep_cmd_out}") def read_kafka_config(self): import params diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/setup_ranger_kafka.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/setup_ranger_kafka.py index a82bb9ad3ed..b2e643edb6c 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/setup_ranger_kafka.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/setup_ranger_kafka.py @@ -59,7 +59,7 @@ def setup_ranger_kafka(): else: Logger.info('Ranger KMS is not ssl enabled, skipping ssl-client for hdfs audits.') except Exception as err: - Logger.exception("Audit directory creation in HDFS for KAFKA Ranger plugin failed with error:\n{0}".format(err)) + Logger.exception(f"Audit directory creation in HDFS for KAFKA Ranger plugin failed with error:\n{err}") setup_ranger_plugin('kafka-broker', params.service_name, params.previous_jdbc_jar, params.downloaded_custom_connector, params.driver_curl_source, diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/upgrade.py index 7c862214b46..e3580a1675d 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/upgrade.py @@ -46,7 +46,7 @@ def run_migration(env, upgrade_type): Logger.info("Skip running the Kafka ACL migration script since cluster security is not enabled.") return - Logger.info("Upgrade type: {0}, direction: {1}".format(str(upgrade_type), params.upgrade_direction)) + Logger.info(f"Upgrade type: {str(upgrade_type)}, direction: {params.upgrade_direction}") # If the schema upgrade script exists in the version upgrading to, then attempt to upgrade/downgrade it while still using the present bits.
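The Kafka service check above shells out to kafka-topics.sh and raises with the captured output whenever the command exits non-zero. A rough standalone equivalent (subprocess in place of resource_management's shell.call; the kafka_home path and ZooKeeper address are hypothetical defaults):

import subprocess

def list_topics(kafka_home="/usr/lib/kafka", zookeeper_connect="c7401:2181"):
    cmd = f"{kafka_home}/bin/kafka-topics.sh --zookeeper {zookeeper_connect} --list"
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if proc.returncode > 0:
        raise RuntimeError(f"Error encountered when attempting to list topics: {proc.stdout}")
    return proc.stdout.splitlines()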
kafka_acls_script = None @@ -60,7 +60,7 @@ def run_migration(env, upgrade_type): if kafka_acls_script is not None: if os.path.exists(kafka_acls_script): - Logger.info("Found Kafka acls script: {0}".format(kafka_acls_script)) + Logger.info(f"Found Kafka acls script: {kafka_acls_script}") if params.zookeeper_connect is None: raise Fail("Could not retrieve property kafka-broker/zookeeper.connect") @@ -71,4 +71,4 @@ def run_migration(env, upgrade_type): user=params.kafka_user, logoutput=True) else: - Logger.info("Did not find Kafka acls script: {0}".format(kafka_acls_script)) + Logger.info(f"Did not find Kafka acls script: {kafka_acls_script}") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/service_advisor.py index 9d7360fb015..ebe10eb1e4a 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/service_advisor.py @@ -311,7 +311,7 @@ def update_listeners_to_sasl(self, services, putKafkaBrokerProperty): listeners = re.sub(r"(^|\b)SSL://", "SASL_SSL://", listeners) putKafkaBrokerProperty('listeners', listeners) except KeyError as e: - self.logger.info('Cannot replace PLAINTEXT to SASL_PLAINTEXT in listeners. KeyError: %s' % e) + self.logger.info(f'Cannot replace PLAINTEXT to SASL_PLAINTEXT in listeners. KeyError: {e}') def recommendKAFKAConfigurationsFromHDP26(self, configurations, clusterData, services, hosts): if 'kafka-env' in services['configurations'] and 'kafka_user' in services['configurations']['kafka-env']['properties']: diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/package/scripts/service_check.py index 05be1f43914..0152baf1e49 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/package/scripts/service_check.py @@ -50,10 +50,10 @@ def service_check(self, env): if ((params.smoke_test_principal is not None) and (params.smoke_test_keytab_file is not None) and os.path.isfile(params.smoke_test_keytab_file)): - print("Performing kinit using %s" % params.smoke_test_principal) + print(f"Performing kinit using {params.smoke_test_principal}") - ccache_file_name = HASH_ALGORITHM("{0}|{1}".format(params.smoke_test_principal, params.smoke_test_keytab_file).encode('utf-8')).hexdigest() - ccache_file_path = "{0}{1}kerberos_service_check_cc_{2}".format(params.tmp_dir, os.sep, ccache_file_name) + ccache_file_name = HASH_ALGORITHM(f"{params.smoke_test_principal}|{params.smoke_test_keytab_file}".encode('utf-8')).hexdigest() + ccache_file_path = f"{params.tmp_dir}{os.sep}kerberos_service_check_cc_{ccache_file_name}" kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None)) kinit_command = "{0} -c {1} -kt {2} {3}".format(kinit_path_local, ccache_file_path, params.smoke_test_keytab_file, diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/pre_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/pre_upgrade.py index bc8d2cd3b3a..03848eb24b2 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/pre_upgrade.py +++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/pre_upgrade.py @@ -41,7 +41,7 @@ def prepare(self, env): Logger.info("Before starting Stack Upgrade, check if tez tarball has been copied to HDFS.") if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted): - Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS.".format(params.stack_version_formatted)) + Logger.info(f"Stack version {params.stack_version_formatted} is sufficient to check if need to copy tez.tar.gz to HDFS.") # Force it to copy the current version of the tez tarball, rather than the version the RU will go to. resource_created = copy_to_hdfs( diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/tez_client.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/tez_client.py index 4cd2655fc03..d0f52f6cea6 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/tez_client.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/tez_client.py @@ -72,7 +72,7 @@ def stack_upgrade_save_new_config(self, env): config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name) if config_dir: - Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version))) + Logger.info(f"stack_upgrade_save_new_config(): Calling conf-select on {conf_select_name} using version {str(params.version)}") # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir, # must change it now so this function can find the Jinja Templates for the service. 
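The Kerberos service check converted earlier in this patch derives a per-principal credential cache path by hashing the principal and keytab pair. A minimal sketch of that derivation (hashlib.md5 stands in for the HASH_ALGORITHM resolved elsewhere in that script; the principal, keytab, and tmp dir values are hypothetical):

import hashlib
import os

principal = "ambari-qa@EXAMPLE.COM"
keytab = "/etc/security/keytabs/smokeuser.headless.keytab"
tmp_dir = "/var/lib/ambari-agent/tmp"
ccache_file_name = hashlib.md5(f"{principal}|{keytab}".encode("utf-8")).hexdigest()
ccache_file_path = f"{tmp_dir}{os.sep}kerberos_service_check_cc_{ccache_file_name}"
# yields something like /var/lib/ambari-agent/tmp/kerberos_service_check_cc_<32 hex digits>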
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/service_advisor.py index 72506e679d4..ed41bbe4c48 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/service_advisor.py @@ -218,7 +218,7 @@ def recommendTezConfigurationsFromHDP22(self, configurations, clusterData, servi pass if has_tez_view: - tez_url = '{0}://{1}:{2}/#/main/view/TEZ/tez_cluster_instance'.format(server_protocol, server_host, server_port) + tez_url = f'{server_protocol}://{server_host}:{server_port}/#/main/view/TEZ/tez_cluster_instance' putTezProperty("tez.tez-ui.history-url.base", tez_url) pass @@ -279,7 +279,7 @@ def recommendTezConfigurationsFromHDP23(self, configurations, clusterData, servi pass if has_tez_view: - tez_url = '{0}://{1}:{2}/#/main/view/TEZ/tez_cluster_instance'.format(server_protocol, server_host, server_port) + tez_url = f'{server_protocol}://{server_host}:{server_port}/#/main/view/TEZ/tez_cluster_instance' putTezProperty("tez.tez-ui.history-url.base", tez_url) pass @@ -287,7 +287,7 @@ def recommendTezConfigurationsFromHDP30(self, configurations, clusterData, servi putTezProperty = self.putProperty(configurations, "tez-site") if "HIVE" in self.getServiceNames(services) and "hive-site" in services["configurations"] and "hive.metastore.warehouse.external.dir" in services["configurations"]["hive-site"]["properties"]: hive_metastore_warehouse_external_dir = services["configurations"]["hive-site"]["properties"]['hive.metastore.warehouse.external.dir'] - putTezProperty("tez.history.logging.proto-base-dir", "{0}/sys.db".format(hive_metastore_warehouse_external_dir)) + putTezProperty("tez.history.logging.proto-base-dir", f"{hive_metastore_warehouse_external_dir}/sys.db") putTezProperty("tez.history.logging.service.class", "org.apache.tez.dag.history.logging.proto.ProtoHistoryLoggingService") self.logger.info("Updated 'tez-site' config 'tez.history.logging.proto-base-dir' and 'tez.history.logging.service.class'") @@ -331,11 +331,11 @@ def validateTezConfigurationsFromHDP22(self, properties, recommendedDefaults, co if int(tez_site[prop_name1]) > yarnMaxAllocationSize: validationItems.append({"config-name": prop_name1, "item": self.getWarnItem( - "{0} should be less than YARN max allocation size ({1})".format(prop_name1, yarnMaxAllocationSize))}) + f"{prop_name1} should be less than YARN max allocation size ({yarnMaxAllocationSize})")}) if int(tez_site[prop_name2]) > yarnMaxAllocationSize: validationItems.append({"config-name": prop_name2, "item": self.getWarnItem( - "{0} should be less than YARN max allocation size ({1})".format(prop_name2, yarnMaxAllocationSize))}) + f"{prop_name2} should be less than YARN max allocation size ({yarnMaxAllocationSize})")}) return self.toConfigurationValidationProblems(validationItems, "tez-site") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanager_health.py index 23884e686fa..a4a9e756150 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanager_health.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanager_health.py @@ -142,15 +142,15 @@ def execute(configurations={}, 
parameters={}, host_name=None): if host_port is not None: if ":" in host_port: uri_host, uri_port = host_port.split(':') - host_port = '{0}:{1}'.format(host_name, uri_port) + host_port = f'{host_name}:{uri_port}' else: host_port = host_name # some yarn-site structures don't have the web ui address if host_port is None: - host_port = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT) + host_port = f'{host_name}:{NODEMANAGER_DEFAULT_PORT}' - query = "{0}://{1}/ws/v1/node/info".format(scheme, host_port) + query = f"{scheme}://{host_port}/ws/v1/node/info" try: if kerberos_principal is not None and kerberos_keytab is not None and security_enabled: diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanagers_summary.py index 88c79511c8c..86623e7de0c 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanagers_summary.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_nodemanagers_summary.py @@ -121,7 +121,7 @@ def execute(configurations={}, parameters={}, host_name=None): uri = https_uri uri = str(host_name) + ":" + uri.split(":")[1] - live_nodemanagers_qry = "{0}://{1}/jmx?qry={2}".format(scheme, uri, QRY) + live_nodemanagers_qry = f"{scheme}://{uri}/jmx?qry={QRY}" convert_to_json_failed = False response_code = None try: @@ -155,7 +155,7 @@ def execute(configurations={}, parameters={}, host_name=None): if kerberos_principal is not None and kerberos_keytab is not None and security_enabled: if response_code in [200, 307] and convert_to_json_failed: - return ('UNKNOWN', ['HTTP {0} response (metrics unavailable)'.format(str(response_code))]) + return ('UNKNOWN', [f'HTTP {str(response_code)} response (metrics unavailable)']) elif convert_to_json_failed and response_code not in [200, 307]: raise Exception("[Alert][NodeManager Health Summary] Getting data from {0} failed with http code {1}".format( str(live_nodemanagers_qry), str(response_code))) @@ -212,7 +212,7 @@ def find_value_in_jmx(data_dict, jmx_property, query): for jmx_prop_list_item in beans: if "name" in jmx_prop_list_item and jmx_prop_list_item["name"] == QRY: if jmx_property not in jmx_prop_list_item: - raise Exception("Unable to find {0} in JSON from {1} ".format(jmx_property, query)) + raise Exception(f"Unable to find {jmx_property} in JSON from {query} ") json_data = jmx_prop_list_item return json_data[jmx_property] \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py index 282a4ab6ce1..2f562cef19e 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py @@ -37,7 +37,7 @@ def ensure_unit_for_memory(memory_size): if len(memory_size_unit) > 0: unit = memory_size_unit[0] if unit not in ['b', 'k', 'm', 'g', 't', 'p']: - raise Exception("Memory size unit error. %s - wrong unit" % unit) - return "%s%s" % (memory_size_values[0], unit) + raise Exception(f"Memory size unit error. 
{unit} - wrong unit") + return f"{memory_size_values[0]}{unit}" else: raise Exception('Memory size can not be calculated') diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/mapred_service_check.py index 517c3033f7c..699de3048cf 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/mapred_service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/mapred_service_check.py @@ -56,7 +56,7 @@ def service_check(self, env): python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled) if params.security_enabled: - kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser) + kinit_cmd = f"{params.kinit_path_local} -kt {params.smoke_user_keytab} {params.smokeuser};" smoke_cmd = kinit_cmd + validateStatusCmd else: smoke_cmd = validateStatusCmd diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/nodemanager_upgrade.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/nodemanager_upgrade.py index 7e90aeeaa42..259fcc579cc 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/nodemanager_upgrade.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/nodemanager_upgrade.py @@ -69,7 +69,7 @@ def _check_nodemanager_startup(): yarn_output = yarn_output.lower() if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output: - Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address)) + Logger.info(f'NodeManager with ID \'{nodemanager_address}\' has rejoined the cluster.') return else: - raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output)) + raise Fail(f'NodeManager with ID \'{nodemanager_address}\' was not found in the list of running NodeManagers. 
\'{command}\' output was:\n{yarn_output}') diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service_check.py index fc3fb0c0034..7e0ac76a850 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service_check.py @@ -53,7 +53,7 @@ def service_check(self, env): yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd")) - run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe + run_yarn_check_cmd = f"cmd /C {yarn_exe} node -list" component_type = 'rm' if params.hadoop_ssl_enabled: @@ -66,10 +66,10 @@ def service_check(self, env): validateStatusFileName = "validateYarnComponentStatusWindows.py" validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName) python_executable = sys.executable - validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled) + validateStatusCmd = f"{python_executable} {validateStatusFilePath} {component_type} -p {component_address} -s {params.hadoop_ssl_enabled}" if params.security_enabled: - kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser) + kinit_cmd = f"{params.kinit_path_local} -kt {params.smoke_user_keytab} {params.smokeuser};" smoke_cmd = kinit_cmd + ' ' + validateStatusCmd else: smoke_cmd = validateStatusCmd @@ -178,7 +178,7 @@ def get_active_rm_webapp_address(self): Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}")) if active_rm_webapp_address is None: - raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses))); + raise Fail(f"Resource Manager state is not available. 
Failed to determine the active Resource Manager web application address from {','.join(rm_webapp_addresses)}"); return active_rm_webapp_address if __name__ == "__main__": diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/service_advisor.py index 401cbcea51a..098d73ff948 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/service_advisor.py @@ -351,12 +351,12 @@ def recommendYARNConfigurationsFromHDP206(self, configurations, clusterData, ser if "TEZ" in servicesList: ambari_user = self.getAmbariUser(services) ambariHostName = socket.getfqdn() - putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(ambari_user), ambariHostName) - putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(ambari_user), "*") + putYarnProperty(f"yarn.timeline-service.http-authentication.proxyuser.{ambari_user}.hosts", ambariHostName) + putYarnProperty(f"yarn.timeline-service.http-authentication.proxyuser.{ambari_user}.groups", "*") old_ambari_user = self.getOldAmbariUser(services) if old_ambari_user is not None: - putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true') - putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true') + putYarnPropertyAttribute(f"yarn.timeline-service.http-authentication.proxyuser.{old_ambari_user}.hosts", 'delete', 'true') + putYarnPropertyAttribute(f"yarn.timeline-service.http-authentication.proxyuser.{old_ambari_user}.groups", 'delete', 'true') def recommendYARNConfigurationsFromHDP22(self, configurations, clusterData, services, hosts): capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services) @@ -585,7 +585,7 @@ def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, serv # Calculation for 'ats_heapsize' is in MB. 
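The YARN advisor hunk above writes proxyuser properties whose names embed the current Ambari principal and marks the properties keyed by a previous principal for deletion. A minimal sketch of how those property names are assembled (both user names are hypothetical):

ambari_user = "ambari-cl1"   # hypothetical current Ambari principal
old_ambari_user = "ambari"   # hypothetical previous principal; may be None
new_properties = {
    f"yarn.timeline-service.http-authentication.proxyuser.{ambari_user}.hosts": "ambari.example.com",
    f"yarn.timeline-service.http-authentication.proxyuser.{ambari_user}.groups": "*",
}
stale_properties = [
    f"yarn.timeline-service.http-authentication.proxyuser.{old_ambari_user}.hosts",
    f"yarn.timeline-service.http-authentication.proxyuser.{old_ambari_user}.groups",
]  # the advisor sets the 'delete' attribute on these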
ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size) putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB - self.logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize)) + self.logger.info(f"Updated YARN config 'apptimelineserver_heapsize' as : {ats_heapsize}, ") restyps_list = [] yn_cgrp_active = None @@ -607,7 +607,7 @@ def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, serv "yarn.resource-types" in services["configurations"]["resource-types"]["properties"]: yarn_restyps = services["configurations"]["resource-types"]["properties"]["yarn.resource-types"] restyps_list = yarn_restyps.split(',') if len(yarn_restyps) > 1 else yarn_restyps.split() - self.logger.info("new what is yarn_restyps: '{0}'.".format(restyps_list)) + self.logger.info(f"new what is yarn_restyps: '{restyps_list}'.") if "yarn-env" in services["configurations"] and \ "yarn_cgroups_enabled" in services["configurations"]["yarn-env"]["properties"]: @@ -622,17 +622,17 @@ def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, serv if "docker_allowed_devices" in services["configurations"]["container-executor"]["properties"]: docker_allow_dev = services["configurations"]["container-executor"]["properties"]["docker_allowed_devices"] allow_dev_list = docker_allow_dev.split(',') if len(docker_allow_dev) > 1 else docker_allow_dev.split() - self.logger.info("new what is docker_allowed_devices: '{0}'.".format(allow_dev_list)) + self.logger.info(f"new what is docker_allowed_devices: '{allow_dev_list}'.") if "docker_allowed_volume-drivers" in services["configurations"]["container-executor"]["properties"]: docker_allow_vol_drive = services["configurations"]["container-executor"]["properties"]["docker_allowed_volume-drivers"] allow_vol_drive_list = docker_allow_vol_drive.split(',') if len(docker_allow_vol_drive) > 1 else docker_allow_vol_drive.split() - self.logger.info("new what is docker_allowed_volume-drivers: '{0}'.".format(allow_vol_drive_list)) + self.logger.info(f"new what is docker_allowed_volume-drivers: '{allow_vol_drive_list}'.") if "docker_allowed_ro-mounts" in services["configurations"]["container-executor"]["properties"]: docker_allow_romounts = services["configurations"]["container-executor"]["properties"]["docker_allowed_ro-mounts"] allow_romounts_list = docker_allow_romounts.split(',') if len(docker_allow_romounts) > 1 else docker_allow_romounts.split() - self.logger.info("new what is docker.allowed.ro-mounts: '{0}'.".format(allow_romounts_list)) + self.logger.info(f"new what is docker.allowed.ro-mounts: '{allow_romounts_list}'.") if "cgroup_root" in services["configurations"]["container-executor"]["properties"]: cg_root = services["configurations"]["container-executor"]["properties"]["cgroup_root"] @@ -646,7 +646,7 @@ def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, serv if "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" in services["configurations"]["yarn-site"]["properties"]: lce_cgrp_hirch = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.linux-container-executor.cgroups.hierarchy"] lce_cgrp_hirch_list = lce_cgrp_hirch.split(',') if len(lce_cgrp_hirch) > 1 else lce_cgrp_hirch.split() - self.logger.info("new what is yarn.nodemanager.linux-container-executor.cgroups.hierarchy: '{0}'.".format(lce_cgrp_hirch_list)) + self.logger.info(f"new what is 
yarn.nodemanager.linux-container-executor.cgroups.hierarchy: '{lce_cgrp_hirch_list}'.") if "yarn.nodemanager.linux-container-executor.cgroups.mount" in services["configurations"]["yarn-site"]["properties"]: lce_cgrp_mt = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.linux-container-executor.cgroups.mount"] @@ -654,22 +654,22 @@ def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, serv if "yarn.nodemanager.linux-container-executor.cgroups.mount-path" in services["configurations"]["yarn-site"]["properties"]: lce_cgrp_mtp = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.linux-container-executor.cgroups.mount-path"] lce_cgrp_mtp_list = lce_cgrp_mtp.split(',') if len(lce_cgrp_mtp) > 1 else lce_cgrp_mtp.split() - self.logger.info("new what is yarn.nodemanager.linux-container-executor.cgroups.mount-path: '{0}'.".format(lce_cgrp_mtp_list)) + self.logger.info(f"new what is yarn.nodemanager.linux-container-executor.cgroups.mount-path: '{lce_cgrp_mtp_list}'.") if "yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices" in services["configurations"]["yarn-site"]["properties"]: rp_gpu_agd = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices"] rp_gpu_agd_list = rp_gpu_agd.split(',') if len(rp_gpu_agd) > 1 else rp_gpu_agd.split() - self.logger.info("new what is yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices: '{0}'.".format(rp_gpu_agd_list)) + self.logger.info(f"new what is yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices: '{rp_gpu_agd_list}'.") if "yarn.nodemanager.resource-plugins.gpu.docker-plugin" in services["configurations"]["yarn-site"]["properties"]: rp_gpu_dp = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource-plugins.gpu.docker-plugin"] rp_gpu_dp_list = rp_gpu_dp.split(',') if len(rp_gpu_dp) > 1 else rp_gpu_dp.split() - self.logger.info("new what is yarn.nodemanager.resource-plugins.gpu.docker-plugin: '{0}'.".format(rp_gpu_dp_list)) + self.logger.info(f"new what is yarn.nodemanager.resource-plugins.gpu.docker-plugin: '{rp_gpu_dp_list}'.") if "yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint" in services["configurations"]["yarn-site"]["properties"]: rp_gpu_dp_nv1_ep = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint"] rp_gpu_dp_nv1_ep_list = rp_gpu_dp_nv1_ep.split(',') if len(rp_gpu_dp_nv1_ep) > 1 else rp_gpu_dp_nv1_ep.split() - self.logger.info("new what is yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint: '{0}'.".format(rp_gpu_dp_nv1_ep_list)) + self.logger.info(f"new what is yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint: '{rp_gpu_dp_nv1_ep_list}'.") if gpu_module_enabled and gpu_module_enabled.lower() == 'true': # put yarn.io/gpu if it is absent in resource-types.xml @@ -960,7 +960,7 @@ def calculate_yarn_apptimelineserver_cache_size(self, host_mem): yarn_timeline_app_cache_size = 7 elif host_mem >= 8192: yarn_timeline_app_cache_size = 10 - self.logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size)) + self.logger.info(f"Calculated and returning 'yarn_timeline_app_cache_size' : {yarn_timeline_app_cache_size}") return yarn_timeline_app_cache_size @@ -978,10 +978,10 @@ def read_yarn_apptimelineserver_cache_size(self, services): if yarn_site_in_services and 
yarn_ats_app_cache_size_config in yarn_site_in_services: yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config] - self.logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size)) + self.logger.info(f"'yarn.scheduler.minimum-allocation-mb' read from services as : {yarn_ats_app_cache_size}") if not yarn_ats_app_cache_size: - self.logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config)) + self.logger.error(f"'{yarn_ats_app_cache_size_config}' was not found in the services") return yarn_ats_app_cache_size @@ -995,7 +995,7 @@ def update_timeline_reader_address(self, configurations, services, property_name new_address = re.sub('[^:]+', timeline_hosts[0], old_address, 1) if old_address != new_address: putYarnProperty(property_name, new_address) - self.logger.info('Updated YARN config {0} to {1}'.format(property_name, new_address)) + self.logger.info(f'Updated YARN config {property_name} to {new_address}') #region LLAP def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): @@ -1023,7 +1023,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): is_cluster_create_opr = False if operation == self.CLUSTER_CREATE_OPERATION: is_cluster_create_opr = True - self.logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr)) + self.logger.info(f"Is cluster create operation ? = {is_cluster_create_opr}") putHiveInteractiveSiteProperty = self.putProperty(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE, services) putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE) @@ -1061,7 +1061,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): if capacity_scheduler_properties: # Get all leaf queues. leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties) - self.logger.info("YARN leaf Queues = {0}".format(leafQueueNames)) + self.logger.info(f"YARN leaf Queues = {leafQueueNames}") if len(leafQueueNames) == 0: self.logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.") return @@ -1082,7 +1082,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', first_leaf_queue) putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', first_leaf_queue) llap_named_queue_selected_in_curr_invocation = False - self.logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation)) + self.logger.info(f"DBG: llap_named_queue_selected_in_curr_invocation = {llap_named_queue_selected_in_curr_invocation}") if (len(leafQueueNames) == 2 and (llap_daemon_selected_queue_name and llap_daemon_selected_queue_name == llap_queue_name) or llap_named_queue_selected_in_curr_invocation) or \ @@ -1138,7 +1138,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): if not changed_configs_in_hive_int_env and not llap_concurrency_in_changed_configs and \ not llap_daemon_queue_in_changed_configs and services["changed-configurations"]: self.logger.info("DBG: LLAP parameters not modified. 
Not adjusting LLAP configs.") - self.logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"])) + self.logger.info(f"DBG: Current 'changed-configuration' received is : {services['changed-configurations']}") return self.logger.info("\nDBG: Performing LLAP config calculations ......") @@ -1195,7 +1195,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): mem_per_thread_for_llap = float(mem_per_thread_for_llap) - self.logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap)) + self.logger.info(f"DBG: selected_queue_is_ambari_managed_llap = {selected_queue_is_ambari_managed_llap}") if not selected_queue_is_ambari_managed_llap: llap_daemon_selected_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_daemon_selected_queue_name, total_cluster_capacity) @@ -1215,7 +1215,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): "yarn_nm_mem_in_mb_normalized : {2}".format(num_llap_nodes_requested, total_llap_mem_normalized, yarn_nm_mem_in_mb_normalized)) # Pouplate the 'num_llap_nodes_requested' in config 'num_llap_nodes', a read only config for non-Ambari managed queue case. putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes_requested) - self.logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested)) + self.logger.info(f"Setting config 'num_llap_nodes' as : {num_llap_nodes_requested}") queue_am_fraction_perc = float(self.__getQueueAmFractionFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)) hive_tez_am_cap_available = queue_am_fraction_perc * total_llap_mem_normalized self.logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, " @@ -1239,17 +1239,17 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): # What percent is 'total_llap_mem' of 'total_cluster_capacity' ? llap_named_queue_cap_fraction = ceil(total_llap_mem_normalized / total_cluster_capacity * 100) - self.logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction)) + self.logger.info(f"DBG: Calculated '{llap_queue_name}' queue capacity percent = {llap_named_queue_cap_fraction}.") if llap_named_queue_cap_fraction > 100: - self.logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction)) + self.logger.warning(f"Calculated '{llap_queue_name}' queue size = {llap_named_queue_cap_fraction}. Cannot be > 100.") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return # Adjust capacity scheduler for the 'llap' named queue. self.checkAndManageLlapQueue(services, configurations, hosts, llap_queue_name, llap_named_queue_cap_fraction) hive_tez_am_cap_available = total_llap_mem_normalized - self.logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available)) + self.logger.info(f"DBG: hive_tez_am_cap_available : {hive_tez_am_cap_available}") # Common calculations now, irrespective of the queue selected. @@ -1268,7 +1268,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): # Read 'hive.server2.tez.sessions.per.default.queue' prop if it's in changed-configs, else calculate it. 
if not llap_concurrency_in_changed_configs: if max_executors_per_node <= 0: - self.logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node)) + self.logger.warning(f"Calculated 'max_executors_per_node' = {max_executors_per_node}. Expected value >= 1.") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return @@ -1297,7 +1297,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): .format(llap_concurrency, llap_concurrency, normalized_tez_am_container_size, hive_tez_am_cap_available)) if llap_concurrency <= 0: - self.logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency)) + self.logger.warning(f"DBG: Calculated 'LLAP Concurrent Queries' = {llap_concurrency}. Expected value >= 1.") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return self.logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: " @@ -1307,10 +1307,10 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): if 'hive.server2.tez.sessions.per.default.queue' in hsi_site: llap_concurrency = int(hsi_site['hive.server2.tez.sessions.per.default.queue']) if llap_concurrency <= 0: - self.logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency)) + self.logger.warning(f"'hive.server2.tez.sessions.per.default.queue' current value : {llap_concurrency}. Expected value : >= 1") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return - self.logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency )) + self.logger.info(f"DBG: Read 'llap_concurrency' : {llap_concurrency}") else: llap_concurrency = 1 self.logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.") @@ -1362,7 +1362,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): else: # All good. We have a proper value for memoryPerNode. num_llap_nodes = num_llap_nodes_requested - self.logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes)) + self.logger.info(f"DBG: num_llap_nodes : {num_llap_nodes}") # Make sure we have enough memory on each node to run AMs. # If nodes vs nodes_requested is different - AM memory is already factored in. @@ -1391,7 +1391,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap) if num_executors_per_node_max < 1: - self.logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max)) + self.logger.warning(f"Calculated 'Max. Executors per Node' = {num_executors_per_node_max}. Expected values >= 1.") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return self.logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, " @@ -1400,7 +1400,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): # NumExecutorsPerNode is not necessarily max - since some capacity would have been reserved for AMs, if this value were based on mem. 
num_executors_per_node = min(floor(llap_daemon_mem_per_node / mem_per_thread_for_llap), num_executors_per_node_max) if num_executors_per_node <= 0: - self.logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node)) + self.logger.warning(f"Calculated 'Number of Executors Per Node' = {num_executors_per_node}. Expected value >= 1") self.recommendDefaultLlapConfiguration(configurations, services, hosts) return self.logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, " @@ -1419,7 +1419,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): # Calculate value for prop 'llap_heap_size' llap_xmx = max(total_mem_for_executors_per_node * 0.8, total_mem_for_executors_per_node - self.get_llap_headroom_space(services, configurations)) - self.logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node)) + self.logger.info(f"DBG: Calculated llap_app_heap_size : {llap_xmx}, using following : total_mem_for_executors : {total_mem_for_executors_per_node}") # Calculate 'hive_heapsize' for Hive2/HiveServer2 (HSI) hive_server_interactive_heapsize = None @@ -1439,7 +1439,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): if is_cluster_create_opr or changed_configs_has_enable_hive_int: normalized_tez_am_container_size = int(normalized_tez_am_container_size) putTezInteractiveSiteProperty('tez.am.resource.memory.mb', normalized_tez_am_container_size) - self.logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size)) + self.logger.info(f"DBG: Setting 'tez.am.resource.memory.mb' config value as : {normalized_tez_am_container_size}") if not llap_concurrency_in_changed_configs: putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', max(int(num_executors_per_node/16), 1)) @@ -1450,16 +1450,16 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt) #TODO A single value is not being set for numNodes in case of a custom queue. Also the attribute is set to non-visible, so the UI likely ends up using an old cached value if (num_llap_nodes != num_llap_nodes_requested): - self.logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes)) + self.logger.info(f"DBG: User requested num_llap_nodes : {num_llap_nodes_requested}, but used/adjusted value for calculations is : {num_llap_nodes}") else: - self.logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested)) + self.logger.info(f"DBG: Used num_llap_nodes for calculations : {num_llap_nodes_requested}") # Safeguard for not adding "num_llap_nodes_for_llap_daemons" if it doesnt exist in hive-interactive-site. # This can happen if we upgrade from Ambari 2.4 (with HDP 2.5) to Ambari 2.5, as this config is from 2.6 stack onwards only. 
if "hive-interactive-env" in services["configurations"] and \ "num_llap_nodes_for_llap_daemons" in services["configurations"]["hive-interactive-env"]["properties"]: putHiveInteractiveEnvProperty('num_llap_nodes_for_llap_daemons', num_llap_nodes) - self.logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes)) + self.logger.info(f"DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {num_llap_nodes}") llap_container_size = int(llap_daemon_mem_per_node) putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size) @@ -1471,7 +1471,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): if is_cluster_create_opr or changed_configs_has_enable_hive_int: mem_per_thread_for_llap = int(mem_per_thread_for_llap) putHiveInteractiveSiteProperty('hive.tez.container.size', mem_per_thread_for_llap) - self.logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap)) + self.logger.info(f"DBG: Setting 'hive.tez.container.size' config value as : {mem_per_thread_for_llap}") putTezInteractiveSiteProperty('tez.runtime.io.sort.mb', tez_runtime_io_sort_mb) if "tez-site" in services["configurations"] and "tez.runtime.sorter.class" in services["configurations"]["tez-site"]["properties"]: @@ -1482,7 +1482,7 @@ def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name): putHiveInteractiveSiteProperty('hive.auto.convert.join.noconditionaltask.size', hive_auto_convert_join_noconditionaltask_size) num_executors_per_node = int(num_executors_per_node) - self.logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node)) + self.logger.info(f"DBG: Putting num_executors_per_node as {num_executors_per_node}") putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node) putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1) putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "maximum", int(num_executors_per_node_max)) @@ -1577,7 +1577,7 @@ def get_num_llap_nodes(self, services, configurations): elif hsi_env and 'num_llap_nodes' in hsi_env: num_llap_nodes = hsi_env['num_llap_nodes'] else: - self.logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes)) + self.logger.error(f"Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {num_llap_nodes}") return float(num_llap_nodes) @@ -1604,10 +1604,10 @@ def calculate_mem_per_thread_for_llap(self, services, nm_mem_per_node_normalized else: calculated_hive_tez_container_size = 4096 - self.logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size)) + self.logger.info(f"DBG: Calculated and returning 'hive_tez_container_size' : {calculated_hive_tez_container_size}") return calculated_hive_tez_container_size else: - self.logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size)) + self.logger.info(f"DBG: Returning 'hive_tez_container_size' : {hive_tez_container_size}") return hive_tez_container_size def get_hive_tez_container_size(self, services): @@ -1641,13 +1641,13 @@ def get_llap_headroom_space(self, services, configurations): # Check if 'llap_headroom_space' is modified in current SA invocation. 
if 'hive-interactive-env' in configurations and 'llap_headroom_space' in configurations['hive-interactive-env']['properties']: hive_container_size = float(configurations['hive-interactive-env']['properties']['llap_headroom_space']) - self.logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space)) + self.logger.info(f"'llap_headroom_space' read from configurations as : {llap_headroom_space}") if llap_headroom_space is None: # Check if 'llap_headroom_space' is input in services array. if 'llap_headroom_space' in services['configurations']['hive-interactive-env']['properties']: llap_headroom_space = float(services['configurations']['hive-interactive-env']['properties']['llap_headroom_space']) - self.logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space)) + self.logger.info(f"'llap_headroom_space' read from services as : {llap_headroom_space}") if not llap_headroom_space or llap_headroom_space < 1: llap_headroom_space = 6144 # 6GB self.logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes") @@ -1775,8 +1775,8 @@ def checkAndManageLlapQueue(self, services, configurations, hosts, llap_queue_na self.logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \ .format(llap_queue_name, llap_queue_cap_perc, adjusted_default_queue_cap)) else: # Queue existed, only adjustments done. - self.logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc)) - self.logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap)) + self.logger.info(f"Adjusted YARN Queue : '{llap_queue_name}'. Current capacity : {llap_queue_cap_perc}%. State: RUNNING.") + self.logger.info(f"Adjusted 'default' queue capacity to : {adjusted_default_queue_cap}%") # Update Hive 'hive.llap.daemon.queue.name' prop to use 'llap' queue. putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', llap_queue_name) @@ -1784,7 +1784,7 @@ def checkAndManageLlapQueue(self, services, configurations, hosts, llap_queue_na # Update 'hive.llap.daemon.queue.name' prop combo entries and llap capacity YARN Service visibility. self.setLlapDaemonQueuePropAttributes(services, configurations) else: - self.logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames))) + self.logger.debug(f"Not creating/adjusting {llap_queue_name} queue. Current YARN queues : {list(leafQueueNames)}") else: self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.") @@ -1812,7 +1812,7 @@ def checkAndStopLlapQueue(self, services, configurations, llap_queue_name): if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in capacity_scheduler_properties.keys(): currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state') else: - self.logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name)) + self.logger.error(f"{llap_queue_name} queue 'state' property not present in capacity scheduler. 
Skipping adjusting queues.") return if currLlapQueueState == 'RUNNING': DEFAULT_MAX_CAPACITY = '100' @@ -1842,7 +1842,7 @@ def checkAndStopLlapQueue(self, services, configurations, llap_queue_name): elif prop.startswith('yarn.'): updated_llap_queue_configs = updated_llap_queue_configs + prop + "=" + val + "\n" else: - self.logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState)) + self.logger.debug(f"{llap_queue_name} queue state is : {currLlapQueueState}. Skipping adjusting queues.") return if updated_default_queue_configs and updated_llap_queue_configs: @@ -1854,7 +1854,7 @@ def checkAndStopLlapQueue(self, services, configurations, llap_queue_name): putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', YARNRecommender.YARN_ROOT_DEFAULT_QUEUE_NAME) putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', YARNRecommender.YARN_ROOT_DEFAULT_QUEUE_NAME) else: - self.logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames))) + self.logger.debug(f"Not removing '{llap_queue_name}' queue as number of Queues not equal to 2. Current YARN queues : {list(leafQueueNames)}") else: self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.") @@ -1886,9 +1886,9 @@ def setLlapDaemonQueuePropAttributes(self, services, configurations): "count(configurations['capacity-scheduler']['properties']['capacity-scheduler']) = " "{0}".format(len(capacity_scheduler_properties))) else: - self.logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str)) + self.logger.info(f"Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {cap_sched_props_as_str}") else: - self.logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str)) + self.logger.info(f"configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {cap_sched_props_as_str}.") # if 'capacity_scheduler_properties' is empty, implies we may have 'capacity-scheduler' configs as dictionary # in configurations, if 'capacity-scheduler' changed in current invocation. @@ -1897,7 +1897,7 @@ def setLlapDaemonQueuePropAttributes(self, services, configurations): capacity_scheduler_properties = cap_sched_props_as_dict self.logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. 
Retrieved the configs as dictionary from configurations.") else: - self.logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict)) + self.logger.info(f"Read configurations['capacity-scheduler']['properties'] is : {cap_sched_props_as_dict}") else: self.logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.") @@ -1914,7 +1914,7 @@ leafQueues = [{"label": str(queueName), "value": queueName} for queueName in leafQueueNames] leafQueues = sorted(leafQueues, key=lambda q: q['value']) putHiveInteractiveSitePropertyAttribute("hive.llap.daemon.queue.name", "entries", leafQueues) - self.logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues)) + self.logger.info(f"'hive.llap.daemon.queue.name' config Property Attributes set to : {leafQueues}") else: self.logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve " "'hive.server2.tez.default.queues' property attributes.") @@ -1944,15 +1944,15 @@ def get_yarn_min_container_size(self, services, configurations): # Check if services["changed-configurations"] is empty and 'yarn.scheduler.minimum-allocation-mb' is modified in current ST invocation. if not services["changed-configurations"] and yarn_site and yarn_min_allocation_property in yarn_site: yarn_min_container_size = yarn_site[yarn_min_allocation_property] - self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size)) + self.logger.info(f"DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {yarn_min_container_size}") # Check if 'yarn.scheduler.minimum-allocation-mb' is input in services array. elif yarn_site_properties and yarn_min_allocation_property in yarn_site_properties: yarn_min_container_size = yarn_site_properties[yarn_min_allocation_property] - self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size)) + self.logger.info(f"DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {yarn_min_container_size}") if not yarn_min_container_size: - self.logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property)) + self.logger.error(f"{yarn_min_allocation_property} was not found in the configuration") return yarn_min_container_size @@ -1992,7 +1992,7 @@ def get_yarn_nm_mem_in_mb(self, services, configurations): yarn_nm_mem_in_mb = float(yarn_site['yarn.nodemanager.resource.memory-mb']) if yarn_nm_mem_in_mb <= 0.0: - self.logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb)) + self.logger.warning(f"'yarn.nodemanager.resource.memory-mb' current value : {yarn_nm_mem_in_mb}. Expected value : > 0")
return yarn_nm_mem_in_mb @@ -2012,10 +2012,10 @@ def calculate_tez_am_container_size(self, services, total_cluster_capacity, is_c elif total_cluster_capacity > 98304: calculated_tez_am_resource_memory_mb = 4096 - self.logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb)) + self.logger.info(f"DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {calculated_tez_am_resource_memory_mb}") return float(calculated_tez_am_resource_memory_mb) else: - self.logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb)) + self.logger.info(f"DBG: Returning 'tez_am_resource_memory_mb' as : {tez_am_resource_memory_mb}") return float(tez_am_resource_memory_mb) def get_tez_am_resource_memory_mb(self, services): @@ -2094,10 +2094,10 @@ def __getQueueAmFractionFromCapacityScheduler(self, capacity_scheduler_propertie for key in cap_sched_keys: if key.endswith("." + llap_daemon_selected_queue_name+".maximum-am-resource-percent"): llap_selected_queue_am_percent_key = key - self.logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key)) + self.logger.info(f"AM percent key got for '{llap_daemon_selected_queue_name}' queue is : '{llap_selected_queue_am_percent_key}'") break if llap_selected_queue_am_percent_key is None: - self.logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name)) + self.logger.info(f"Returning default AM percent value : '0.1' for queue : {llap_daemon_selected_queue_name}") return 0.1 # Default value to use if we couldn't retrieve queue's corresponding AM Percent key. else: llap_selected_queue_am_percent = capacity_scheduler_properties.get(llap_selected_queue_am_percent_key) @@ -2110,7 +2110,7 @@ def __getSelectedQueueTotalCap(self, capacity_scheduler_properties, llap_daemon_ """ Calculates the total available capacity for the passed-in YARN queue of any level based on the percentages. """ - self.logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name)) + self.logger.info(f"Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{llap_daemon_selected_queue_name}'.") available_capacity = total_cluster_capacity queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name) if queue_cap_key: @@ -2120,13 +2120,13 @@ def __getSelectedQueueTotalCap(self, capacity_scheduler_properties, llap_daemon_ queue_path = queue_cap_key[24:] # Strip from beginning 'yarn.scheduler.capacity.' queue_path = queue_path[0:-9] # Strip from end '.capacity' queues_list = queue_path.split('.') - self.logger.info("Queue list : {0}".format(queues_list)) + self.logger.info(f"Queue list : {queues_list}") if queues_list: for queue in queues_list: queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, queue) queue_cap_perc = float(capacity_scheduler_properties.get(queue_cap_key)) available_capacity = queue_cap_perc / 100 * available_capacity - self.logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity)) + self.logger.info(f"Total capacity available for queue {queue} is : {available_capacity}") # returns the capacity calculated for passed-in queue in 'llap_daemon_selected_queue_name'.
return available_capacity @@ -2332,7 +2332,7 @@ def sampleValidator(self, properties, recommendedDefaults, configurations, servi item = {"level": "ERROR|WARN", "message": "value"} ''' validationItems.append({"config-name": "my_config_property_name", - "item": self.getErrorItem("My custom message in method %s" % inspect.stack()[0][3])}) + "item": self.getErrorItem(f"My custom message in method {inspect.stack()[0][3]}")}) return self.toConfigurationValidationProblems(validationItems, "hadoop-env") def validateYARNSiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts): @@ -2376,7 +2376,7 @@ def validateYARNSiteConfigurationsFromHDP26(self, properties, recommendedDefault if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue: validationItems = [ {"config-name": "yarn.log.server.web-service.url", - "item": self.getWarnItem("Value should be %s" % propertyValue)}] + "item": self.getWarnItem(f"Value should be {propertyValue}")}] if "yarn_hierarchy" in services["configurations"]["container-executor"]["properties"] \ and "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" in services["configurations"]["yarn-site"]["properties"]: @@ -2468,7 +2468,7 @@ def sampleValidator(self, properties, recommendedDefaults, configurations, servi item = {"level": "ERROR|WARN", "message": "value"} ''' validationItems.append({"config-name": "my_config_property_name", - "item": self.getErrorItem("My custom message in method %s" % inspect.stack()[0][3])}) + "item": self.getErrorItem(f"My custom message in method {inspect.stack()[0][3]}")}) return self.toConfigurationValidationProblems(validationItems, "hadoop-env") def validateMapReduce2SiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts): @@ -2505,20 +2505,20 @@ def validateMapReduce2SiteConfigurationsFromHDP22(self, properties, recommendedD mapreduceMapJavaOpts = self.formatXmxSizeToBytes(self.getXmxSize(properties['mapreduce.map.java.opts'])) / (1024.0 * 1024) mapreduceMapMemoryMb = self.to_number(properties['mapreduce.map.memory.mb']) if mapreduceMapJavaOpts > mapreduceMapMemoryMb: - validationItems.append({"config-name": 'mapreduce.map.java.opts', "item": self.getWarnItem("mapreduce.map.java.opts Xmx should be less than mapreduce.map.memory.mb ({0})".format(mapreduceMapMemoryMb))}) + validationItems.append({"config-name": 'mapreduce.map.java.opts', "item": self.getWarnItem(f"mapreduce.map.java.opts Xmx should be less than mapreduce.map.memory.mb ({mapreduceMapMemoryMb})")}) if 'mapreduce.reduce.java.opts' in properties and \ self.checkXmxValueFormat(properties['mapreduce.reduce.java.opts']): mapreduceReduceJavaOpts = self.formatXmxSizeToBytes(self.getXmxSize(properties['mapreduce.reduce.java.opts'])) / (1024.0 * 1024) mapreduceReduceMemoryMb = self.to_number(properties['mapreduce.reduce.memory.mb']) if mapreduceReduceJavaOpts > mapreduceReduceMemoryMb: - validationItems.append({"config-name": 'mapreduce.reduce.java.opts', "item": self.getWarnItem("mapreduce.reduce.java.opts Xmx should be less than mapreduce.reduce.memory.mb ({0})".format(mapreduceReduceMemoryMb))}) + validationItems.append({"config-name": 'mapreduce.reduce.java.opts', "item": self.getWarnItem(f"mapreduce.reduce.java.opts Xmx should be less than mapreduce.reduce.memory.mb ({mapreduceReduceMemoryMb})")}) if 'yarn.app.mapreduce.am.command-opts' in properties and \ self.checkXmxValueFormat(properties['yarn.app.mapreduce.am.command-opts']): 
yarnAppMapreduceAmCommandOpts = self.formatXmxSizeToBytes(self.getXmxSize(properties['yarn.app.mapreduce.am.command-opts'])) / (1024.0 * 1024) yarnAppMapreduceAmResourceMb = self.to_number(properties['yarn.app.mapreduce.am.resource.mb']) if yarnAppMapreduceAmCommandOpts > yarnAppMapreduceAmResourceMb: - validationItems.append({"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.getWarnItem("yarn.app.mapreduce.am.command-opts Xmx should be less than yarn.app.mapreduce.am.resource.mb ({0})".format(yarnAppMapreduceAmResourceMb))}) + validationItems.append({"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.getWarnItem(f"yarn.app.mapreduce.am.command-opts Xmx should be less than yarn.app.mapreduce.am.resource.mb ({yarnAppMapreduceAmResourceMb})")}) return self.toConfigurationValidationProblems(validationItems, "mapred-site") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/alerts/alert_ranger_admin_passwd_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/alerts/alert_ranger_admin_passwd_check.py index e5c2fb6451c..8eec266b96e 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/alerts/alert_ranger_admin_passwd_check.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/alerts/alert_ranger_admin_passwd_check.py @@ -73,8 +73,8 @@ def execute(configurations={}, parameters={}, host_name=None): ranger_link = configurations[RANGER_ADMIN_URL] if ranger_link.endswith('/'): ranger_link = ranger_link[:-1] - ranger_auth_link = '{0}/{1}'.format(ranger_link, 'service/public/api/repository/count') - ranger_get_user = '{0}/{1}'.format(ranger_link, 'service/xusers/users') + ranger_auth_link = f'{ranger_link}/service/public/api/repository/count' + ranger_get_user = f'{ranger_link}/service/xusers/users' if ADMIN_USERNAME in configurations: admin_username = configurations[ADMIN_USERNAME] @@ -106,19 +106,19 @@ def execute(configurations={}, parameters={}, host_name=None): user_http_code = check_ranger_login(ranger_auth_link, ranger_admin_username, ranger_admin_password) if user_http_code == 200: result_code = 'OK' - label = 'Login Successful for users {0} and {1}'.format(admin_username, ranger_admin_username) + label = f'Login Successful for users {admin_username} and {ranger_admin_username}' elif user_http_code == 401: result_code = 'CRITICAL' - label = 'User:{0} credentials on Ambari UI are not in sync with Ranger'.format(ranger_admin_username) + label = f'User:{ranger_admin_username} credentials on Ambari UI are not in sync with Ranger' else: result_code = 'WARNING' label = 'Ranger Admin service is not reachable, please restart the service' else: result_code = 'OK' - label = 'Login Successful for user: {0}. User:{1} user not yet synced with Ranger'.format(admin_username, ranger_admin_username) + label = f'Login Successful for user: {admin_username}. 
User:{ranger_admin_username} user not yet synced with Ranger' elif admin_http_code == 401: result_code = 'CRITICAL' - label = 'User:{0} credentials on Ambari UI are not in sync with Ranger'.format(admin_username) + label = f'User:{admin_username} credentials on Ambari UI are not in sync with Ranger' else: result_code = 'WARNING' label = 'Ranger Admin service is not reachable, please restart the service' @@ -139,22 +139,22 @@ def check_ranger_login(ranger_auth_link, username, password): return response code """ try: - usernamepassword = '{0}:{1}'.format(username, password) + usernamepassword = f'{username}:{password}' base_64_string = base64.b64encode(usernamepassword.encode()).decode().replace('\n', '') request = urllib.request.Request(ranger_auth_link) request.add_header("Content-Type", "application/json") request.add_header("Accept", "application/json") - request.add_header("Authorization", "Basic {0}".format(base_64_string)) + request.add_header("Authorization", f"Basic {base_64_string}") result = urllib.request.urlopen(request, timeout=20) response_code = result.getcode() if response_code == 200: response = json.loads(result.read()) return response_code except urllib.error.HTTPError as e: - logger.exception("Error during Ranger service authentication. Http status code - {0}. {1}".format(e.code, e.read())) + logger.exception(f"Error during Ranger service authentication. Http status code - {e.code}. {e.read()}") return e.code except urllib.error.URLError as e: - logger.exception("Error during Ranger service authentication. {0}".format(e.reason)) + logger.exception(f"Error during Ranger service authentication. {e.reason}") return None except Exception as e: return 401 @@ -168,13 +168,13 @@ def get_ranger_user(ranger_get_user, username, password, user): return Boolean if user exist or not """ try: - url = '{0}?name={1}'.format(ranger_get_user, user) - usernamepassword = '{0}:{1}'.format(username, password) + url = f'{ranger_get_user}?name={user}' + usernamepassword = f'{username}:{password}' base_64_string = base64.b64encode(usernamepassword.encode()).decode().replace('\n', '') request = urllib.request.Request(url) request.add_header("Content-Type", "application/json") request.add_header("Accept", "application/json") - request.add_header("Authorization", "Basic {0}".format(base_64_string)) + request.add_header("Authorization", f"Basic {base_64_string}") result = urllib.request.urlopen(request, timeout=20) response_code = result.getcode() response = json.loads(result.read()) @@ -185,10 +185,10 @@ def get_ranger_user(ranger_get_user, username, password, user): else: return False except urllib.error.HTTPError as e: - logger.exception("Error getting user from Ranger service. Http status code - {0}. {1}".format(e.code, e.read())) + logger.exception(f"Error getting user from Ranger service. Http status code - {e.code}. {e.read()}") return False except urllib.error.URLError as e: - logger.exception("Error getting user from Ranger service. {0}".format(e.reason)) + logger.exception(f"Error getting user from Ranger service. 
{e.reason}") return False except Exception as e: return False diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/params.py index 8904093cefc..4ea9d129ab9 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/params.py @@ -371,7 +371,7 @@ core_site_auth_to_local_property = '' for item in range(len(rule_dict)): - rule_line = 'RULE:[2:$1@$0]({0}@{1})s/.*/{2}/\n'.format(rule_dict[item]['principal'], realm, rule_dict[item]['user']) + rule_line = f"RULE:[2:$1@$0]({rule_dict[item]['principal']}@{realm})s/.*/{rule_dict[item]['user']}/\n" core_site_auth_to_local_property = rule_line + core_site_auth_to_local_property core_site_auth_to_local_property = core_site_auth_to_local_property + 'DEFAULT' diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/ranger_admin.py index 7f1a38d1323..2f81059d1ab 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/ranger_admin.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/ranger_admin.py @@ -105,7 +105,7 @@ def post_upgrade_restart(self,env, upgrade_type=None): logoutput = True ) except ExecutionFailed as execution_exception: - Logger.error("Error adding field to Ranger Audits Solr Collection. Kindly check Infra Solr service to be up and running {0}".format(execution_exception)) + Logger.error(f"Error adding field to Ranger Audits Solr Collection. 
Kindly check Infra Solr service to be up and running {execution_exception}") def start(self, env, upgrade_type=None): import params @@ -241,7 +241,7 @@ def set_pre_start(self, env): if stack_select_packages is None: raise Fail("Unable to get packages for stack-select") - Logger.info("RANGER_ADMIN component will be stack-selected to version {0} using a {1} orchestration".format(params.version, orchestration.upper())) + Logger.info(f"RANGER_ADMIN component will be stack-selected to version {params.version} using a {orchestration.upper()} orchestration") for stack_select_package_name in stack_select_packages: stack_select.select(stack_select_package_name, params.version) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/setup_ranger_xml.py index 51251f42c8e..05cb863c524 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/setup_ranger_xml.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/package/scripts/setup_ranger_xml.py @@ -134,7 +134,7 @@ def setup_ranger_admin(upgrade_type=None): if os.path.isfile(params.ranger_admin_default_file): File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group) else: - Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.ranger_admin_default_file, ranger_conf)) + Logger.warning(f'Required file {params.ranger_admin_default_file} does not exist, copying the file to {ranger_conf} path') src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml') dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml') Execute(('cp', '-f', src_file, dst_file), sudo=True) @@ -143,7 +143,7 @@ def setup_ranger_admin(upgrade_type=None): if os.path.isfile(params.security_app_context_file): File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group) else: - Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.security_app_context_file, ranger_conf)) + Logger.warning(f'Required file {params.security_app_context_file} does not exist, copying the file to {ranger_conf} path') src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml') dst_file = format('{ranger_home}/conf/security-applicationContext.xml') Execute(('cp', '-f', src_file, dst_file), sudo=True) @@ -705,7 +705,7 @@ def setup_ranger_audit_solr(): secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file) secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file) except ExecutionFailed as execution_exception: - Logger.error('Error when configuring Solr for Ranger, Kindly check Solr/Zookeeper services to be up and running:\n {0}'.format(execution_exception)) + Logger.error(f'Error when configuring Solr for Ranger, Kindly check Solr/Zookeeper services to be up and running:\n {execution_exception}') def setup_ranger_admin_passwd_change(username, user_password, user_default_password): import params diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/service_advisor.py index eccd644458e..a8234c699d5 100755 --- 
a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/service_advisor.py @@ -238,7 +238,7 @@ def recommendRangerConfigurationsFromHDP206(self, configurations, clusterData, s else: ranger_admin_host = ranger_admin_hosts[0] - policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port) + policymgr_external_url = f"{protocol}://{ranger_admin_host}:{port}" putRangerAdminProperty('policymgr_external_url', policymgr_external_url) @@ -343,7 +343,7 @@ def recommendRangerUrlConfigurations(self, configurations, services, requiredSer if requiredServices[index]['service_name'] in servicesList: component_config_type = requiredServices[index]['config_type'] component_name = requiredServices[index]['service_name'] - component_config_property = 'ranger.plugin.{0}.policy.rest.url'.format(component_name.lower()) + component_config_property = f'ranger.plugin.{component_name.lower()}.policy.rest.url' if requiredServices[index]['service_name'] == 'RANGER_KMS': component_config_property = 'ranger.plugin.kms.policy.rest.url' putRangerSecurityProperty = self.putProperty(configurations, component_config_type, services) @@ -416,7 +416,7 @@ def recommendRangerConfigurationsFromHDP23(self, configurations, clusterData, se zookeeper_host_port = self.getZKHostPortString(services) ranger_audit_zk_port = '' if zookeeper_host_port: - ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits') + ranger_audit_zk_port = f'{zookeeper_host_port}/ranger_audits' putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port) else: putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE') @@ -426,7 +426,7 @@ def recommendRangerConfigurationsFromHDP23(self, configurations, clusterData, se if include_hdfs: if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']): default_fs = services['configurations']['core-site']['properties']['fs.defaultFS'] - putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit')) + putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', f'{default_fs}/ranger/audit') # Recommend Ranger supported service's audit properties ranger_services = [ @@ -482,7 +482,7 @@ def recommendRangerConfigurationsFromHDP23(self, configurations, clusterData, se knox_host = knox_hosts[0] if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]: knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port'] - putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port)) + putRangerAdminProperty('ranger.sso.providerurl', f'https://{knox_host}:{knox_port}/gateway/knoxsso/api/v1/websso') required_services = [ {'service_name': 'HDFS', 'config_type': 'ranger-hdfs-security'}, @@ -527,7 +527,7 @@ def recommendRangerConfigurationsFromHDP25(self, configurations, clusterData, se if application_properties and 'atlas.server.http.port' in application_properties: atlas_port = application_properties['atlas.server.http.port'] - atlas_rest_endpoint = '{0}://{1}:{2}'.format(protocol, atlas_host, atlas_port) + atlas_rest_endpoint = f'{protocol}://{atlas_host}:{atlas_port}' putTagsyncSiteProperty('ranger.tagsync.source.atlas', 'true') putTagsyncSiteProperty('ranger.tagsync.source.atlasrest.endpoint', 
atlas_rest_endpoint) @@ -568,10 +568,10 @@ def recommendRangerConfigurationsFromHDP25(self, configurations, clusterData, se if 'infra-solr-env' in services['configurations'] and \ ('infra_solr_znode' in services['configurations']['infra-solr-env']['properties']): infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode'] - ranger_audit_zk_port = '{0}{1}'.format(zookeeper_host_port, infra_solr_znode) + ranger_audit_zk_port = f'{zookeeper_host_port}{infra_solr_znode}' putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port) elif zookeeper_host_port and is_solr_cloud_enabled and is_external_solr_cloud_enabled: - ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits') + ranger_audit_zk_port = f'{zookeeper_host_port}/ranger_audits' putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port) else: putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE') @@ -651,7 +651,7 @@ def recommendRangerConfigurationsFromHDP25(self, configurations, clusterData, se xasecure_audit_destination_hdfs = services['configurations']['ranger-env']['properties']['xasecure.audit.destination.hdfs'] if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']): - xasecure_audit_destination_hdfs_dir = '{0}/{1}/{2}'.format(services['configurations']['core-site']['properties']['fs.defaultFS'] ,'ranger','audit') + xasecure_audit_destination_hdfs_dir = f"{services['configurations']['core-site']['properties']['fs.defaultFS']}/ranger/audit" if 'xasecure.audit.destination.solr' in configurations['ranger-env']['properties']: xasecure_audit_destination_solr = configurations['ranger-env']['properties']['xasecure.audit.destination.solr'] diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/package/scripts/kms.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/package/scripts/kms.py index 0c7e35cbb1e..564b543e456 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/package/scripts/kms.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/package/scripts/kms.py @@ -47,9 +47,9 @@ def password_validation(password, key): import params if password.strip() == "": - raise Fail("Blank password is not allowed for {0} property. Please enter valid password.".format(key)) + raise Fail(f"Blank password is not allowed for {key} property. 
Please enter valid password.") if re.search("[\\\`'\"]",password): - raise Fail("{0} password contains one of the unsupported special characters like \" ' \ `".format(key)) + raise Fail(f"{key} password contains one of the unsupported special characters like \" ' \\ `") else: Logger.info("Password validated") @@ -584,7 +584,7 @@ def enable_kms_plugin(): ) params.HdfsResource(None, action="execute") except Exception as err: - Logger.exception("Audit directory creation in HDFS for RANGER KMS Ranger plugin failed with error:\n{0}".format(err)) + Logger.exception(f"Audit directory creation in HDFS for RANGER KMS Ranger plugin failed with error:\n{err}") if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1: Logger.info('Audit to Hdfs enabled in NameNode HA environment, creating hdfs-site.xml') @@ -662,7 +662,7 @@ def create_repo(url, data, usernamepassword): "Content-Type": "application/json" } request = urllib.request.Request(base_url, data.encode(), headers) - request.add_header("Authorization", "Basic {0}".format(base64string)) + request.add_header("Authorization", f"Basic {base64string}") result = urllib.request.urlopen(request, timeout=20) response_code = result.getcode() response = json.loads(json.JSONEncoder().encode(result.read())) @@ -674,11 +674,11 @@ def create_repo(url, data, usernamepassword): return False except urllib.error.URLError as e: if isinstance(e, urllib.error.HTTPError): - raise Fail("Error creating service. Http status code - {0}. \n {1}".format(e.code, e.read())) + raise Fail(f"Error creating service. Http status code - {e.code}. \n {e.read()}") else: - raise Fail("Error creating service. Reason - {0}.".format(e.reason)) + raise Fail(f"Error creating service. Reason - {e.reason}.") except socket.timeout as e: - raise Fail("Error creating service. Reason - {0}".format(e)) + raise Fail(f"Error creating service. Reason - {e}") @safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False) def get_repo(url, name, usernamepassword): @@ -688,7 +688,7 @@ def get_repo(url, name, usernamepassword): base64string = base64.b64encode(usernamepassword.encode()).decode().replace('\n', '') request.add_header("Content-Type", "application/json") request.add_header("Accept", "application/json") - request.add_header("Authorization", "Basic {0}".format(base64string)) + request.add_header("Authorization", f"Basic {base64string}") result = urllib.request.urlopen(request, timeout=20) response_code = result.getcode() response = json.loads(result.read()) @@ -705,11 +705,11 @@ def get_repo(url, name, usernamepassword): return False except urllib.error.URLError as e: if isinstance(e, urllib.error.HTTPError): - raise Fail("Error getting {0} service. Http status code - {1}. \n {2}".format(name, e.code, e.read())) + raise Fail(f"Error getting {name} service. Http status code - {e.code}. \n {e.read()}") else: - raise Fail("Error getting {0} service. Reason - {1}.".format(name, e.reason)) + raise Fail(f"Error getting {name} service. Reason - {e.reason}.") except socket.timeout as e: - raise Fail("Error creating service. Reason - {0}".format(e)) + raise Fail(f"Error creating service. 
Reason - {e}") def check_ranger_service_support_kerberos(user, keytab, principal): import params @@ -723,7 +723,7 @@ def check_ranger_service_support_kerberos(user, keytab, principal): if response_code is not None and response_code[0] == 200: get_repo_name_response = ranger_adm_obj.get_repository_by_name_curl(user, keytab, principal, params.repo_name, 'kms', 'true', is_keyadmin = True) if get_repo_name_response is not None: - Logger.info('KMS repository {0} exist'.format(get_repo_name_response['name'])) + Logger.info(f"KMS repository {get_repo_name_response['name']} exist") return True else: create_repo_response = ranger_adm_obj.create_repository_curl(user, keytab, principal, params.repo_name, json.dumps(params.kms_ranger_plugin_repo), None, is_keyadmin = True) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/service_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/service_advisor.py index 77a3fac5351..97f49695750 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/service_advisor.py @@ -218,14 +218,14 @@ def recommendRangerKMSConfigurationsFromHDP23(self, configurations, clusterData, kmsUserOld = self.getOldValue(services, 'kms-env', 'kms_user') self.put_proxyuser_value(kmsUser, '*', is_groups=True, services=services, configurations=configurations, put_function=putCoreSiteProperty) if kmsUserOld is not None and kmsUser != kmsUserOld: - putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(kmsUserOld), 'delete', 'true') - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUserOld)}) - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUser)}) + putCoreSitePropertyAttribute(f"hadoop.proxyuser.{kmsUserOld}.groups", 'delete', 'true') + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{kmsUserOld}.groups"}) + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{kmsUser}.groups"}) if "HDFS" in servicesList: if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']): default_fs = services['configurations']['core-site']['properties']['fs.defaultFS'] - putRangerKmsAuditProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit')) + putRangerKmsAuditProperty('xasecure.audit.destination.hdfs.dir', f'{default_fs}/ranger/audit') required_services = [{'service' : 'YARN', 'config-type': 'yarn-env', 'property-name': 'yarn_user', 'proxy-category': ['hosts', 'users', 'groups']}, {'service' : 'SPARK', 'config-type': 'livy-env', 'property-name': 'livy_user', 'proxy-category': ['hosts', 'users', 'groups']}] @@ -242,16 +242,16 @@ def recommendRangerKMSConfigurationsFromHDP23(self, configurations, clusterData, ambari_user = self.getAmbariUser(services) if security_enabled: # adding for ambari user - putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.users'.format(ambari_user), '*') - putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.hosts'.format(ambari_user), '*') + putRangerKmsSiteProperty(f'hadoop.kms.proxyuser.{ambari_user}.users', '*') + putRangerKmsSiteProperty(f'hadoop.kms.proxyuser.{ambari_user}.hosts', '*') # adding for HTTP putRangerKmsSiteProperty('hadoop.kms.proxyuser.HTTP.users', 
'*') putRangerKmsSiteProperty('hadoop.kms.proxyuser.HTTP.hosts', '*') else: self.deleteKMSProxyUsers(configurations, services, hosts, required_services_for_secure) # deleting ambari user proxy properties - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.hosts'.format(ambari_user), 'delete', 'true') - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.users'.format(ambari_user), 'delete', 'true') + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{ambari_user}.hosts', 'delete', 'true') + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{ambari_user}.users', 'delete', 'true') # deleting HTTP proxy properties putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.HTTP.hosts', 'delete', 'true') putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.HTTP.users', 'delete', 'true') @@ -353,25 +353,25 @@ def recommendKMSProxyUsers(self, configurations, services, hosts, requiredServic service_old_user = self.getOldValue(services, config_type, property_name) if 'groups' in proxy_category: - putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.groups'.format(service_user), '*') + putRangerKmsSiteProperty(f'hadoop.kms.proxyuser.{service_user}.groups', '*') if 'hosts' in proxy_category: - putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.hosts'.format(service_user), '*') + putRangerKmsSiteProperty(f'hadoop.kms.proxyuser.{service_user}.hosts', '*') if 'users' in proxy_category: - putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.users'.format(service_user), '*') + putRangerKmsSiteProperty(f'hadoop.kms.proxyuser.{service_user}.users', '*') if service_old_user is not None and service_user != service_old_user: if 'groups' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.groups'.format(service_old_user), 'delete', 'true') - services["forced-configurations"].append({"type" : "kms-site", "name" : "hadoop.kms.proxyuser.{0}.groups".format(service_old_user)}) - services["forced-configurations"].append({"type" : "kms-site", "name" : "hadoop.kms.proxyuser.{0}.groups".format(service_user)}) + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_old_user}.groups', 'delete', 'true') + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_old_user}.groups"}) + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_user}.groups"}) if 'hosts' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.hosts'.format(service_old_user), 'delete', 'true') - services["forced-configurations"].append({"type" : "kms-site", "name" : "hadoop.kms.proxyuser.{0}.hosts".format(service_old_user)}) - services["forced-configurations"].append({"type" : "kms-site", "name" : "hadoop.kms.proxyuser.{0}.hosts".format(service_user)}) + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_old_user}.hosts', 'delete', 'true') + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_old_user}.hosts"}) + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_user}.hosts"}) if 'users' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.users'.format(service_old_user), 'delete', 'true') - services["forced-configurations"].append({"type" : "kms-site", "name" : "hadoop.kms.proxyuser.{0}.users".format(service_old_user)}) - services["forced-configurations"].append({"type" : "kms-site", "name" : 
"hadoop.kms.proxyuser.{0}.users".format(service_user)}) + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_old_user}.users', 'delete', 'true') + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_old_user}.users"}) + services["forced-configurations"].append({"type" : "kms-site", "name" : f"hadoop.kms.proxyuser.{service_user}.users"}) def deleteKMSProxyUsers(self, configurations, services, hosts, requiredServices): servicesList = [service["StackServices"]["service_name"] for service in services["services"]] @@ -388,11 +388,11 @@ def deleteKMSProxyUsers(self, configurations, services, hosts, requiredServices) service_user = services['configurations'][config_type]['properties'][property_name] if 'groups' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.groups'.format(service_user), 'delete', 'true') + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_user}.groups', 'delete', 'true') if 'hosts' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.hosts'.format(service_user), 'delete', 'true') + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_user}.hosts', 'delete', 'true') if 'users' in proxy_category: - putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.users'.format(service_user), 'delete', 'true') + putRangerKmsSitePropertyAttribute(f'hadoop.kms.proxyuser.{service_user}.users', 'delete', 'true') class RangerKMSValidator(service_advisor.ServiceAdvisor): """ diff --git a/ambari-server/src/main/resources/stacks/ambari_configuration.py b/ambari-server/src/main/resources/stacks/ambari_configuration.py index ece73879e1b..b8ada8f0710 100644 --- a/ambari-server/src/main/resources/stacks/ambari_configuration.py +++ b/ambari-server/src/main/resources/stacks/ambari_configuration.py @@ -309,7 +309,7 @@ def get_server_url(self): ''' ldap_host = self.get_server_host() ldap_port = self.get_server_port() - return None if ldap_host is None or ldap_port is None else '{}:{}'.format(ldap_host,ldap_port) + return None if ldap_host is None or ldap_port is None else f'{ldap_host}:{ldap_port}' def get_secondary_server_host(self): ''' @@ -330,7 +330,7 @@ def get_secondary_server_url(self): ''' ldap_host = self.get_secondary_server_host() ldap_port = self.get_secondary_server_port() - return None if ldap_host is None or ldap_port is None else '{}:{}'.format(ldap_host,ldap_port) + return None if ldap_host is None or ldap_port is None else f'{ldap_host}:{ldap_port}' def is_use_ssl(self): ''' diff --git a/ambari-server/src/main/resources/stacks/service_advisor.py b/ambari-server/src/main/resources/stacks/service_advisor.py index e79e0dcea98..d9ce3670ca6 100644 --- a/ambari-server/src/main/resources/stacks/service_advisor.py +++ b/ambari-server/src/main/resources/stacks/service_advisor.py @@ -175,7 +175,7 @@ def getServiceComponentCardinalityValidations(self, services, hosts, service_nam componentDisplayName) elif "ALL" == cardinality: if componentHostsCount != hostsCount: - message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName) + message = f"{componentDisplayName} component should be installed on all hosts in cluster." 
else: if componentHostsCount != int(cardinality): message = "exactly {0} {1} components should be installed in cluster.".format(int(cardinality), diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py index 73de84d4667..3d651e2faae 100644 --- a/ambari-server/src/main/resources/stacks/stack_advisor.py +++ b/ambari-server/src/main/resources/stacks/stack_advisor.py @@ -706,7 +706,7 @@ def instantiateServiceAdvisor(self, service): class_name = service["StackServices"]["advisor_name"] if "advisor_name" in service["StackServices"] else None path = service["StackServices"]["advisor_path"] if "advisor_path" in service["StackServices"] else None - class_name_pattern = re.compile("%s.*?ServiceAdvisor" % service_name, re.IGNORECASE) + class_name_pattern = re.compile(f"{service_name}.*?ServiceAdvisor", re.IGNORECASE) if path is not None and os.path.exists(path) and class_name is not None: try: @@ -724,13 +724,13 @@ def instantiateServiceAdvisor(self, service): break if hasattr(service_advisor, best_class_name): - self.logger.info("ServiceAdvisor implementation for service {0} was loaded".format(service_name)) + self.logger.info(f"ServiceAdvisor implementation for service {service_name} was loaded") return getattr(service_advisor, best_class_name)() else: self.logger.error("Failed to load or create ServiceAdvisor implementation for service {0}: " \ "Expecting class name {1} but it was not found.".format(service_name, best_class_name)) except Exception as e: - self.logger.exception("Failed to load or create ServiceAdvisor implementation for service {0}".format(service_name)) + self.logger.exception(f"Failed to load or create ServiceAdvisor implementation for service {service_name}") return None @@ -954,7 +954,7 @@ def createComponentLayoutRecommendations(self, services, hosts): index = 0 for key in hostsComponentsMap.keys(): index += 1 - host_group_name = "host-group-{0}".format(index) + host_group_name = f"host-group-{index}" host_groups.insert(0, { "name": host_group_name, "components": hostsComponentsMap[key] } ) bindings.insert(0, {"name": host_group_name, "hosts": [{"fqdn": key}]}) @@ -1201,7 +1201,7 @@ def validateRequiredComponentsPresent(self, services): items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": component["StackServiceComponents"]["component_name"]}) elif scope == "cluster" and not dependentComponentHosts: - message = "{0} requires {1} to be present in the cluster.".format(componentDisplayName, dependentComponentDisplayName) + message = f"{componentDisplayName} requires {dependentComponentDisplayName} to be present in the cluster." 
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": component["StackServiceComponents"]["component_name"]}) return items @@ -1510,13 +1510,13 @@ def convertToNumber(number): userValue = convertToNumber(configurations[configName]["properties"][propertyName]) maxValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["maximum"]) if userValue > maxValue: - validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is greater than the recommended maximum of {0} ".format(maxValue))}]) + validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem(f"Value is greater than the recommended maximum of {maxValue} ")}]) if "minimum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \ propertyName in recommendedDefaults[configName]["properties"]: userValue = convertToNumber(configurations[configName]["properties"][propertyName]) minValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["minimum"]) if userValue < minValue: - validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is less than the recommended minimum of {0} ".format(minValue))}]) + validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem(f"Value is less than the recommended minimum of {minValue} ")}]) items.extend(self.toConfigurationValidationProblems(validationItems, configName)) pass @@ -1550,9 +1550,9 @@ def validateListOfConfigUsingMethod(self, configurations, recommendedDefaults, s siteProperties = self.getSiteProperties(configurations, configType) if siteProperties is not None: siteRecommendations = recommendedDefaults[configType]["properties"] - self.logger.info("SiteName: %s, method: %s" % (configType, method.__name__)) - self.logger.info("Site properties: %s" % str(siteProperties)) - self.logger.info("Recommendations: %s" % str(siteRecommendations)) + self.logger.info(f"SiteName: {configType}, method: {method.__name__}") + self.logger.info(f"Site properties: {str(siteProperties)}") + self.logger.info(f"Recommendations: {str(siteRecommendations)}") validationItems = method(siteProperties, siteRecommendations, configurations, services, hosts) items.extend(validationItems) return items @@ -1566,8 +1566,8 @@ def validateConfigurationsForSite(self, configurations, recommendedDefaults, ser siteProperties = self.getSiteProperties(configurations, siteName) if siteProperties is not None: siteRecommendations = recommendedDefaults[siteName]["properties"] - self.logger.info("SiteName: %s, method: %s" % (siteName, method.__name__)) - self.logger.info("Recommendations: %s" % str(siteRecommendations)) + self.logger.info(f"SiteName: {siteName}, method: {method.__name__}") + self.logger.info(f"Recommendations: {str(siteRecommendations)}") return method(siteProperties, siteRecommendations, configurations, services, hosts) return [] @@ -2663,9 +2663,9 @@ def getHadoopProxyUsersValidationItems(self, properties, services, hosts, config validationItems = [] users = self.getHadoopProxyUsers(services, hosts, configurations) for user_name, user_properties in users.items(): - props = ["hadoop.proxyuser.{0}.hosts".format(user_name)] + props = [f"hadoop.proxyuser.{user_name}.hosts"] if "propertyGroups" in user_properties: - props.append("hadoop.proxyuser.{0}.groups".format(user_name)) + props.append(f"hadoop.proxyuser.{user_name}.groups") for prop in props: validationItems.append({"config-name": prop, "item": 
self.validatorNotEmpty(properties, prop)}) @@ -2733,7 +2733,7 @@ def getServiceHadoopProxyUsersConfigurationDict(self): } def _getHadoopProxyUsersForService(self, serviceName, serviceUserComponents, services, hosts, configurations): - self.logger.info("Calculating Hadoop Proxy User recommendations for {0} service.".format(serviceName)) + self.logger.info(f"Calculating Hadoop Proxy User recommendations for {serviceName} service.") servicesList = self.get_services_list(services) resultUsers = {} @@ -2765,7 +2765,7 @@ def _getHadoopProxyUsersForService(self, serviceName, serviceUserComponents, ser componentHostNames.add(componentHostName) componentHostNamesString = ",".join(sorted(componentHostNames)) - self.logger.info("Host List for [service='{0}'; user='{1}'; components='{2}']: {3}".format(serviceName, user, ','.join(hostSelector), componentHostNamesString)) + self.logger.info(f"Host List for [service='{serviceName}'; user='{user}'; components='{','.join(hostSelector)}']: {componentHostNamesString}") if not proxyPropertyName in proxyUsers: proxyUsers[proxyPropertyName] = componentHostNamesString @@ -2790,27 +2790,27 @@ def recommendHadoopProxyUsers(self, configurations, services, hosts): if "HIVE" in servicesList: hive_user = get_from_dict(services, ("configurations", "hive-env", "properties", "hive_user"), default_value=None) if hive_user and get_from_dict(users, (hive_user, "propertyHosts"), default_value=None): - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(hive_user)}) + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{hive_user}.hosts"}) for user_name, user_properties in users.items(): # Add properties "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" to core-site for all users self.put_proxyuser_value(user_name, user_properties["propertyHosts"], services=services, configurations=configurations, put_function=putCoreSiteProperty) - self.logger.info("Updated hadoop.proxyuser.{0}.hosts as : {1}".format(user_name, user_properties["propertyHosts"])) + self.logger.info(f"Updated hadoop.proxyuser.{user_name}.hosts as : {user_properties['propertyHosts']}") if "propertyGroups" in user_properties: self.put_proxyuser_value(user_name, user_properties["propertyGroups"], is_groups=True, services=services, configurations=configurations, put_function=putCoreSiteProperty) # Remove old properties if user was renamed userOldValue = self.getOldValue(services, user_properties["config"], user_properties["propertyName"]) if userOldValue is not None and userOldValue != user_name: - putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true') - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)}) - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(user_name)}) + putCoreSitePropertyAttribute(f"hadoop.proxyuser.{userOldValue}.hosts", 'delete', 'true') + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{userOldValue}.hosts"}) + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{user_name}.hosts"}) if "propertyGroups" in user_properties: - putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true') - services["forced-configurations"].append({"type" : "core-site", "name" : 
"hadoop.proxyuser.{0}.groups".format(userOldValue)}) - services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(user_name)}) + putCoreSitePropertyAttribute(f"hadoop.proxyuser.{userOldValue}.groups", 'delete', 'true') + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{userOldValue}.groups"}) + services["forced-configurations"].append({"type" : "core-site", "name" : f"hadoop.proxyuser.{user_name}.groups"}) self.recommendAmbariProxyUsersForHDFS(services, configurations, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute) @@ -2822,8 +2822,8 @@ def recommendAmbariProxyUsersForHDFS(self, services, configurations, servicesLis self.put_proxyuser_value(ambari_user, "*", is_groups=True, services=services, configurations=configurations, put_function=putCoreSiteProperty) old_ambari_user = self.getOldAmbariUser(services) if old_ambari_user is not None: - putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true') - putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true') + putCoreSitePropertyAttribute(f"hadoop.proxyuser.{old_ambari_user}.hosts", 'delete', 'true') + putCoreSitePropertyAttribute(f"hadoop.proxyuser.{old_ambari_user}.groups", 'delete', 'true') def getAmbariUser(self, services): ambari_user = services['ambari-server-properties']['ambari-server.user'] @@ -2892,9 +2892,9 @@ def put_proxyuser_value(self, user_name, value, is_groups=False, services=None, result_value = ",".join(sorted([val for val in result_values_set if val])) if is_groups: - property_name = "hadoop.proxyuser.{0}.groups".format(user_name) + property_name = f"hadoop.proxyuser.{user_name}.groups" else: - property_name = "hadoop.proxyuser.{0}.hosts".format(user_name) + property_name = f"hadoop.proxyuser.{user_name}.hosts" put_function(property_name, result_value) @@ -2914,9 +2914,9 @@ def get_data_for_proxyuser(self, user_name, services, configurations, groups=Fal else: coreSite = {} if groups: - property_name = "hadoop.proxyuser.{0}.groups".format(user_name) + property_name = f"hadoop.proxyuser.{user_name}.groups" else: - property_name = "hadoop.proxyuser.{0}.hosts".format(user_name) + property_name = f"hadoop.proxyuser.{user_name}.hosts" if property_name in coreSite: property_value = coreSite[property_name] if property_value == "*": @@ -3016,7 +3016,7 @@ def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propert if defaultValue is None: return None if value < defaultValue: - return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue)) + return self.getWarnItem(f"Value is less than the recommended default of {defaultValue}") return None def validatorGreaterThenDefaultValue(self, properties, recommendedDefaults, propertyName): @@ -3034,22 +3034,22 @@ def validatorGreaterThenDefaultValue(self, properties, recommendedDefaults, prop if defaultValue is None: return None if value > defaultValue: - return self.getWarnItem("Value is greater than the recommended default of {0}".format(defaultValue)) + return self.getWarnItem(f"Value is greater than the recommended default of {defaultValue}") return None def validatorEqualsPropertyItem(self, properties1, propertyName1, properties2, propertyName2, emptyAllowed=False): if not propertyName1 in properties1: - return self.getErrorItem("Value should be set for %s" % propertyName1) + return self.getErrorItem(f"Value should be set for 
{propertyName1}") if not propertyName2 in properties2: - return self.getErrorItem("Value should be set for %s" % propertyName2) + return self.getErrorItem(f"Value should be set for {propertyName2}") value1 = properties1.get(propertyName1) if value1 is None and not emptyAllowed: - return self.getErrorItem("Empty value for %s" % propertyName1) + return self.getErrorItem(f"Empty value for {propertyName1}") value2 = properties2.get(propertyName2) if value2 is None and not emptyAllowed: - return self.getErrorItem("Empty value for %s" % propertyName2) + return self.getErrorItem(f"Empty value for {propertyName2}") if value1 != value2: return self.getWarnItem("It is recommended to set equal values " "for properties {0} and {1}".format(propertyName1, propertyName2)) @@ -3059,10 +3059,10 @@ def validatorEqualsPropertyItem(self, properties1, propertyName1, def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults, propertyName): if not propertyName in properties: - return self.getErrorItem("Value should be set for %s" % propertyName) + return self.getErrorItem(f"Value should be set for {propertyName}") value = properties.get(propertyName) if not propertyName in recommendedDefaults: - return self.getErrorItem("Value should be recommended for %s" % propertyName) + return self.getErrorItem(f"Value should be recommended for {propertyName}") recommendedValue = recommendedDefaults.get(propertyName) if value != recommendedValue: return self.getWarnItem("It is recommended to set value {0} " @@ -3071,10 +3071,10 @@ def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults, def validatorNotEmpty(self, properties, propertyName): if not propertyName in properties: - return self.getErrorItem("Value should be set for {0}".format(propertyName)) + return self.getErrorItem(f"Value should be set for {propertyName}") value = properties.get(propertyName) if not value: - return self.getWarnItem("Empty value for {0}".format(propertyName)) + return self.getWarnItem(f"Empty value for {propertyName}") return None def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo): @@ -3091,7 +3091,7 @@ def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, host mountPoint = DefaultStackAdvisor.getMountPointForDir(dir, mountPoints) if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint: - return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName)) + return self.getWarnItem(f"It is not recommended to use root partition for {propertyName}") return None @@ -3113,10 +3113,10 @@ def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, reqiuredD mountPoint = DefaultStackAdvisor.getMountPointForDir(dir, list(mountPoints.keys())) if not mountPoints: - return self.getErrorItem("No disk info found on host %s" % hostInfo["host_name"]) + return self.getErrorItem(f"No disk info found on host {hostInfo['host_name']}") if mountPoint is None: - return self.getErrorItem("No mount point in directory %s. Mount points: %s" % (dir, ', '.join(list(mountPoints.keys())))) + return self.getErrorItem(f"No mount point in directory {dir}. Mount points: {', '.join(list(mountPoints.keys()))}") if mountPoints[mountPoint] < reqiuredDiskSpace: msg = "Ambari Metrics disk space requirements not met. 
\n" \ diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py index 738c4f5d7f7..97f7a6bb079 100644 --- a/ambari-server/src/test/python/TestAmbariServer.py +++ b/ambari-server/src/test/python/TestAmbariServer.py @@ -3860,7 +3860,7 @@ def side_effect_1(*args, **kwargs): self.assertFalse(remove_file_mock.called) self.assertTrue("Ambari-DDL-Postgres-CREATE.sql" in run_os_command_1_mock.call_args[0][0][3]) - self.assertTrue("-U {0}".format(db_username) in run_os_command_1_mock.call_args[0][0][3]) + self.assertTrue(f"-U {db_username}" in run_os_command_1_mock.call_args[0][0][3]) #if DB user name was changed args = reset_mocks() @@ -7511,7 +7511,7 @@ def test_ldap_sync_all(self, logger_mock, is_root_method, is_server_runing_mock, sync_ldap(options) - url = '{0}://{1}:{2!s}{3}'.format('http', '127.0.0.1', '8080', '/api/v1/ldap_sync_events') + url = 'http://127.0.0.1:8080/api/v1/ldap_sync_events' request = urlopen_mock.call_args_list[0][0][0] self.assertEqual(url, str(request.get_full_url())) @@ -7554,7 +7554,7 @@ def test_ldap_sync_all_post_process_existing_users(self, logger_mock, is_root_me sync_ldap(options) - url = '{0}://{1}:{2!s}{3}'.format('http', '127.0.0.1', '8080', '/api/v1/ldap_sync_events') + url = 'http://127.0.0.1:8080/api/v1/ldap_sync_events' request = urlopen_mock.call_args_list[0][0][0] self.assertEqual(url, str(request.get_full_url())) @@ -7788,7 +7788,7 @@ def test_ldap_sync_ssl(self, logger_mock, is_root_method, is_server_runing_mock, sync_ldap(options) - url = '{0}://{1}:{2!s}{3}'.format('https', socket.getfqdn(), '8443', '/api/v1/ldap_sync_events') + url = f'https://{socket.getfqdn()}:8443/api/v1/ldap_sync_events' request = urlopen_mock.call_args_list[0][0][0] self.assertEqual(url, str(request.get_full_url())) diff --git a/ambari-server/src/test/python/TestBootstrap.py b/ambari-server/src/test/python/TestBootstrap.py index 494ac72e4e3..94c1048f208 100644 --- a/ambari-server/src/test/python/TestBootstrap.py +++ b/ambari-server/src/test/python/TestBootstrap.py @@ -50,14 +50,14 @@ def test_getRemoteName(self): utime2 = 12345 bootstrap_obj.getUtime = MagicMock(return_value=utime1) remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh") - self.assertEqual(remote1, "/tmp/setupAgent{0}.sh".format(utime1)) + self.assertEqual(remote1, f"/tmp/setupAgent{utime1}.sh") bootstrap_obj.getUtime.return_value=utime2 remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh") - self.assertEqual(remote1, "/tmp/setupAgent{0}.sh".format(utime1)) + self.assertEqual(remote1, f"/tmp/setupAgent{utime1}.sh") remote2 = bootstrap_obj.getRemoteName("/tmp/host_pass") - self.assertEqual(remote2, "/tmp/host_pass{0}".format(utime2)) + self.assertEqual(remote2, f"/tmp/host_pass{utime2}") # TODO: Test bootstrap timeout diff --git a/ambari-server/src/test/python/TestMpacks.py b/ambari-server/src/test/python/TestMpacks.py index 31363995280..4d725c789eb 100644 --- a/ambari-server/src/test/python/TestMpacks.py +++ b/ambari-server/src/test/python/TestMpacks.py @@ -258,7 +258,7 @@ def test_install_mpack_with_malformed_mpack(self, get_ambari_properties_mock, do try: install_mpack(options) except FatalException as e: - self.assertEqual("Malformed management pack {0}. Metadata file missing!".format(options.mpack_path), e.reason) + self.assertEqual(f"Malformed management pack {options.mpack_path}. 
Metadata file missing!", e.reason) fail = True self.assertTrue(fail) diff --git a/ambari-server/src/test/python/TestServerUtils.py b/ambari-server/src/test/python/TestServerUtils.py index 9c1fa4ea482..f2b42c7e927 100644 --- a/ambari-server/src/test/python/TestServerUtils.py +++ b/ambari-server/src/test/python/TestServerUtils.py @@ -72,7 +72,7 @@ def test_get_ambari_server_api_base(self): CLIENT_API_PORT_PROPERTY: None }) result = get_ambari_server_api_base(properties) - self.assertEqual(result, 'https://{0}:8443/api/v1/'.format(fqdn)) + self.assertEqual(result, f'https://{fqdn}:8443/api/v1/') def test_get_ambari_admin_credentials_from_cli_options(self): diff --git a/ambari-server/src/test/python/stacks/stack-hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/stack-hooks/after-INSTALL/test_after_install.py index 16e8dd74f74..17f7d5e3c9e 100644 --- a/ambari-server/src/test/python/stacks/stack-hooks/after-INSTALL/test_after_install.py +++ b/ambari-server/src/test/python/stacks/stack-hooks/after-INSTALL/test_after_install.py @@ -73,7 +73,7 @@ def test_hook_default(self): def test_hook_default_conf_select(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock): def mocked_conf_select(arg1, arg2, arg3, dry_run = False): - return "/etc/{0}/{1}/0".format(arg2, arg3) + return f"/etc/{arg2}/{arg3}/0" conf_select_create_mock.side_effect = mocked_conf_select @@ -140,7 +140,7 @@ def mocked_conf_select(arg1, arg2, arg3, dry_run = False, ignore_errors = False) raise Exception("whoops") else: return None - return "/etc/{0}/{1}/0".format(arg2, arg3) + return f"/etc/{arg2}/{arg3}/0" conf_select_create_mock.side_effect = mocked_conf_select conf_select_select_mock.side_effect = mocked_conf_select @@ -213,7 +213,7 @@ def test_hook_default_stack_select_specific_version(self, rmtree_mock, symlink_m """ def mocked_conf_select(arg1, arg2, arg3, dry_run = False): - return "/etc/{0}/{1}/0".format(arg2, arg3) + return f"/etc/{arg2}/{arg3}/0" conf_select_create_mock.side_effect = mocked_conf_select @@ -244,7 +244,7 @@ def mocked_conf_select(arg1, arg2, arg3, dry_run = False): def test_hook_default_conf_select_suspended(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock): def mocked_conf_select(arg1, arg2, arg3, dry_run = False): - return "/etc/{0}/{1}/0".format(arg2, arg3) + return f"/etc/{arg2}/{arg3}/0" conf_select_create_mock.side_effect = mocked_conf_select @@ -310,7 +310,7 @@ def test_hook_setup_stack_symlinks_skipped(self, rmtree_mock, symlink_mock, conf """ def mocked_conf_select(arg1, arg2, arg3, dry_run = False): - return "/etc/{0}/{1}/0".format(arg2, arg3) + return f"/etc/{arg2}/{arg3}/0" conf_select_create_mock.side_effect = mocked_conf_select diff --git a/ambari-server/src/test/python/stacks/stack-hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/stack-hooks/before-START/test_before_start.py index 08c5d8515af..2b8d5ae7331 100644 --- a/ambari-server/src/test/python/stacks/stack-hooks/before-START/test_before_start.py +++ b/ambari-server/src/test/python/stacks/stack-hooks/before-START/test_before_start.py @@ -286,7 +286,7 @@ def test_hook_default_hdfs(self): self.assertNoMoreResources() def test_hook_refresh_topology_custom_directories(self): - config_file = "{0}/test/python/stacks/configs/default.json".format(self.get_src_folder()) + config_file = f"{self.get_src_folder()}/test/python/stacks/configs/default.json" with open(config_file, "r") as f: default_json = json.load(f) diff --git 
a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py index a1ebd29bd17..76f8b81f472 100644 --- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py +++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py @@ -131,7 +131,7 @@ def executeScript(self, path, classname=None, command=None, config_file=None, Script.repository_util = RepositoryUtil(self.config_dict) method = RMFTestCase._get_attr(script_class_inst, command) except IOError as err: - raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message)) + raise RuntimeError(f"Cannot load class {classname} from {norm_path}: {err.message}") # Reload params import, otherwise it won't change properties during next import if 'params' in sys.modules: @@ -264,7 +264,7 @@ def _getStackTestsFolder(): def _get_attr(module, attr): module_methods = dir(module) if not attr in module_methods: - raise RuntimeError("'{0}' has no attribute '{1}'".format(module, attr)) + raise RuntimeError(f"'{module}' has no attribute '{attr}'") method = getattr(module, attr) return method @@ -285,8 +285,7 @@ def reindent(self, s, numSpaces): def printResources(self, intendation=4): print for resource in RMFTestCase.env.resource_list: - s = "'{0}', {1},".format( - resource.__class__.__name__, self._ppformat(resource.name)) + s = f"'{resource.__class__.__name__}', {self._ppformat(resource.name)}," has_arguments = False for k,v in resource.arguments.items(): has_arguments = True @@ -296,7 +295,7 @@ def printResources(self, intendation=4): elif isinstance( v, UnknownConfiguration): val = "UnknownConfigurationMock()" elif hasattr(v, '__call__') and hasattr(v, '__name__'): - val = "FunctionMock('{0}')".format(v.__name__) + val = f"FunctionMock('{v.__name__}')" else: val = self._ppformat(v) # If value is multiline, format it @@ -306,7 +305,7 @@ def printResources(self, intendation=4): nextlines = "\n".join(lines [1:]) nextlines = self.reindent(nextlines, 2) val = "\n".join([firstLine, nextlines]) - param_str="{0} = {1},".format(k, val) + param_str=f"{k} = {val}," s+="\n" + self.reindent(param_str, intendation) # Decide whether we want bracket to be at next line if has_arguments: @@ -314,7 +313,7 @@ def printResources(self, intendation=4): else: before_bracket = "" # Add assertion - s = "self.assertResourceCalled({0}{1})".format(s, before_bracket) + s = f"self.assertResourceCalled({s}{before_bracket})" # Intendation s = self.reindent(s, intendation) print(s) @@ -362,14 +361,14 @@ def assertResourceCalledRegexp(self, resource_type, name, **kwargs): actual_value = kwargs.get(key, '') if self.isstring(resource_value): self.assertRegex(resource_value, actual_value, - msg="Key '%s': '%s' does not match with '%s'" % (key, resource_value, actual_value)) + msg=f"Key '{key}': '{resource_value}' does not match with '{actual_value}'") else: # check only the type of a custom object self.assertEqual(resource_value.__class__.__name__, actual_value.__class__.__name__) def assertRegexpMatches(self, value, pattern, msg=None): if not re.match(pattern, value): if msg is None: - raise AssertionError('pattern %s does not match %s' % (pattern, value)) + raise AssertionError(f'pattern {pattern} does not match {value}') def isstring(self, s): if (sys.version_info[0] == 3): diff --git a/ambari-server/src/test/python/unitTests.py b/ambari-server/src/test/python/unitTests.py index 450e67cb09a..57019660950 100644 --- a/ambari-server/src/test/python/unitTests.py +++ 
b/ambari-server/src/test/python/unitTests.py @@ -148,7 +148,7 @@ def resolve_paths_to_import_from_common_services(metainfo_file, base_stack_folde if os.path.isdir(inherited_from_older_version_path): paths_to_import += resolve_paths_to_import_from_common_services(metainfo_file, inherited_from_older_version_path, common_services_parent_dir, service) else: - print("Service %s. Could not get extract from metainfo file: %s. This may prevent modules from being imported." % (service, str(metainfo_file))) + print(f"Service {service}. Could not get extract from metainfo file: {str(metainfo_file)}. This may prevent modules from being imported.") return paths_to_import @@ -212,7 +212,7 @@ def stack_test_executor(base_folder, service, stack, test_mask, executor_result) except: executor_result.put({'exit_code': 1, 'tests_run': 0, - 'errors': [("Failed to load test files {0}".format(str(modules)), traceback.format_exc(), "ERROR")], + 'errors': [(f"Failed to load test files {str(modules)}", traceback.format_exc(), "ERROR")], 'failures': []}) executor_result.put(1) return @@ -350,14 +350,14 @@ def main(): for failed_tests in [test_errors,test_failures]: for err in failed_tests: - sys.stderr.write("{0}: {1}\n".format(err[2],err[0])) + sys.stderr.write(f"{err[2]}: {err[0]}\n") sys.stderr.write("----------------------------------------------------------------------\n") - sys.stderr.write("{0}\n".format(err[1])) + sys.stderr.write(f"{err[1]}\n") sys.stderr.write("----------------------------------------------------------------------\n") - sys.stderr.write("Total run:{0}\n".format(test_runs)) - sys.stderr.write("Total errors:{0}\n".format(len(test_errors))) - sys.stderr.write("Total failures:{0}\n".format(len(test_failures))) + sys.stderr.write(f"Total run:{test_runs}\n") + sys.stderr.write(f"Total errors:{len(test_errors)}\n") + sys.stderr.write(f"Total failures:{len(test_failures)}\n") try: shutil.rmtree(newtmpdirpath) diff --git a/ambari-server/src/test/resources/stacks/old_stack_advisor.py b/ambari-server/src/test/resources/stacks/old_stack_advisor.py index 64f726497c7..0181ad69387 100644 --- a/ambari-server/src/test/resources/stacks/old_stack_advisor.py +++ b/ambari-server/src/test/resources/stacks/old_stack_advisor.py @@ -410,7 +410,7 @@ def createComponentLayoutRecommendations(self, services, hosts): index = 0 for key in hostsComponentsMap.keys(): index += 1 - host_group_name = "host-group-{0}".format(index) + host_group_name = f"host-group-{index}" host_groups.append( { "name": host_group_name, "components": hostsComponentsMap[key] } ) bindings.append( { "name": host_group_name, "hosts": [{ "fqdn": socket.getfqdn(key) }] } )
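
For reference, a minimal standalone sketch (not part of the patch) of the .format()/% to f-string equivalences this change relies on. The variable names below are illustrative only, not identifiers from the Ambari sources; the assertions are assumptions about CPython 3 string formatting behavior, not output of the patched code.

#!/usr/bin/env python3
# Illustrative sketch of the conversion patterns applied throughout this diff.

protocol, host, port = "https", "c6401.ambari.apache.org", 8443

# Positional .format() -> f-string: both forms render identically.
assert "{0}://{1}:{2}".format(protocol, host, port) == f"{protocol}://{host}:{port}"

# Old-style % interpolation -> f-string.
key = "ranger.audit.solr.zookeepers"
assert "Value should be set for %s" % key == f"Value should be set for {key}"

# The !s conversion flag carries over unchanged; for values that are already
# strings it is a no-op, which is why constant URLs (such as the
# ldap_sync_events URL in the tests) could be folded into plain literals.
assert "{0!s}".format(port) == f"{port!s}" == "8443"

# Subscript expressions inside an f-string must use a quote character
# different from the enclosing one (required before Python 3.12), e.g.
# double quotes outside, single quotes inside, as in the Ranger advisor hunks.
services = {"core-site": {"fs.defaultFS": "hdfs://c6401:8020"}}
audit_dir = f"{services['core-site']['fs.defaultFS']}/ranger/audit"
assert audit_dir == "hdfs://c6401:8020/ranger/audit"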