diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f64c326ebf..00724808158 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - to simplify config (default usage of `copy_materialization='table'` if is is not found in global or local config) - to let copy several source tables into single target table at a time. ([Google doc reference](https://cloud.google.com/bigquery/docs/managing-tables#copying_multiple_source_tables)) - Customize ls task JSON output by adding new flag `--output-keys` ([#3778](https://github.com/dbt-labs/dbt/issues/3778), [#3395](https://github.com/dbt-labs/dbt/issues/3395)) +- Normalize global CLI arguments/flags ([#2990](https://github.com/dbt-labs/dbt/issues/2990), [#3839](https://github.com/dbt-labs/dbt/pull/3839)) ### Fixes @@ -15,9 +16,6 @@ - Fix issue when running the `deps` task after the `list` task in the RPC server ([#3846](https://github.com/dbt-labs/dbt/issues/3846), [#3848](https://github.com/dbt-labs/dbt/pull/3848), [#3850](https://github.com/dbt-labs/dbt/pull/3850)) - Fix bug with initializing a dataclass that inherits from `typing.Protocol`, specifically for `dbt.config.profile.Profile` ([#3843](https://github.com/dbt-labs/dbt/issues/3843), [#3855](https://github.com/dbt-labs/dbt/pull/3855)) - Introduce a macro, `get_where_subquery`, for tests that use `where` config. Alias filtering subquery as `dbt_subquery` instead of resource identifier ([#3857](https://github.com/dbt-labs/dbt/issues/3857), [#3859](https://github.com/dbt-labs/dbt/issues/3859)) - -### Fixes - - Separated table vs view configuration for BigQuery since some configuration is not possible to set for tables vs views. 
([#3682](https://github.com/dbt-labs/dbt/issues/3682), [#3691](https://github.com/dbt-labs/dbt/issues/3682)) ### Under the hood diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 90e2fc2938a..67789c9834f 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -238,12 +238,6 @@ def _close_handle(cls, connection: Connection) -> None: @classmethod def _rollback(cls, connection: Connection) -> None: """Roll back the given connection.""" - if flags.STRICT_MODE: - if not isinstance(connection, Connection): - raise dbt.exceptions.CompilerException( - f'In _rollback, got {connection} - not a Connection!' - ) - if connection.transaction_open is False: raise dbt.exceptions.InternalException( f'Tried to rollback transaction on connection ' @@ -257,12 +251,6 @@ def _rollback(cls, connection: Connection) -> None: @classmethod def close(cls, connection: Connection) -> Connection: - if flags.STRICT_MODE: - if not isinstance(connection, Connection): - raise dbt.exceptions.CompilerException( - f'In close, got {connection} - not a Connection!' 
- ) - # if the connection is in closed or init, there's nothing to do if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}: return connection diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 1d9018446e5..921df9f338a 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -16,7 +16,6 @@ get_relation_returned_multiple_results, InternalException, NotImplementedException, RuntimeException, ) -from dbt import flags from dbt import deprecations from dbt.adapters.protocol import ( @@ -289,9 +288,7 @@ def clear_macro_manifest(self): def _schema_is_cached(self, database: Optional[str], schema: str) -> bool: """Check if the schema is cached, and by default logs if it is not.""" - if flags.USE_CACHE is False: - return False - elif (database, schema) not in self.cache: + if (database, schema) not in self.cache: logger.debug( 'On "{}": cache miss for schema "{}.{}", this is inefficient' .format(self.nice_connection_name(), database, schema) @@ -340,9 +337,6 @@ def _relations_cache_for_schemas(self, manifest: Manifest) -> None: """Populate the relations cache for the given schemas. Returns an iterable of the schemas populated, as strings. """ - if not flags.USE_CACHE: - return - cache_schemas = self._get_cache_schemas(manifest) with executor(self.config) as tpe: futures: List[Future[List[BaseRelation]]] = [] @@ -375,9 +369,6 @@ def set_relations_cache( """Run a query that gets a populated cache of the relations in the database and set the cache on this adapter. 
""" - if not flags.USE_CACHE: - return - with self.cache.lock: if clear: self.cache.clear() @@ -391,8 +382,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: raise_compiler_error( 'Attempted to cache a null relation for {}'.format(name) ) - if flags.USE_CACHE: - self.cache.add(relation) + self.cache.add(relation) # so jinja doesn't render things return '' @@ -406,8 +396,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: raise_compiler_error( 'Attempted to drop a null relation for {}'.format(name) ) - if flags.USE_CACHE: - self.cache.drop(relation) + self.cache.drop(relation) return '' @available @@ -428,8 +417,7 @@ def cache_renamed( .format(src_name, dst_name, name) ) - if flags.USE_CACHE: - self.cache.rename(from_relation, to_relation) + self.cache.rename(from_relation, to_relation) return '' ### diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index 11d8dce773a..d44764b7b69 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -11,7 +11,6 @@ Connection, ConnectionState, AdapterResponse ) from dbt.logger import GLOBAL_LOGGER as logger -from dbt import flags class SQLConnectionManager(BaseConnectionManager): @@ -144,13 +143,6 @@ def add_commit_query(self): def begin(self): connection = self.get_thread_connection() - - if flags.STRICT_MODE: - if not isinstance(connection, Connection): - raise dbt.exceptions.CompilerException( - f'In begin, got {connection} - not a Connection!' - ) - if connection.transaction_open is True: raise dbt.exceptions.InternalException( 'Tried to begin a new transaction on connection "{}", but ' @@ -163,12 +155,6 @@ def begin(self): def commit(self): connection = self.get_thread_connection() - if flags.STRICT_MODE: - if not isinstance(connection, Connection): - raise dbt.exceptions.CompilerException( - f'In commit, got {connection} - not a Connection!' 
- ) - if connection.transaction_open is False: raise dbt.exceptions.InternalException( 'Tried to commit transaction on connection "{}", but ' diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index da45d65825f..ceadb74ba14 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -20,6 +20,7 @@ from .renderer import ProfileRenderer DEFAULT_THREADS = 1 +# This is where PROFILES_DIR is initially set. Override in main.py. DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt') PROFILES_DIR = os.path.expanduser( os.getenv('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR) diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 8b4598f32b6..420f5f34b3a 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -12,6 +12,7 @@ from .project import Project from .renderer import DbtProjectYamlRenderer, ProfileRenderer from .utils import parse_cli_vars +from dbt import flags from dbt import tracking from dbt.adapters.factory import get_relation_class_by_name, get_include_paths from dbt.helper_types import FQNPath, PathSet @@ -144,7 +145,7 @@ def new_project(self, project_root: str) -> 'RuntimeConfig': project = Project.from_project_root( project_root, renderer, - verify_version=getattr(self.args, 'version_check', False), + verify_version=bool(flags.VERSION_CHECK), ) cfg = self.from_parts( @@ -197,7 +198,7 @@ def collect_parts( ) -> Tuple[Project, Profile]: # profile_name from the project project_root = args.project_dir if args.project_dir else os.getcwd() - version_check = getattr(args, 'version_check', False) + version_check = bool(flags.VERSION_CHECK) partial = Project.partial_load( project_root, verify_version=version_check diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 26eb6241b0d..def23fa6f14 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -526,8 +526,6 @@ def flags(self) -> Any: The list of valid flags are: - - `flags.STRICT_MODE`: True if 
`--strict` (or `-S`) was provided on the - command line - `flags.FULL_REFRESH`: True if `--full-refresh` was provided on the command line - `flags.NON_DESTRUCTIVE`: True if `--non-destructive` was provided on diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index 310b287d7f7..058a4bb96a6 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -186,9 +186,6 @@ class UserConfigContract(Protocol): partial_parse: Optional[bool] = None printer_width: Optional[int] = None - def set_values(self, cookie_dir: str) -> None: - ... - class HasCredentials(Protocol): credentials: Credentials diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 265fb88e0e2..96bf8542cdb 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -223,9 +223,7 @@ def __post_init__(self): self.user_id = tracking.active_user.id if self.send_anonymous_usage_stats is None: - self.send_anonymous_usage_stats = ( - not tracking.active_user.do_not_track - ) + self.send_anonymous_usage_stats = flags.SEND_ANONYMOUS_USAGE_STATS @classmethod def default(cls): diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index c30aa4c6f72..a33cdeb6fcb 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -156,13 +156,6 @@ def patch(self, patch: 'ParsedNodePatch'): self.columns = patch.columns self.meta = patch.meta self.docs = patch.docs - if flags.STRICT_MODE: - # It seems odd that an instance can be invalid - # Maybe there should be validation or restrictions - # elsewhere? 
- assert isinstance(self, dbtClassMixin) - dct = self.to_dict(omit_none=False) - self.validate(dct) def get_materialization(self): return self.config.materialized @@ -509,11 +502,6 @@ def patch(self, patch: ParsedMacroPatch): self.meta = patch.meta self.docs = patch.docs self.arguments = patch.arguments - if flags.STRICT_MODE: - # What does this actually validate? - assert isinstance(self, dbtClassMixin) - dct = self.to_dict(omit_none=False) - self.validate(dct) def same_contents(self, other: Optional['ParsedMacro']) -> bool: if other is None: diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index 4576e29da17..e0cb64982e2 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -1,9 +1,7 @@ from dbt.contracts.util import Replaceable, Mergeable, list_str -from dbt.contracts.connection import UserConfigContract, QueryComment +from dbt.contracts.connection import QueryComment, UserConfigContract from dbt.helper_types import NoValue from dbt.logger import GLOBAL_LOGGER as logger # noqa -from dbt import tracking -from dbt import ui from dbt.dataclass_schema import ( dbtClassMixin, ValidationError, HyphenatedDbtClassMixin, @@ -230,18 +228,13 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): use_colors: Optional[bool] = None partial_parse: Optional[bool] = None printer_width: Optional[int] = None - - def set_values(self, cookie_dir): - if self.send_anonymous_usage_stats: - tracking.initialize_tracking(cookie_dir) - else: - tracking.do_not_track() - - if self.use_colors is not None: - ui.use_colors(self.use_colors) - - if self.printer_width: - ui.printer_width(self.printer_width) + write_json: Optional[bool] = None + warn_error: Optional[bool] = None + log_format: Optional[str] = None + debug: Optional[bool] = None + version_check: Optional[bool] = None + fail_fast: Optional[bool] = None + use_experimental_parser: Optional[bool] = None @dataclass diff --git a/core/dbt/flags.py 
b/core/dbt/flags.py index c37f3e465cd..1f9c507340e 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -6,18 +6,42 @@ from pathlib import Path from typing import Optional -# initially all flags are set to None, the on-load call of reset() will set -# them for their first time. -STRICT_MODE = None -FULL_REFRESH = None -USE_CACHE = None -WARN_ERROR = None -TEST_NEW_PARSER = None + +STRICT_MODE = False # Only here for backwards compatibility +FULL_REFRESH = False # subcommand +STORE_FAILURES = False # subcommand + +# Global CLI commands USE_EXPERIMENTAL_PARSER = None +WARN_ERROR = None WRITE_JSON = None PARTIAL_PARSE = None USE_COLORS = None -STORE_FAILURES = None +PROFILES_DIR = None +DEBUG = None +LOG_FORMAT = None +VERSION_CHECK = None +FAIL_FAST = None +SEND_ANONYMOUS_USAGE_STATS = None +PRINTER_WIDTH = 80 + +# Global CLI defaults. These flags are set from three places: +# CLI args, environment variables, and user_config (profiles.yml). +# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR +flag_defaults = { + "USE_EXPERIMENTAL_PARSER": False, + "WARN_ERROR": False, + "WRITE_JSON": True, + "PARTIAL_PARSE": False, + "USE_COLORS": True, + "PROFILES_DIR": None, + "DEBUG": False, + "LOG_FORMAT": None, + "VERSION_CHECK": True, + "FAIL_FAST": False, + "SEND_ANONYMOUS_USAGE_STATS": True, + "PRINTER_WIDTH": 80 +} def env_set_truthy(key: str) -> Optional[str]: @@ -30,6 +54,12 @@ def env_set_truthy(key: str) -> Optional[str]: return value +def env_set_bool(env_value): + if env_value in ('1', 't', 'true', 'y', 'yes'): + return True + return False + + def env_set_path(key: str) -> Optional[Path]: value = os.getenv(key) if value is None: @@ -50,56 +80,97 @@ def _get_context(): return multiprocessing.get_context('spawn') +# This is not a flag, it's a place to store the lock MP_CONTEXT = _get_context() -def reset(): - global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \ - USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, 
MP_CONTEXT, USE_COLORS, \ - STORE_FAILURES - - STRICT_MODE = False - FULL_REFRESH = False - USE_CACHE = True - WARN_ERROR = False - TEST_NEW_PARSER = False - USE_EXPERIMENTAL_PARSER = False - WRITE_JSON = True - PARTIAL_PARSE = False - MP_CONTEXT = _get_context() - USE_COLORS = True - STORE_FAILURES = False - - -def set_from_args(args): - global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \ - USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \ - STORE_FAILURES - - USE_CACHE = getattr(args, 'use_cache', USE_CACHE) +def set_from_args(args, user_config): + global STRICT_MODE, FULL_REFRESH, WARN_ERROR, \ + USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, USE_COLORS, \ + STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT,\ + VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, PRINTER_WIDTH + STRICT_MODE = False # backwards compatibility + # cli args without user_config or env var option FULL_REFRESH = getattr(args, 'full_refresh', FULL_REFRESH) - STRICT_MODE = getattr(args, 'strict', STRICT_MODE) - WARN_ERROR = ( - STRICT_MODE or - getattr(args, 'warn_error', STRICT_MODE or WARN_ERROR) - ) - - TEST_NEW_PARSER = getattr(args, 'test_new_parser', TEST_NEW_PARSER) - USE_EXPERIMENTAL_PARSER = getattr(args, 'use_experimental_parser', USE_EXPERIMENTAL_PARSER) - WRITE_JSON = getattr(args, 'write_json', WRITE_JSON) - PARTIAL_PARSE = getattr(args, 'partial_parse', None) - MP_CONTEXT = _get_context() - - # The use_colors attribute will always have a value because it is assigned - # None by default from the add_mutually_exclusive_group function - use_colors_override = getattr(args, 'use_colors') - - if use_colors_override is not None: - USE_COLORS = use_colors_override - STORE_FAILURES = getattr(args, 'store_failures', STORE_FAILURES) - -# initialize everything to the defaults on module load -reset() + # global cli flags with env var and user_config alternatives + USE_EXPERIMENTAL_PARSER = get_flag_value('USE_EXPERIMENTAL_PARSER', 
args, user_config) + WARN_ERROR = get_flag_value('WARN_ERROR', args, user_config) + WRITE_JSON = get_flag_value('WRITE_JSON', args, user_config) + PARTIAL_PARSE = get_flag_value('PARTIAL_PARSE', args, user_config) + USE_COLORS = get_flag_value('USE_COLORS', args, user_config) + if hasattr(args, 'profiles_dir'): # Should always exist except for tests + PROFILES_DIR = args.profiles_dir # special case + DEBUG = get_flag_value('DEBUG', args, user_config) + LOG_FORMAT = get_flag_value('LOG_FORMAT', args, user_config) + VERSION_CHECK = get_flag_value('VERSION_CHECK', args, user_config) + FAIL_FAST = get_flag_value('FAIL_FAST', args, user_config) + SEND_ANONYMOUS_USAGE_STATS = get_flag_value('SEND_ANONYMOUS_USAGE_STATS', args, user_config) + PRINTER_WIDTH = get_flag_value('PRINTER_WIDTH', args, user_config) + + +def get_flag_value(flag, args, user_config): + lc_flag = flag.lower() + flag_value = getattr(args, lc_flag, None) + if flag_value is None: + # Environment variables use pattern 'DBT_{flag name}' + env_flag = f"DBT_{flag}" + env_value = os.getenv(env_flag) + if env_value is not None and env_value != '': + if flag == 'PROFILES_DIR': # keep case: paths are case-sensitive + flag_value = env_value + elif flag in ['LOG_FORMAT', 'PRINTER_WIDTH']: + flag_value = env_value.lower() + else: + flag_value = env_set_bool(env_value.lower()) + elif user_config is not None and getattr(user_config, lc_flag, None) is not None: + flag_value = getattr(user_config, lc_flag) + else: + flag_value = flag_defaults[flag] + if flag == 'PRINTER_WIDTH': # printer_width must be an int or it hangs + flag_value = int(flag_value) + + return flag_value + + +def get_flag_dict(): + return { + "use_experimental_parser": USE_EXPERIMENTAL_PARSER, + "warn_error": WARN_ERROR, + "write_json": WRITE_JSON, + "partial_parse": PARTIAL_PARSE, + "use_colors": USE_COLORS, + "profiles_dir": PROFILES_DIR, + "debug": DEBUG, + "log_format": LOG_FORMAT, + "version_check": VERSION_CHECK, + "fail_fast": FAIL_FAST, + "send_anonymous_usage_stats": 
SEND_ANONYMOUS_USAGE_STATS, + "printer_width": PRINTER_WIDTH, + } + + +# This method isn't currently used, but leaving here in case it's useful for tests +def reset(): + global STRICT_MODE, FULL_REFRESH, WARN_ERROR, \ + USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, USE_COLORS, \ + STORE_FAILURES, DEBUG, LOG_FORMAT,\ + VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, PRINTER_WIDTH + + STRICT_MODE = False # Only here for backwards compatibility + FULL_REFRESH = False # subcommand + STORE_FAILURES = False # subcommand + + USE_EXPERIMENTAL_PARSER = flag_defaults['USE_EXPERIMENTAL_PARSER'] + WARN_ERROR = flag_defaults['WARN_ERROR'] + WRITE_JSON = flag_defaults['WRITE_JSON'] + PARTIAL_PARSE = flag_defaults['PARTIAL_PARSE'] + USE_COLORS = flag_defaults['USE_COLORS'] + DEBUG = flag_defaults['DEBUG'] + LOG_FORMAT = flag_defaults['LOG_FORMAT'] + VERSION_CHECK = flag_defaults['VERSION_CHECK'] + FAIL_FAST = flag_defaults['FAIL_FAST'] + SEND_ANONYMOUS_USAGE_STATS = flag_defaults['SEND_ANONYMOUS_USAGE_STATS'] + PRINTER_WIDTH = flag_defaults['PRINTER_WIDTH'] diff --git a/core/dbt/main.py b/core/dbt/main.py index 18f95dddcf9..60ef010aab8 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -160,17 +160,6 @@ def handle(args): return res -def initialize_config_values(parsed): - """Given the parsed args, initialize the dbt tracking code. - - It would be nice to re-use this profile later on instead of parsing it - twice, but dbt's intialization is not structured in a way that makes that - easy. 
- """ - cfg = read_user_config(parsed.profiles_dir) - cfg.set_values(parsed.profiles_dir) - - @contextmanager def adapter_management(): reset_adapters() @@ -184,8 +173,15 @@ def handle_and_check(args): with log_manager.applicationbound(): parsed = parse_args(args) - # we've parsed the args - we can now decide if we're debug or not - if parsed.debug: + # Set flags from args, user config, and env vars + user_config = read_user_config(parsed.profiles_dir) # This is read again later + flags.set_from_args(parsed, user_config) + dbt.tracking.initialize_from_flags() + # Set log_format from flags + parsed.cls.set_log_format() + + # we've parsed the args and set the flags - we can now decide if we're debug or not + if flags.DEBUG: log_manager.set_debug() profiler_enabled = False @@ -198,8 +194,6 @@ def handle_and_check(args): outfile=parsed.record_timing_info ): - initialize_config_values(parsed) - with adapter_management(): task, res = run_from_args(parsed) @@ -233,15 +227,17 @@ def track_run(task): def run_from_args(parsed): log_cache_events(getattr(parsed, 'log_cache_events', False)) - flags.set_from_args(parsed) - parsed.cls.pre_init_hook(parsed) # we can now use the logger for stdout + # set log_format in the logger + parsed.cls.pre_init_hook(parsed) logger.info("Running with dbt{}".format(dbt.version.installed)) # this will convert DbtConfigErrors into RuntimeExceptions + # task could be any one of the task objects task = parsed.cls.from_args(args=parsed) + logger.debug("running dbt with arguments {parsed}", parsed=str(parsed)) log_path = None @@ -275,7 +271,8 @@ def _build_base_subparser(): base_subparser.add_argument( '--profiles-dir', - default=PROFILES_DIR, + default=None, + dest='sub_profiles_dir', # Main cli arg precedes subcommand type=str, help=''' Which directory to look in for the profiles.yml file. 
Default = {} @@ -319,15 +316,6 @@ def _build_base_subparser(): help=argparse.SUPPRESS, ) - base_subparser.add_argument( - '--bypass-cache', - action='store_false', - dest='use_cache', - help=''' - If set, bypass the adapter-level cache of database state - ''', - ) - base_subparser.set_defaults(defer=None, state=None) return base_subparser @@ -394,6 +382,7 @@ def _build_build_subparser(subparsers, base_subparser): sub.add_argument( '-x', '--fail-fast', + dest='sub_fail_fast', action='store_true', help=''' Stop execution upon a first failure. @@ -497,6 +486,7 @@ def _build_run_subparser(subparsers, base_subparser): run_sub.add_argument( '-x', '--fail-fast', + dest='sub_fail_fast', action='store_true', help=''' Stop execution upon a first failure. @@ -620,8 +610,9 @@ def _add_table_mutability_arguments(*subparsers): def _add_version_check(sub): sub.add_argument( '--no-version-check', - dest='version_check', + dest='sub_version_check', # main cli arg precedes subcommands action='store_false', + default=None, help=''' If set, skip ensuring dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version') @@ -715,6 +706,7 @@ def _build_test_subparser(subparsers, base_subparser): sub.add_argument( '-x', '--fail-fast', + dest='sub_fail_fast', action='store_true', help=''' Stop execution upon a first test failure. @@ -922,6 +914,7 @@ def parse_args(args, cls=DBTArgumentParser): '-d', '--debug', action='store_true', + default=None, help=''' Display debug logging during dbt execution. Useful for debugging and making bug reports. 
@@ -931,13 +924,14 @@ def parse_args(args, cls=DBTArgumentParser): p.add_argument( '--log-format', choices=['text', 'json', 'default'], - default='default', + default=None, help='''Specify the log format, overriding the command's default.''' ) p.add_argument( '--no-write-json', action='store_false', + default=None, dest='write_json', help=''' If set, skip writing the manifest and run_results.json files to disk @@ -948,6 +942,7 @@ def parse_args(args, cls=DBTArgumentParser): '--use-colors', action='store_const', const=True, + default=None, dest='use_colors', help=''' Colorize the output DBT prints to the terminal. Output is colorized by @@ -969,18 +964,17 @@ def parse_args(args, cls=DBTArgumentParser): ) p.add_argument( - '-S', - '--strict', - action='store_true', + '--printer-width', + dest='printer_width', help=''' - Run schema validations at runtime. This will surface bugs in dbt, but - may incur a performance penalty. + Sets the width of terminal output ''' ) p.add_argument( '--warn-error', action='store_true', + default=None, help=''' If dbt would normally warn, instead raise an exception. Examples include --models that selects nothing, deprecations, configurations @@ -989,6 +983,17 @@ def parse_args(args, cls=DBTArgumentParser): ''' ) + p.add_argument( + '--no-version-check', + dest='version_check', + action='store_false', + default=None, + help=''' + If set, skip ensuring dbt's version matches the one specified in + the dbt_project.yml file ('require-dbt-version') + ''' + ) + p.add_optional_argument_inverse( '--partial-parse', enable_help=''' @@ -1011,26 +1016,48 @@ def parse_args(args, cls=DBTArgumentParser): help=argparse.SUPPRESS, ) - # if set, extract all models and blocks with the jinja block extractor, and - # verify that we don't fail anywhere the actual jinja parser passes. The - # reverse (passing files that ends up failing jinja) is fine. - # TODO remove? 
- p.add_argument( - '--test-new-parser', - action='store_true', - help=argparse.SUPPRESS - ) - # if set, will use the tree-sitter-jinja2 parser and extractor instead of # jinja rendering when possible. p.add_argument( '--use-experimental-parser', action='store_true', + default=None, help=''' Uses an experimental parser to extract jinja values. ''' ) + p.add_argument( + '--profiles-dir', + default=PROFILES_DIR, + dest='profiles_dir', + type=str, + help=''' + Which directory to look in for the profiles.yml file. Default = {} + '''.format(PROFILES_DIR) + ) + + p.add_argument( + '--no-anonymous-usage-stats', + action='store_false', + default=None, + dest='send_anonymous_usage_stats', + help=''' + Do not send anonymous usage stat to dbt Labs + ''' + ) + + p.add_argument( + '-x', + '--fail-fast', + dest='fail_fast', + action='store_true', + default=None, + help=''' + Stop execution upon a first failure. + ''' + ) + subs = p.add_subparsers(title="Available sub-commands") base_subparser = _build_base_subparser() @@ -1078,9 +1105,26 @@ def parse_args(args, cls=DBTArgumentParser): parsed = p.parse_args(args) + # profiles_dir is set before subcommands and after, so normalize + if hasattr(parsed, 'sub_profiles_dir'): + if parsed.sub_profiles_dir is not None: + parsed.profiles_dir = parsed.sub_profiles_dir + delattr(parsed, 'sub_profiles_dir') if hasattr(parsed, 'profiles_dir'): parsed.profiles_dir = os.path.abspath(parsed.profiles_dir) + # version_check is set before subcommands and after, so normalize + if hasattr(parsed, 'sub_version_check'): + if parsed.sub_version_check is False: + parsed.version_check = False + delattr(parsed, 'sub_version_check') + + # fail_fast is set before subcommands and after, so normalize + if hasattr(parsed, 'sub_fail_fast'): + if parsed.sub_fail_fast is True: + parsed.fail_fast = True + delattr(parsed, 'sub_fail_fast') + if getattr(parsed, 'project_dir', None) is not None: expanded_user = os.path.expanduser(parsed.project_dir) parsed.project_dir 
= os.path.abspath(expanded_user) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 4bb3df2cb4d..2e4caf48c45 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -64,7 +64,6 @@ PARTIAL_PARSE_FILE_NAME = 'partial_parse.msgpack' PARSING_STATE = DbtProcessState('parsing') -DEFAULT_PARTIAL_PARSE = False class ReparseReason(StrEnum): @@ -536,18 +535,8 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: reparse_reason = ReparseReason.project_config_changed return valid, reparse_reason - def _partial_parse_enabled(self): - # if the CLI is set, follow that - if flags.PARTIAL_PARSE is not None: - return flags.PARTIAL_PARSE - # if the config is set, follow that - elif self.root_project.config.partial_parse is not None: - return self.root_project.config.partial_parse - else: - return DEFAULT_PARTIAL_PARSE - def read_manifest_for_partial_parse(self) -> Optional[Manifest]: - if not self._partial_parse_enabled(): + if not flags.PARTIAL_PARSE: logger.debug('Partial parsing not enabled') return None path = os.path.join(self.root_project.target_path, @@ -584,7 +573,7 @@ def read_manifest_for_partial_parse(self) -> Optional[Manifest]: def build_perf_info(self): mli = ManifestLoaderInfo( - is_partial_parse_enabled=self._partial_parse_enabled(), + is_partial_parse_enabled=flags.PARTIAL_PARSE, is_static_analysis_enabled=flags.USE_EXPERIMENTAL_PARSER ) for project in self.all_projects.values(): diff --git a/core/dbt/rpc/task_handler.py b/core/dbt/rpc/task_handler.py index a066b7614b0..824588d27e1 100644 --- a/core/dbt/rpc/task_handler.py +++ b/core/dbt/rpc/task_handler.py @@ -67,15 +67,16 @@ def _spawn_setup(self): keeps everything in memory. 
""" # reset flags - dbt.flags.set_from_args(self.task.args) + user_config = None + if self.task.config is not None: + user_config = self.task.config.config + dbt.flags.set_from_args(self.task.args, user_config) + dbt.tracking.initialize_from_flags() # reload the active plugin load_plugin(self.task.config.credentials.type) # register it register_adapter(self.task.config) - # reset tracking, etc - self.task.config.config.set_values(self.task.args.profiles_dir) - def task_exec(self) -> None: """task_exec runs first inside the child process""" if type(self.task) != RemoteListTask: diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 6d546e7ac76..e37c2d5e0cd 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -7,6 +7,7 @@ from dbt import tracking from dbt import ui +from dbt import flags from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import ( NodeStatus, RunResult, collect_timing_info, RunStatus @@ -69,6 +70,13 @@ def pre_init_hook(cls, args): else: log_manager.format_text() + @classmethod + def set_log_format(cls): + if flags.LOG_FORMAT == 'json': + log_manager.format_json() + else: + log_manager.format_text() + @classmethod def from_args(cls, args): try: diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index b9cd29bf66c..46df164b5b0 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -5,6 +5,7 @@ from typing import Optional, Dict, Any, List from dbt.logger import GLOBAL_LOGGER as logger +from dbt import flags import dbt.clients.system import dbt.exceptions from dbt.adapters.factory import get_adapter, register_adapter @@ -156,7 +157,7 @@ def _load_project(self): self.project = Project.from_project_root( self.project_dir, renderer, - verify_version=getattr(self.args, 'version_check', False), + verify_version=flags.VERSION_CHECK, ) except dbt.exceptions.DbtConfigError as exc: self.project_fail_details = str(exc) @@ -195,7 +196,7 @@ def _choose_profile_names(self) -> Optional[List[str]]: 
try: partial = Project.partial_load( os.path.dirname(self.project_path), - verify_version=getattr(self.args, 'version_check', False), + verify_version=bool(flags.VERSION_CHECK), ) renderer = DbtProjectYamlRenderer( generate_base_context(self.cli_vars) diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py index 28becb3a28e..4062c1466d2 100644 --- a/core/dbt/task/printer.py +++ b/core/dbt/task/printer.py @@ -30,8 +30,8 @@ def print_fancy_output_line( progress=progress, message=msg) - truncate_width = ui.PRINTER_WIDTH - 3 - justified = prefix.ljust(ui.PRINTER_WIDTH, ".") + truncate_width = ui.printer_width() - 3 + justified = prefix.ljust(ui.printer_width(), ".") if truncate and len(justified) > truncate_width: justified = justified[:truncate_width] + '...' diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index e046b498d07..e44e6d2b7bb 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -189,7 +189,7 @@ def call_runner(self, runner): logger.debug('Finished running node {}'.format( runner.node.unique_id)) - fail_fast = getattr(self.config.args, 'fail_fast', False) + fail_fast = flags.FAIL_FAST if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast: self._raise_next_tick = FailFastException( @@ -256,7 +256,7 @@ def callback(result): self._submit(pool, args, callback) # block on completion - if getattr(self.config.args, 'fail_fast', False): + if flags.FAIL_FAST: # checkout for an errors after task completion in case of # fast failure while self.job_queue.wait_until_something_was_done(): @@ -546,7 +546,11 @@ def get_result(self, results, elapsed_time, generated_at): ) def args_to_dict(self): - var_args = vars(self.args) + var_args = vars(self.args).copy() + # update the args with the flags, which could also come from environment + # variables or user_config + flag_dict = flags.get_flag_dict() + var_args.update(flag_dict) dict_args = {} # remove args keys that clutter up the dictionary for key in var_args: 
@@ -554,10 +558,11 @@ def args_to_dict(self): continue if var_args[key] is None: continue + # TODO: add more default_false_keys default_false_keys = ( 'debug', 'full_refresh', 'fail_fast', 'warn_error', - 'single_threaded', 'test_new_parser', 'log_cache_events', - 'strict' + 'single_threaded', 'log_cache_events', + 'use_experimental_parser', ) if key in default_false_keys and var_args[key] is False: continue diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py index b464cfeb897..1152b0ed4e9 100644 --- a/core/dbt/tracking.py +++ b/core/dbt/tracking.py @@ -5,6 +5,7 @@ ) from dbt.logger import GLOBAL_LOGGER as logger from dbt import version as dbt_version +from dbt import flags from snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger from snowplow_tracker import SelfDescribingJson from datetime import datetime @@ -184,7 +185,6 @@ def get_invocation_context(user, config, args): "command": args.which, "options": None, "version": str(dbt_version.installed), - "run_type": get_run_type(args), "adapter_type": adapter_type, "adapter_unique_id": adapter_unique_id, @@ -509,3 +509,11 @@ def process(self, record): "run_started_at": active_user.run_started_at.isoformat(), "invocation_id": active_user.invocation_id, }) + + +def initialize_from_flags(): + # Setting these used to be in UserConfig, but had to be moved here + if flags.SEND_ANONYMOUS_USAGE_STATS: + initialize_tracking(flags.PROFILES_DIR) + else: + do_not_track() diff --git a/core/dbt/ui.py b/core/dbt/ui.py index 484a668ded9..6ea57adfd2a 100644 --- a/core/dbt/ui.py +++ b/core/dbt/ui.py @@ -17,17 +17,6 @@ COLOR_FG_YELLOW = COLORS['yellow'] COLOR_RESET_ALL = COLORS['reset_all'] -PRINTER_WIDTH = 80 - - -def use_colors(use_colors_val=True): - flags.USE_COLORS = use_colors_val - - -def printer_width(printer_width): - global PRINTER_WIDTH - PRINTER_WIDTH = printer_width - def color(text: str, color_code: str): if flags.USE_COLORS: @@ -36,6 +25,12 @@ def color(text: str, color_code: str): return text 
+def printer_width(): + if flags.PRINTER_WIDTH: + return flags.PRINTER_WIDTH + return 80 + + def green(text: str): return color(text, COLOR_FG_GREEN) @@ -56,7 +51,7 @@ def line_wrap_message( newlines to newlines and avoid calling textwrap.fill() on them (like markdown) ''' - width = PRINTER_WIDTH - subtract + width = printer_width() - subtract if dedent: msg = textwrap.dedent(msg) diff --git a/plugins/bigquery/dbt/adapters/bigquery/impl.py b/plugins/bigquery/dbt/adapters/bigquery/impl.py index b6580b0ffa9..24aa1cb36a2 100644 --- a/plugins/bigquery/dbt/adapters/bigquery/impl.py +++ b/plugins/bigquery/dbt/adapters/bigquery/impl.py @@ -4,7 +4,6 @@ import dbt.deprecations import dbt.exceptions -import dbt.flags as flags import dbt.clients.gcloud import dbt.clients.agate_helper @@ -15,7 +14,6 @@ from dbt.adapters.bigquery.relation import BigQueryRelation from dbt.adapters.bigquery import BigQueryColumn from dbt.adapters.bigquery import BigQueryConnectionManager -from dbt.contracts.connection import Connection from dbt.contracts.graph.manifest import Manifest from dbt.logger import GLOBAL_LOGGER as logger, print_timestamped_line from dbt.utils import filter_null_values @@ -515,19 +513,6 @@ def execute_model(self, model, materialization, sql_override=None, if sql_override is None: sql_override = model.get('compiled_sql') - if flags.STRICT_MODE: - connection = self.connections.get_thread_connection() - if not isinstance(connection, Connection): - dbt.exceptions.raise_compiler_error( - f'Got {connection} - not a Connection!' 
- ) - model_uid = model.get('unique_id') - if connection.name != model_uid: - raise dbt.exceptions.InternalException( - f'Connection had name "{connection.name}", expected model ' - f'unique id of "{model_uid}"' - ) - if materialization == 'view': res = self._materialize_as_view(model) elif materialization == 'table': diff --git a/test/integration/006_simple_dependency_test/test_local_dependency.py b/test/integration/006_simple_dependency_test/test_local_dependency.py index 3691b19c5d7..0f8233fd868 100644 --- a/test/integration/006_simple_dependency_test/test_local_dependency.py +++ b/test/integration/006_simple_dependency_test/test_local_dependency.py @@ -8,6 +8,7 @@ import dbt.semver import dbt.config import dbt.exceptions +import dbt.flags class BaseDependencyTest(DBTIntegrationTest): @@ -45,8 +46,6 @@ def packages_config(self): } def run_dbt(self, *args, **kwargs): - strict = kwargs.pop('strict', False) - kwargs['strict'] = strict return super().run_dbt(*args, **kwargs) @@ -115,12 +114,9 @@ def models(self): @use_profile('postgres') def test_postgres_missing_dependency(self): - # dbt should raise a dbt exception, not raise a parse-time TypeError. 
- with self.assertRaises(dbt.exceptions.Exception) as exc: - self.run_dbt(['compile'], strict=False) - message = str(exc.exception) - self.assertIn('no_such_dependency', message) - self.assertIn('is undefined', message) + # dbt should raise a runtime exception + with self.assertRaises(dbt.exceptions.RuntimeException) as exc: + self.run_dbt(['compile']) class TestSimpleDependencyWithSchema(TestSimpleDependency): @@ -175,6 +171,54 @@ def test_postgres_local_dependency_out_of_date_no_check(self, mock_get): self.assertEqual(len(results), 5) +class TestSimpleDependencyNoVersionCheckConfig(TestSimpleDependency): + def run_dbt(self, cmd, *args, **kwargs): + # we can't add this to the config because Sources don't respect dbt_project.yml + vars_arg = yaml.safe_dump({ + 'schema_override': self.base_schema(), + }) + cmd.extend(['--vars', vars_arg]) + return super().run_dbt(cmd, *args, **kwargs) + + @property + def project_config(self): + return { + 'config-version': 2, + 'macro-paths': ['schema_override_macros'], + 'models': { + 'schema': 'dbt_test', + }, + 'seeds': { + 'schema': 'dbt_test', + } + } + + @property + def profile_config(self): + return { + 'config': { + 'send_anonymous_usage_stats': False, + 'version_check': False, + } + } + + def base_schema(self): + return 'dbt_test_{}_macro'.format(self.unique_schema()) + + def configured_schema(self): + return 'configured_{}_macro'.format(self.unique_schema()) + + @use_profile('postgres') + @mock.patch('dbt.config.project.get_installed_version') + def test_postgres_local_dependency_out_of_date_no_check(self, mock_get): + mock_get.return_value = dbt.semver.VersionSpecifier.from_version_string('0.0.1') + self.run_dbt(['deps']) + self.assertFalse(dbt.flags.VERSION_CHECK) + self.run_dbt(['seed']) + results = self.run_dbt(['run']) + self.assertEqual(len(results), 5) + + class TestSimpleDependencyHooks(DBTIntegrationTest): @property def schema(self): @@ -245,11 +289,6 @@ def packages_config(self): ] } - def run_dbt(self, *args, 
**kwargs): - strict = kwargs.pop('strict', False) - kwargs['strict'] = strict - return super().run_dbt(*args, **kwargs) - @use_profile('postgres') def test_postgres_local_dependency_same_name(self): with self.assertRaises(dbt.exceptions.DependencyException): diff --git a/test/integration/006_simple_dependency_test/test_simple_dependency.py b/test/integration/006_simple_dependency_test/test_simple_dependency.py index 445cc45e9a3..b6e6d8dcc18 100644 --- a/test/integration/006_simple_dependency_test/test_simple_dependency.py +++ b/test/integration/006_simple_dependency_test/test_simple_dependency.py @@ -1,7 +1,7 @@ import os import tempfile from test.integration.base import DBTIntegrationTest, use_profile -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DependencyException from dbt import deprecations @@ -110,10 +110,7 @@ def packages_config(self): @use_profile('postgres') def test_postgres_simple_dependency(self): - with self.assertRaises(CompilationException) as exc: - self.run_dbt(["deps"]) - assert 'is not pinned' in str(exc.exception) - self.run_dbt(['deps'], strict=False) + self.run_dbt(["deps"]) class TestSimpleDependencyWithDuplicates(DBTIntegrationTest): diff --git a/test/integration/008_schema_tests_test/test_schema_v2_tests.py b/test/integration/008_schema_tests_test/test_schema_v2_tests.py index 58eb08aa4fa..e6bfc882e97 100644 --- a/test/integration/008_schema_tests_test/test_schema_v2_tests.py +++ b/test/integration/008_schema_tests_test/test_schema_v2_tests.py @@ -117,12 +117,9 @@ def run_schema_validations(self): return test_task.run() @use_profile('postgres') - def test_postgres_malformed_schema_strict_will_break_run(self): + def test_postgres_malformed_schema_will_break_run(self): with self.assertRaises(CompilationException): - self.run_dbt(strict=True) - # even if strict = False! 
- with self.assertRaises(CompilationException): - self.run_dbt(strict=False) + self.run_dbt() class TestCustomConfigSchemaTests(DBTIntegrationTest): @@ -149,16 +146,11 @@ def project_config(self): def test_postgres_config(self): """ Test that tests use configs properly. All tests for this project will fail, configs are set to make test pass. """ - results = self.run_dbt() - results = self.run_dbt(['test'], strict=False) + results = self.run_dbt(['test'], expect_pass=False) self.assertEqual(len(results), 7) for result in results: self.assertFalse(result.skipped) - self.assertEqual( - result.failures, 0, - 'test {} failed'.format(result.node.name) - ) class TestHooksInTests(DBTIntegrationTest): @@ -393,16 +385,16 @@ def models(self): @use_profile('postgres') def test_postgres_schema_lowercase_sql(self): - results = self.run_dbt(strict=False) + results = self.run_dbt() self.assertEqual(len(results), 2) - results = self.run_dbt(['test', '-m', 'lowercase'], strict=False) + results = self.run_dbt(['test', '-m', 'lowercase']) self.assertEqual(len(results), 1) @use_profile('postgres') def test_postgres_schema_uppercase_sql(self): - results = self.run_dbt(strict=False) + results = self.run_dbt() self.assertEqual(len(results), 2) - results = self.run_dbt(['test', '-m', 'uppercase'], strict=False) + results = self.run_dbt(['test', '-m', 'uppercase']) self.assertEqual(len(results), 1) @@ -440,7 +432,7 @@ def test_postgres_test_context_tests(self): # This test tests the the TestContext and TestMacroNamespace # are working correctly self.run_dbt(['deps']) - results = self.run_dbt(strict=False) + results = self.run_dbt() self.assertEqual(len(results), 3) run_result = self.run_dbt(['test'], expect_pass=False) @@ -457,7 +449,7 @@ def test_postgres_test_context_tests(self): self.assertEqual(results[3].status, TestStatus.Fail) self.assertRegex(results[3].node.compiled_sql, r'union all') # type_two_model_a_ - self.assertEqual(results[4].status, TestStatus.Fail) + 
self.assertEqual(results[4].status, TestStatus.Warn) self.assertEqual(results[4].node.config.severity, 'WARN') class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest): @@ -500,7 +492,7 @@ def test_postgres_test_context_with_macro_namespace(self): # This test tests the the TestContext and TestMacroNamespace # are working correctly self.run_dbt(['deps']) - results = self.run_dbt(strict=False) + results = self.run_dbt() self.assertEqual(len(results), 3) run_result = self.run_dbt(['test'], expect_pass=False) @@ -515,7 +507,7 @@ def test_postgres_test_context_with_macro_namespace(self): self.assertEqual(results[2].status, TestStatus.Fail) self.assertRegex(results[2].node.compiled_sql, r'union all') # type_two_model_a_ - self.assertEqual(results[3].status, TestStatus.Fail) + self.assertEqual(results[3].status, TestStatus.Warn) self.assertEqual(results[3].node.config.severity, 'WARN') class TestSchemaTestNameCollision(DBTIntegrationTest): diff --git a/test/integration/012_deprecation_tests/test_deprecations.py b/test/integration/012_deprecation_tests/test_deprecations.py index 25973dcff56..923a26615ec 100644 --- a/test/integration/012_deprecation_tests/test_deprecations.py +++ b/test/integration/012_deprecation_tests/test_deprecations.py @@ -25,12 +25,12 @@ def models(self): @use_profile('postgres') def test_postgres_deprecations_fail(self): - self.run_dbt(strict=True, expect_pass=False) + self.run_dbt(['--warn-error', 'run'], expect_pass=False) @use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = {'adapter:already_exists'} self.assertEqual(expected, deprecations.active_deprecations) @@ -50,12 +50,12 @@ def project_config(self): @use_profile('postgres') def test_postgres_deprecations_fail(self): # this should fail at runtime - self.run_dbt(strict=True, expect_pass=False) + self.run_dbt(['--warn-error', 'run'], expect_pass=False) 
@use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = {'materialization-return'} self.assertEqual(expected, deprecations.active_deprecations) @@ -75,7 +75,7 @@ def project_config(self): @use_profile('postgres') def test_postgres_adapter_macro(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = {'adapter-macro'} self.assertEqual(expected, deprecations.active_deprecations) @@ -83,7 +83,7 @@ def test_postgres_adapter_macro(self): def test_postgres_adapter_macro_fail(self): self.assertEqual(deprecations.active_deprecations, set()) with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(strict=True) + self.run_dbt(['--warn-error', 'run']) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace assert 'The "adapter_macro" macro has been deprecated' in exc_str @@ -91,7 +91,7 @@ def test_postgres_adapter_macro_fail(self): def test_redshift_adapter_macro(self): self.assertEqual(deprecations.active_deprecations, set()) # pick up the postgres macro - self.run_dbt(strict=False) + self.run_dbt() expected = {'adapter-macro'} self.assertEqual(expected, deprecations.active_deprecations) @@ -100,7 +100,7 @@ def test_bigquery_adapter_macro(self): self.assertEqual(deprecations.active_deprecations, set()) # picked up the default -> error with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(strict=False, expect_pass=False) + self.run_dbt(expect_pass=False) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace assert 'not allowed' in exc_str # we saw the default macro @@ -120,7 +120,7 @@ def project_config(self): @use_profile('postgres') def test_postgres_adapter_macro_pkg(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = 
{'adapter-macro'} self.assertEqual(expected, deprecations.active_deprecations) @@ -128,7 +128,7 @@ def test_postgres_adapter_macro_pkg(self): def test_postgres_adapter_macro_pkg_fail(self): self.assertEqual(deprecations.active_deprecations, set()) with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(strict=True) + self.run_dbt(['--warn-error', 'run']) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace assert 'The "adapter_macro" macro has been deprecated' in exc_str @@ -137,7 +137,7 @@ def test_redshift_adapter_macro_pkg(self): self.assertEqual(deprecations.active_deprecations, set()) # pick up the postgres macro self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = {'adapter-macro'} self.assertEqual(expected, deprecations.active_deprecations) @@ -146,7 +146,7 @@ def test_bigquery_adapter_macro_pkg(self): self.assertEqual(deprecations.active_deprecations, set()) # picked up the default -> error with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(strict=False, expect_pass=False) + self.run_dbt(expect_pass=False) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace assert 'not allowed' in exc_str # we saw the default macro @@ -176,7 +176,7 @@ def project_config(self): @use_profile('postgres') def test_postgres_adapter_macro(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(strict=False) + self.run_dbt() expected = {'dispatch-packages'} self.assertEqual(expected, deprecations.active_deprecations) @@ -184,7 +184,7 @@ def test_postgres_adapter_macro(self): def test_postgres_adapter_macro_fail(self): self.assertEqual(deprecations.active_deprecations, set()) with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(strict=True) + self.run_dbt(['--warn-error', 'run']) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace assert 'Raised during 
dispatch for: string_literal' in exc_str @@ -208,7 +208,7 @@ def packages_config(self): @use_profile('postgres') def test_postgres_package_redirect(self): self.assertEqual(deprecations.active_deprecations, set()) - self.run_dbt(['deps'], strict=False) + self.run_dbt(['deps']) expected = {'package-redirect'} self.assertEqual(expected, deprecations.active_deprecations) @@ -216,7 +216,7 @@ def test_postgres_package_redirect_fail(self): self.assertEqual(deprecations.active_deprecations, set()) with self.assertRaises(dbt.exceptions.CompilationException) as exc: - self.run_dbt(['deps'], strict=True) + self.run_dbt(['--warn-error', 'deps']) exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace expected = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`" - assert expected in exc_str \ No newline at end of file + assert expected in exc_str diff --git a/test/integration/013_context_var_tests/test_context_vars.py b/test/integration/013_context_var_tests/test_context_vars.py index ed3ecfdfdf3..c7e13b5f0f7 100644 --- a/test/integration/013_context_var_tests/test_context_vars.py +++ b/test/integration/013_context_var_tests/test_context_vars.py @@ -143,7 +143,8 @@ def test_postgres_env_vars_prod(self): @use_profile('postgres') def test_postgres_env_vars_secrets(self): - _, log_output = self.run_dbt_and_capture(['--debug', 'run', '--target', 'prod']) + os.environ['DBT_DEBUG'] = 'True' + _, log_output = self.run_dbt_and_capture(['run', '--target', 'prod']) self.assertFalse("secret_variable" in log_output) self.assertTrue("regular_variable" in log_output) @@ -159,9 +160,7 @@ def models(self): @use_profile('postgres') def test_postgres_warn(self): - with pytest.raises(dbt.exceptions.CompilationException): - self.run_dbt(['run'], strict=True) - self.run_dbt(['run'], strict=False, expect_pass=True) + self.run_dbt(['run'], expect_pass=True) class TestVarDependencyInheritance(DBTIntegrationTest): @@
-199,9 +198,9 @@ def project_config(self): @use_profile('postgres') def test_postgres_var_mutual_overrides_v1_conversion(self): - self.run_dbt(['deps'], strict=False) - assert len(self.run_dbt(['seed'], strict=False)) == 2 - assert len(self.run_dbt(['run'], strict=False)) == 2 + self.run_dbt(['deps']) + assert len(self.run_dbt(['seed'])) == 2 + assert len(self.run_dbt(['run'])) == 2 self.assertTablesEqual('root_model_expected', 'model') self.assertTablesEqual('first_dep_expected', 'first_dep_model') diff --git a/test/integration/029_docs_generate_tests/test_docs_generate.py b/test/integration/029_docs_generate_tests/test_docs_generate.py index 95fa20c449c..fce9755f4db 100644 --- a/test/integration/029_docs_generate_tests/test_docs_generate.py +++ b/test/integration/029_docs_generate_tests/test_docs_generate.py @@ -2964,7 +2964,6 @@ def verify_manifest(self, expected_manifest): 'project_id'] == '098f6bcd4621d373cade4e832627b4f6' assert 'send_anonymous_usage_stats' in metadata and metadata[ 'send_anonymous_usage_stats'] is False - assert 'user_id' in metadata and metadata['user_id'] is None assert 'adapter_type' in metadata and metadata['adapter_type'] == self.adapter_type else: self.assertIn(key, expected_manifest) # sanity check @@ -3292,7 +3291,7 @@ def test_postgres_override_used(self): self.assertEqual(len(self.run_dbt(['run'])), 1) # this should pick up our failure macro and raise a compilation exception with self.assertRaises(CompilationException) as exc: - self.run_dbt(['docs', 'generate']) + self.run_dbt(['--warn-error', 'docs', 'generate']) self.assertIn('rejected: no catalogs for you', str(exc.exception)) diff --git a/test/integration/039_config_test/test_configs.py b/test/integration/039_config_test/test_configs.py index 3488f98efec..380919d1573 100644 --- a/test/integration/039_config_test/test_configs.py +++ b/test/integration/039_config_test/test_configs.py @@ -180,11 +180,11 @@ def test_postgres_disable_seed_partial_parse(self): 
@use_profile('postgres') def test_postgres_conditional_model(self): # no seeds/models - enabled should eval to False because of the target - results = self.run_dbt(['seed', '--target', 'disabled'], strict=False) + results = self.run_dbt(['seed', '--target', 'disabled']) self.assertEqual(len(results), 0) - results = self.run_dbt(['run', '--target', 'disabled'], strict=False) + results = self.run_dbt(['run', '--target', 'disabled']) self.assertEqual(len(results), 0) - results = self.run_dbt(['test', '--target', 'disabled'], strict=False) + results = self.run_dbt(['test', '--target', 'disabled']) self.assertEqual(len(results), 0) # has seeds/models - enabled should eval to True because of the target @@ -192,7 +192,7 @@ def test_postgres_conditional_model(self): self.assertEqual(len(results), 1) results = self.run_dbt(['run']) self.assertEqual(len(results), 2) - results = self.run_dbt(['test'], strict=False) + results = self.run_dbt(['test']) self.assertEqual(len(results), 5) @@ -234,14 +234,14 @@ def models(self): @use_profile('postgres') def test_postgres_warn_unused_configuration_paths(self): with self.assertRaises(CompilationException) as exc: - self.run_dbt(['seed']) + self.run_dbt(['--warn-error', 'seed']) self.assertIn('Configuration paths exist', str(exc.exception)) self.assertIn('- sources.test', str(exc.exception)) self.assertIn('- models.test', str(exc.exception)) self.assertIn('- models.test', str(exc.exception)) - self.run_dbt(['seed'], strict=False) + self.run_dbt(['seed']) class TestConfigIndivTests(DBTIntegrationTest): @property @@ -280,19 +280,19 @@ def test_postgres_configuring_individual_tests(self): self.assertEqual(len(self.run_dbt(['run'])), 2) # all tests on (minus sleeper_agent) + WARN - self.assertEqual(len(self.run_dbt(['test'], strict=False)), 5) + self.assertEqual(len(self.run_dbt(['test'])), 5) # turn off two of them directly - self.assertEqual(len(self.run_dbt(['test', '--vars', '{"enabled_direct": False}'], strict=False)), 3) + 
self.assertEqual(len(self.run_dbt(['test', '--vars', '{"enabled_direct": False}'])), 3) # turn on sleeper_agent data test directly self.assertEqual(len(self.run_dbt(['test', '--models', 'sleeper_agent', - '--vars', '{"enabled_direct": True}'], strict=False)), 1) + '--vars', '{"enabled_direct": True}'])), 1) # set three to ERROR directly results = self.run_dbt(['test', '--models', 'config.severity:error', '--vars', '{"enabled_direct": True, "severity_direct": "ERROR"}' - ], strict=False, expect_pass = False) + ], expect_pass = False) self.assertEqual(len(results), 2) self.assertEqual(results[0].status, 'fail') self.assertEqual(results[1].status, 'fail') diff --git a/test/integration/040_override_database_test/test_override_database.py b/test/integration/040_override_database_test/test_override_database.py index 1686ca1214e..f1f28430a87 100644 --- a/test/integration/040_override_database_test/test_override_database.py +++ b/test/integration/040_override_database_test/test_override_database.py @@ -68,9 +68,6 @@ def project_config(self): } } - def run_dbt_notstrict(self, args): - return self.run_dbt(args, strict=False) - class TestModelOverride(BaseOverrideDatabase): def run_database_override(self): @@ -79,9 +76,9 @@ def run_database_override(self): else: func = lambda x: x - self.run_dbt_notstrict(['seed']) + self.run_dbt(['seed']) - self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4) + self.assertEqual(len(self.run_dbt(['run'])), 4) self.assertManyRelationsEqual([ (func('seed'), self.unique_schema(), self.default_database), (func('view_2'), self.unique_schema(), self.alternative_database), @@ -115,8 +112,8 @@ def default_database(self): assert False, 'No profile database found!' 
def run_database_override(self): - self.run_dbt_notstrict(['seed']) - self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4) + self.run_dbt(['seed']) + self.assertEqual(len(self.run_dbt(['run'])), 4) self.assertExpectedRelations() def assertExpectedRelations(self): @@ -217,9 +214,9 @@ def run_database_override(self): 'database': self.alternative_database }, }) - self.run_dbt_notstrict(['seed']) + self.run_dbt(['seed']) - self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4) + self.assertEqual(len(self.run_dbt(['run'])), 4) self.assertManyRelationsEqual([ (func('seed'), self.unique_schema(), self.alternative_database), (func('view_2'), self.unique_schema(), self.alternative_database), diff --git a/test/integration/042_sources_test/test_sources.py b/test/integration/042_sources_test/test_sources.py index c9633e86e41..00e1065916b 100644 --- a/test/integration/042_sources_test/test_sources.py +++ b/test/integration/042_sources_test/test_sources.py @@ -51,7 +51,7 @@ def run_dbt_with_vars(self, cmd, *args, **kwargs): class SuccessfulSourcesTest(BaseSourcesTest): def setUp(self): super().setUp() - self.run_dbt_with_vars(['seed'], strict=False) + self.run_dbt_with_vars(['seed']) self.maxDiff = None self._id = 101 # this is the db initial value @@ -445,14 +445,9 @@ def models(self): return "malformed_models" @use_profile('postgres') - def test_postgres_malformed_schema_nonstrict_will_break_run(self): - with self.assertRaises(CompilationException): - self.run_dbt_with_vars(['seed'], strict=False) - - @use_profile('postgres') - def test_postgres_malformed_schema_strict_will_break_run(self): + def test_postgres_malformed_schema_will_break_run(self): with self.assertRaises(CompilationException): - self.run_dbt_with_vars(['seed'], strict=True) + self.run_dbt_with_vars(['seed']) class TestRenderingInSourceTests(BaseSourcesTest): diff --git a/test/integration/045_test_severity_tests/test_severity.py b/test/integration/045_test_severity_tests/test_severity.py index 
c63f7933fc4..b741349fbf5 100644 --- a/test/integration/045_test_severity_tests/test_severity.py +++ b/test/integration/045_test_severity_tests/test_severity.py @@ -28,10 +28,10 @@ def run_dbt_with_vars(self, cmd, strict_var, *args, **kwargs): @use_profile('postgres') def test_postgres_severity_warnings(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--schema'], 'false', strict=False) + ['test', '--schema'], 'false') self.assertEqual(len(results), 2) self.assertEqual(results[0].status, 'warn') self.assertEqual(results[0].failures, 2) @@ -40,10 +40,10 @@ def test_postgres_severity_warnings(self): @use_profile('postgres') def test_postgres_severity_rendered_errors(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--schema'], 'true', strict=False, expect_pass=False) + ['test', '--schema'], 'true', expect_pass=False) self.assertEqual(len(results), 2) self.assertEqual(results[0].status, 'fail') self.assertEqual(results[0].failures, 2) @@ -52,42 +52,42 @@ def test_postgres_severity_rendered_errors(self): @use_profile('postgres') def test_postgres_severity_warnings_strict(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--schema'], 'false', expect_pass=False) + ['test', '--schema'], 'false', expect_pass=True) self.assertEqual(len(results), 2) - self.assertEqual(results[0].status, 'fail') + self.assertEqual(results[0].status, 'warn') 
self.assertEqual(results[0].failures, 2) - self.assertEqual(results[1].status, 'fail') + self.assertEqual(results[1].status, 'warn') self.assertEqual(results[1].failures, 2) @use_profile('postgres') def test_postgres_data_severity_warnings(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--data'], 'false', strict=False) + ['test', '--data'], 'false') self.assertEqual(len(results), 1) self.assertEqual(results[0].status, 'warn') self.assertEqual(results[0].failures, 2) @use_profile('postgres') def test_postgres_data_severity_rendered_errors(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--data'], 'true', strict=False, expect_pass=False) + ['test', '--data'], 'true', expect_pass=False) self.assertEqual(len(results), 1) self.assertEqual(results[0].status, 'fail') self.assertEqual(results[0].failures, 2) @use_profile('postgres') def test_postgres_data_severity_warnings_strict(self): - self.run_dbt_with_vars(['seed'], 'false', strict=False) - self.run_dbt_with_vars(['run'], 'false', strict=False) + self.run_dbt_with_vars(['seed'], 'false') + self.run_dbt_with_vars(['run'], 'false') results = self.run_dbt_with_vars( - ['test', '--data'], 'false', expect_pass=False) + ['test', '--data'], 'false', expect_pass=True) self.assertEqual(len(results), 1) self.assertTrue(results[0].status, 'fail') self.assertEqual(results[0].failures, 2) diff --git a/test/integration/047_dbt_ls_test/test_ls.py b/test/integration/047_dbt_ls_test/test_ls.py index f7847310c3f..90345e1e7ec 100644 --- a/test/integration/047_dbt_ls_test/test_ls.py +++ b/test/integration/047_dbt_ls_test/test_ls.py 
@@ -44,8 +44,7 @@ def run_dbt_ls(self, args=None, expect_pass=True): if args is not None: full_args = full_args + args - result = self.run_dbt(args=full_args, expect_pass=expect_pass, - strict=False, parser=False) + result = self.run_dbt(args=full_args, expect_pass=expect_pass) log_manager.stdout_console() return result diff --git a/test/integration/052_column_quoting/test_column_quotes.py b/test/integration/052_column_quoting/test_column_quotes.py index f18ea6ff828..5b1c16d5882 100644 --- a/test/integration/052_column_quoting/test_column_quotes.py +++ b/test/integration/052_column_quoting/test_column_quotes.py @@ -33,7 +33,7 @@ def models(self): return self.dir('models-unquoted') def run_dbt(self, *args, **kwargs): - return super().run_dbt(*args, strict=False, **kwargs) + return super().run_dbt(*args, **kwargs) @use_profile('postgres') def test_postgres_column_quotes(self): diff --git a/test/integration/058_fail_fast/test_fail_fast_run.py b/test/integration/058_fail_fast/test_fail_fast_run.py index d8aa9cd9aa0..c5b92043c00 100644 --- a/test/integration/058_fail_fast/test_fail_fast_run.py +++ b/test/integration/058_fail_fast/test_fail_fast_run.py @@ -43,8 +43,27 @@ def check_audit_table(self, count=1): vals = self.run_sql(query, fetch='all') self.assertFalse(len(vals) == count, 'Execution was not stopped before run end') + @use_profile('postgres') def test_postgres_fail_fast_run(self): with self.assertRaises(FailFastException): self.run_dbt(['run', '--threads', '1', '--fail-fast']) self.check_audit_table() + + +class FailFastFromConfig(TestFastFailingDuringRun): + + @property + def profile_config(self): + return { + 'config': { + 'send_anonymous_usage_stats': False, + 'fail_fast': True, + } + } + + @use_profile('postgres') + def test_postgres_fail_fast_run_user_config(self): + with self.assertRaises(FailFastException): + self.run_dbt(['run', '--threads', '1']) + self.check_audit_table() diff --git a/test/integration/062_defer_state_test/test_modified_state.py 
b/test/integration/062_defer_state_test/test_modified_state.py index 0db59c2378d..e83d33e649f 100644 --- a/test/integration/062_defer_state_test/test_modified_state.py +++ b/test/integration/062_defer_state_test/test_modified_state.py @@ -54,7 +54,7 @@ def setUp(self): @use_profile('postgres') def test_postgres_changed_seed_contents_state(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True) + results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) assert len(results) == 0 with open('data/seed.csv') as fp: fp.readline() @@ -91,12 +91,12 @@ def test_postgres_changed_seed_contents_state(self): fp.write(f'{idx},{value}{newline}') # now if we run again, we should get a warning - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False) + results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) assert len(results) == 1 assert results[0] == 'test.seed' with pytest.raises(CompilationException) as exc: - self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=True) + self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) assert '>1MB' in str(exc.value) shutil.rmtree('./state') @@ -106,12 +106,12 @@ def test_postgres_changed_seed_contents_state(self): with open('data/seed.csv', 'a') as fp: fp.write(f'{random},test{newline}') - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True) + results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) assert len(results) == 0 @use_profile('postgres') def 
test_postgres_changed_seed_config(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True) + results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) assert len(results) == 0 self.use_default_project({'seeds': {'test': {'quote_columns': False}}}) @@ -123,7 +123,7 @@ def test_postgres_changed_seed_config(self): @use_profile('postgres') def test_postgres_unrendered_config_same(self): - results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True) + results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], expect_pass=True) assert len(results) == 0 # although this is the default value, dbt will recognize it as a change @@ -135,7 +135,7 @@ def test_postgres_unrendered_config_same(self): @use_profile('postgres') def test_postgres_changed_model_contents(self): - results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'], strict=False) + results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state']) assert len(results) == 0 with open('models/table_model.sql') as fp: @@ -164,7 +164,7 @@ def test_postgres_new_macro(self): with open('macros/second_macro.sql', 'w') as fp: fp.write(new_macro) - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False) + results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) assert len(results) == 0 os.remove('macros/second_macro.sql') @@ -172,7 +172,7 @@ def test_postgres_new_macro(self): with open('macros/macros.sql', 'a') as fp: fp.write(new_macro) - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False) + results, stdout = 
self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) assert len(results) == 0 @use_profile('postgres') @@ -191,7 +191,7 @@ def test_postgres_changed_macro_contents(self): fp.write(newline) # table_model calls this macro - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False) + results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) assert len(results) == 1 @use_profile('postgres') diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py index 1ecd637918a..81afd4fe0fa 100644 --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py +++ b/test/integration/068_partial_parsing_tests/test_partial_parsing.py @@ -113,7 +113,7 @@ def test_postgres_pp_models(self): shutil.copyfile('extra-files/models-schema2.yml', 'models-a/schema.yml') os.remove(normalize('models-a/model_three.sql')) with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "run"]) + results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) # Put model back again shutil.copyfile('extra-files/model_three.sql', 'models-a/model_three.sql') @@ -161,7 +161,7 @@ def test_postgres_pp_models(self): # Remove the macro os.remove(normalize('macros/my_macro.sql')) with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "run"]) + results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) # put back macro file, got back to schema file with no macro # add separate macro patch schema file @@ -317,7 +317,7 @@ def test_postgres_pp_sources(self): # Change seed name to wrong name shutil.copyfile('extra-files/schema-sources5.yml', 'models-b/sources.yml') with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "run"]) + results = self.run_dbt(["--partial-parse", 
"--warn-error", "run"]) # Put back seed name to right name shutil.copyfile('extra-files/schema-sources4.yml', 'models-b/sources.yml') @@ -441,7 +441,7 @@ def tearDown(self): def test_postgres_nested_macros(self): shutil.copyfile('extra-files/custom_schema_tests1.sql', 'macros-macros/custom_schema_tests.sql') - results = self.run_dbt(strict=False) + results = self.run_dbt() self.assertEqual(len(results), 2) manifest = get_manifest() macro_child_map = manifest.build_macro_child_map() @@ -454,7 +454,7 @@ def test_postgres_nested_macros(self): self.assertEqual(results[0].status, TestStatus.Fail) self.assertRegex(results[0].node.compiled_sql, r'union all') # type_two_model_a_ - self.assertEqual(results[1].status, TestStatus.Fail) + self.assertEqual(results[1].status, TestStatus.Warn) self.assertEqual(results[1].node.config.severity, 'WARN') shutil.copyfile('extra-files/custom_schema_tests2.sql', 'macros-macros/custom_schema_tests.sql') diff --git a/test/integration/072_experimental_parser_tests/test_all_experimental_parser.py b/test/integration/072_experimental_parser_tests/test_all_experimental_parser.py index 0c357722870..0b9874ec971 100644 --- a/test/integration/072_experimental_parser_tests/test_all_experimental_parser.py +++ b/test/integration/072_experimental_parser_tests/test_all_experimental_parser.py @@ -32,4 +32,15 @@ def test_postgres_experimental_parser(self): self.assertEqual(node.sources, [['my_src', 'my_tbl']]) self.assertEqual(node.config._extra, {'x': True}) self.assertEqual(node.config.tags, ['hello', 'world']) - \ No newline at end of file + + @use_profile('postgres') + def test_postgres_env_experimental_parser(self): + os.environ['DBT_USE_EXPERIMENTAL_PARSER'] = 'true' + results = self.run_dbt(['parse']) + manifest = get_manifest() + node = manifest.nodes['model.test.model_a'] + self.assertEqual(node.refs, [['model_a']]) + self.assertEqual(node.sources, [['my_src', 'my_tbl']]) + self.assertEqual(node.config._extra, {'x': True}) + 
self.assertEqual(node.config.tags, ['hello', 'world']) + diff --git a/test/integration/100_rpc_test/test_rpc.py b/test/integration/100_rpc_test/test_rpc.py index 45f4693b96a..8fb488a2d44 100644 --- a/test/integration/100_rpc_test/test_rpc.py +++ b/test/integration/100_rpc_test/test_rpc.py @@ -112,7 +112,7 @@ def setUp(self): super().setUp() os.environ['DBT_TEST_SCHEMA_NAME_VARIABLE'] = 'test_run_schema' if self.should_seed: - self.run_dbt_with_vars(['seed'], strict=False) + self.run_dbt_with_vars(['seed']) port = random.randint(49152, 61000) self._server = self.ServerProcess( cli_vars='{{test_run_schema: {}}}'.format(self.unique_schema()), diff --git a/test/integration/base.py b/test/integration/base.py index c7854921e26..c7076e16454 100644 --- a/test/integration/base.py +++ b/test/integration/base.py @@ -390,7 +390,6 @@ def setUp(self): self._created_schemas = set() reset_deprecations() - flags.reset() template_cache.clear() self.use_profile(self._pick_profile()) @@ -570,8 +569,8 @@ def project_config(self): def profile_config(self): return {} - def run_dbt(self, args=None, expect_pass=True, strict=True, parser=True, profiles_dir=True): - res, success = self.run_dbt_and_check(args=args, strict=strict, parser=parser, profiles_dir=profiles_dir) + def run_dbt(self, args=None, expect_pass=True, profiles_dir=True): + res, success = self.run_dbt_and_check(args=args, profiles_dir=profiles_dir) self.assertEqual( success, expect_pass, "dbt exit state did not match expected") @@ -594,17 +593,13 @@ def run_dbt_and_capture(self, *args, **kwargs): return res, stdout - def run_dbt_and_check(self, args=None, strict=True, parser=False, profiles_dir=True): + def run_dbt_and_check(self, args=None, profiles_dir=True): log_manager.reset_handlers() if args is None: args = ["run"] final_args = [] - if strict: - final_args.append('--strict') - if parser: - final_args.append('--test-new-parser') if os.getenv('DBT_TEST_SINGLE_THREADED') in ('y', 'Y', '1'): 
final_args.append('--single-threaded') diff --git a/test/unit/test_bigquery_adapter.py b/test/unit/test_bigquery_adapter.py index 50239013a60..46f38afde40 100644 --- a/test/unit/test_bigquery_adapter.py +++ b/test/unit/test_bigquery_adapter.py @@ -40,8 +40,6 @@ def _bq_conn(): class BaseTestBigQueryAdapter(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = True - self.raw_profile = { 'outputs': { 'oauth': { @@ -375,7 +373,7 @@ def test_get_columns_in_relation(self): class TestBigQueryRelation(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = True + pass def test_view_temp_relation(self): kwargs = { @@ -455,7 +453,7 @@ def test_invalid_relation(self): class TestBigQueryInformationSchema(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = True + pass def test_replace(self): diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py index 0d5502f78c8..75cbf1e7607 100644 --- a/test/unit/test_compiler.py +++ b/test/unit/test_compiler.py @@ -22,8 +22,6 @@ def assertEqualIgnoreWhitespace(self, a, b): "".join(b.split())) def setUp(self): - dbt.flags.STRICT_MODE = True - self.maxDiff = None self.model_config = NodeConfig.from_dict({ diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index 3b6d01fa89f..a1095fb5fe0 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -45,13 +45,6 @@ from .utils import ContractTestCase, assert_symmetric, assert_from_dict, compare_dicts, assert_fails_validation, dict_replace, replace_config -@pytest.fixture(autouse=True) -def strict_mode(): - flags.STRICT_MODE = True - yield - flags.STRICT_MODE = False - - @pytest.fixture def populated_node_config_object(): result = NodeConfig( @@ -728,13 +721,6 @@ def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_ assert patched_model_object == pre_patch -def test_patch_parsed_model_invalid(basic_parsed_model_object, basic_parsed_model_patch_object): - 
pre_patch = basic_parsed_model_object # ParsedModelNode - patch = basic_parsed_model_patch_object.replace(description=None) - with pytest.raises(ValidationError): - pre_patch.patch(patch) - - @pytest.fixture def minimal_parsed_hook_dict(): return { diff --git a/test/unit/test_docs_generate.py b/test/unit/test_docs_generate.py index 49576e9e100..f4b2eedbff7 100644 --- a/test/unit/test_docs_generate.py +++ b/test/unit/test_docs_generate.py @@ -9,7 +9,6 @@ class GenerateTest(unittest.TestCase): def setUp(self): - dbt.flags.STRICT_MODE = True self.maxDiff = None self.manifest = mock.MagicMock() self.patcher = mock.patch('dbt.task.generate.get_unique_id_mapping') diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py new file mode 100644 index 00000000000..e9ae4533658 --- /dev/null +++ b/test/unit/test_flags.py @@ -0,0 +1,193 @@ +import os +from unittest import mock, TestCase +from argparse import Namespace + +from .utils import normalize +from dbt import flags +from dbt.contracts.project import UserConfig +from dbt.config.profile import PROFILES_DIR + + +class TestFlags(TestCase): + + def setUp(self): + self.args = Namespace() + self.user_config = UserConfig() + + def test__flags(self): + + # use_experimental_parser + self.user_config.use_experimental_parser = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True) + os.environ['DBT_USE_EXPERIMENTAL_PARSER'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, False) + setattr(self.args, 'use_experimental_parser', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True) + # cleanup + os.environ.pop('DBT_USE_EXPERIMENTAL_PARSER') + delattr(self.args, 'use_experimental_parser') + flags.USE_EXPERIMENTAL_PARSER = False + self.user_config.use_experimental_parser = None + + # warn_error + self.user_config.warn_error = False + 
flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR, False) + os.environ['DBT_WARN_ERROR'] = 'true' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR, True) + setattr(self.args, 'warn_error', False) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR, False) + # cleanup + os.environ.pop('DBT_WARN_ERROR') + delattr(self.args, 'warn_error') + flags.WARN_ERROR = False + self.user_config.warn_error = None + + # write_json + self.user_config.write_json = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WRITE_JSON, True) + os.environ['DBT_WRITE_JSON'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WRITE_JSON, False) + setattr(self.args, 'write_json', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WRITE_JSON, True) + # cleanup + os.environ.pop('DBT_WRITE_JSON') + delattr(self.args, 'write_json') + + # partial_parse + self.user_config.partial_parse = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PARTIAL_PARSE, True) + os.environ['DBT_PARTIAL_PARSE'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PARTIAL_PARSE, False) + setattr(self.args, 'partial_parse', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PARTIAL_PARSE, True) + # cleanup + os.environ.pop('DBT_PARTIAL_PARSE') + delattr(self.args, 'partial_parse') + self.user_config.partial_parse = False + + # use_colors + self.user_config.use_colors = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_COLORS, True) + os.environ['DBT_USE_COLORS'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_COLORS, False) + setattr(self.args, 'use_colors', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.USE_COLORS, True) 
+ # cleanup + os.environ.pop('DBT_USE_COLORS') + delattr(self.args, 'use_colors') + + # profiles_dir + setattr(self.args, 'profiles_dir', PROFILES_DIR) + os.environ['DBT_PROFILES_DIR'] = '/user/jane/.dbt' + flags.set_from_args(self.args, self.user_config) + self.assertRegex(flags.PROFILES_DIR, r'.dbt') + setattr(self.args, 'profiles_dir', normalize('/user/joe/dbt-profiles')) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PROFILES_DIR, normalize('/user/joe/dbt-profiles')) + # cleanup + os.environ.pop('DBT_PROFILES_DIR') + delattr(self.args, 'profiles_dir') + + # debug + self.user_config.debug = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.DEBUG, True) + os.environ['DBT_DEBUG'] = 'True' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.DEBUG, True) + os.environ['DBT_DEBUG'] = 'False' + setattr(self.args, 'debug', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.DEBUG, True) + # cleanup + os.environ.pop('DBT_DEBUG') + delattr(self.args, 'debug') + self.user_config.debug = None + + # log_format -- text, json, default + self.user_config.log_format = 'text' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.LOG_FORMAT, 'text') + os.environ['DBT_LOG_FORMAT'] = 'json' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.LOG_FORMAT, 'json') + setattr(self.args, 'log_format', 'text') + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.LOG_FORMAT, 'text') + # cleanup + os.environ.pop('DBT_LOG_FORMAT') + delattr(self.args, 'log_format') + self.user_config.log_format = None + + # version_check + self.user_config.version_check = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.VERSION_CHECK, True) + os.environ['DBT_VERSION_CHECK'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.VERSION_CHECK, False) + 
setattr(self.args, 'version_check', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.VERSION_CHECK, True) + # cleanup + os.environ.pop('DBT_VERSION_CHECK') + delattr(self.args, 'version_check') + + # fail_fast + self.user_config.fail_fast = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.FAIL_FAST, True) + os.environ['DBT_FAIL_FAST'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.FAIL_FAST, False) + setattr(self.args, 'fail_fast', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.FAIL_FAST, True) + # cleanup + os.environ.pop('DBT_FAIL_FAST') + delattr(self.args, 'fail_fast') + self.user_config.fail_fast = False + + # send_anonymous_usage_stats + self.user_config.send_anonymous_usage_stats = True + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True) + os.environ['DBT_SEND_ANONYMOUS_USAGE_STATS'] = 'false' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, False) + setattr(self.args, 'send_anonymous_usage_stats', True) + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True) + # cleanup + os.environ.pop('DBT_SEND_ANONYMOUS_USAGE_STATS') + delattr(self.args, 'send_anonymous_usage_stats') + + # printer_width + self.user_config.printer_width = 100 + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PRINTER_WIDTH, 100) + os.environ['DBT_PRINTER_WIDTH'] = '80' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PRINTER_WIDTH, 80) + setattr(self.args, 'printer_width', '120') + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.PRINTER_WIDTH, 120) + # cleanup + os.environ.pop('DBT_PRINTER_WIDTH') + delattr(self.args, 'printer_width') + self.user_config.printer_width = None diff --git a/test/unit/test_graph.py 
b/test/unit/test_graph.py index 03270b8c374..440205fd55c 100644 --- a/test/unit/test_graph.py +++ b/test/unit/test_graph.py @@ -41,7 +41,6 @@ def tearDown(self): def setUp(self): # create various attributes - dbt.flags.STRICT_MODE = True self.graph_result = None tracking.do_not_track() self.profile = { diff --git a/test/unit/test_main.py b/test/unit/test_main.py deleted file mode 100644 index 09f65aa1d7b..00000000000 --- a/test/unit/test_main.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import shutil -import tempfile -import unittest -from unittest import mock - -import yaml - -from dbt import main - - -class FakeArgs: - def __init__(self, profiles_dir): - self.profiles_dir = profiles_dir - self.profile = 'test' - - -@mock.patch('dbt.ui.use_colors') -@mock.patch('dbt.tracking.do_not_track') -@mock.patch('dbt.tracking.initialize_tracking') -class TestInitializeConfig(unittest.TestCase): - def setUp(self): - self.base_dir = tempfile.mkdtemp() - self.profiles_path = os.path.join(self.base_dir, 'profiles.yml') - self.args = FakeArgs(self.base_dir) - - def _base_config(self): - return { - 'test': { - 'outputs': { - 'default': { - 'type': 'postgres', - 'host': 'test', - 'port': 5555, - 'user': 'db_user', - 'pass': 'db_pass', - 'dbname': 'dbname', - 'schema': 'schema', - }, - }, - 'target': 'default', - } - } - - def set_up_empty_config(self): - with open(self.profiles_path, 'w') as f: - f.write(yaml.dump(self._base_config())) - - def set_up_config_options(self, **kwargs): - config = self._base_config() - config.update(config=kwargs) - with open(self.profiles_path, 'w') as f: - f.write(yaml.dump(config)) - - def tearDown(self): - try: - shutil.rmtree(self.base_dir) - except: - pass - - def test__implicit_missing(self, initialize_tracking, do_not_track, use_colors): - main.initialize_config_values(self.args) - - initialize_tracking.assert_called_once_with(self.base_dir) - do_not_track.assert_not_called() - use_colors.assert_not_called() - - def 
test__implicit_opt_in_colors(self, initialize_tracking, do_not_track, use_colors): - self.set_up_empty_config() - main.initialize_config_values(self.args) - - initialize_tracking.assert_called_once_with(self.base_dir) - do_not_track.assert_not_called() - use_colors.assert_not_called() - - def test__explicit_opt_out(self, initialize_tracking, do_not_track, use_colors): - self.set_up_config_options(send_anonymous_usage_stats=False) - main.initialize_config_values(self.args) - - initialize_tracking.assert_not_called() - do_not_track.assert_called_once_with() - use_colors.assert_not_called() - - def test__explicit_opt_in(self, initialize_tracking, do_not_track, use_colors): - self.set_up_config_options(send_anonymous_usage_stats=True) - main.initialize_config_values(self.args) - - initialize_tracking.assert_called_once_with(self.base_dir) - do_not_track.assert_not_called() - use_colors.assert_not_called() - - def test__explicit_no_colors(self, initialize_tracking, do_not_track, use_colors): - self.set_up_config_options(use_colors=False) - main.initialize_config_values(self.args) - - initialize_tracking.assert_called_once_with(self.base_dir) - do_not_track.assert_not_called() - use_colors.assert_called_once_with(False) - - def test__explicit_yes_colors(self, initialize_tracking, do_not_track, use_colors): - self.set_up_config_options(use_colors=True) - main.initialize_config_values(self.args) - - initialize_tracking.assert_called_once_with(self.base_dir) - do_not_track.assert_not_called() - use_colors.assert_called_once_with(True) diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index 10a6a3f994c..3dda2ab45cb 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -55,7 +55,6 @@ class ManifestTest(unittest.TestCase): def setUp(self): - dbt.flags.STRICT_MODE = True # TODO: why is this needed for tests in this module to pass? 
tracking.active_user = None @@ -373,7 +372,7 @@ def test__build_flat_graph(self): def test_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' mock_user.invocation_id = '01234567-0123-0123-0123-0123456789ab' - mock_user.do_not_track = True + dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False now = datetime.utcnow() self.assertEqual( ManifestMetadata( @@ -396,7 +395,7 @@ def test_metadata(self, mock_user): def test_no_nodes_with_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' mock_user.invocation_id = '01234567-0123-0123-0123-0123456789ab' - mock_user.do_not_track = True + dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False metadata = ManifestMetadata( project_id='098f6bcd4621d373cade4e832627b4f6', adapter_type='postgres', @@ -486,8 +485,6 @@ def test_get_resource_fqns(self): class MixedManifestTest(unittest.TestCase): def setUp(self): - dbt.flags.STRICT_MODE = True - self.maxDiff = None self.model_config = NodeConfig.from_dict({ diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 4a50662e82e..bbaa7cf8e48 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -68,7 +68,6 @@ def _generate_macros(self): yield pm def setUp(self): - dbt.flags.STRICT_MODE = True dbt.flags.WARN_ERROR = True # HACK: this is needed since tracking events can # be sent when using the model parser diff --git a/test/unit/test_postgres_adapter.py b/test/unit/test_postgres_adapter.py index 8f5cd59013e..1a31b2b99e9 100644 --- a/test/unit/test_postgres_adapter.py +++ b/test/unit/test_postgres_adapter.py @@ -23,7 +23,6 @@ class TestPostgresAdapter(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = True project_cfg = { 'name': 'X', 'version': '0.1', @@ -331,8 +330,6 @@ def test_get_catalog_various_schemas(self, mock_get_schemas, mock_execute): class TestConnectingPostgresAdapter(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = False - self.target_dict = { 'type': 'postgres', 'dbname': 'postgres', 
diff --git a/test/unit/test_redshift_adapter.py b/test/unit/test_redshift_adapter.py index 5519aa165c9..285c41ac1a6 100644 --- a/test/unit/test_redshift_adapter.py +++ b/test/unit/test_redshift_adapter.py @@ -30,8 +30,6 @@ def fetch_cluster_credentials(*args, **kwargs): class TestRedshiftAdapter(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = True - profile_cfg = { 'outputs': { 'test': { diff --git a/test/unit/test_snowflake_adapter.py b/test/unit/test_snowflake_adapter.py index a782b7fa6ef..472a229d315 100644 --- a/test/unit/test_snowflake_adapter.py +++ b/test/unit/test_snowflake_adapter.py @@ -21,8 +21,6 @@ class TestSnowflakeAdapter(unittest.TestCase): def setUp(self): - flags.STRICT_MODE = False - profile_cfg = { 'outputs': { 'test': {