From 1a2d917d0a2b58ba54046fe013f81c697b324e46 Mon Sep 17 00:00:00 2001 From: hd121024 Date: Fri, 4 Dec 2020 14:55:04 -0800 Subject: [PATCH 1/5] pydbapi-81 Make pytd work with teradatasql driver --- setup.py | 4 +- teradata/api.py | 52 +- teradata/datatypes.py | 88 +- teradata/pulljson.py | 4 - teradata/tdodbc.py | 1159 ------------ teradata/tdrest.py | 467 ----- teradata/tdsql.py | 606 ++++++ teradata/udaexec.py | 233 +-- teradata/util.py | 206 +-- test/testClobSp.sql | 2 +- test/test_pulljson.py | 6 +- test/test_tdrest.py | 151 -- test/{test_tdodbc.py => test_teradatasql.py} | 66 +- test/test_udaexec_config.py | 4 +- test/test_udaexec_datatypes.py | 1725 ++++++++++++++---- test/test_udaexec_execute.py | 378 ++-- test/udaexec.ini | 28 +- 17 files changed, 2493 insertions(+), 2686 deletions(-) delete mode 100644 teradata/tdodbc.py delete mode 100644 teradata/tdrest.py create mode 100644 teradata/tdsql.py delete mode 100644 test/test_tdrest.py rename test/{test_tdodbc.py => test_teradatasql.py} (71%) diff --git a/setup.py b/setup.py index a215427..ed42e9c 100755 --- a/setup.py +++ b/setup.py @@ -24,8 +24,8 @@ from setuptools import setup # Make sure correct version of python is being used. 
-if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 7): - print("The teradata module does not support this version of Python, the version must be 2.7 or later.") +if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 4): + print("The teradata module does not support this version of Python, the version must be 3.4 or later.") sys.exit(1) with open('teradata/version.py') as f: diff --git a/teradata/api.py b/teradata/api.py index 37ba01f..68b7527 100644 --- a/teradata/api.py +++ b/teradata/api.py @@ -42,6 +42,24 @@ def emit(self, record): logging.getLogger("teradata").addHandler(NullHandler()) +class InParam (): + + """Represents an input parameter from a Stored Procedure""" + + def __init__(self, value, dataType=None, size=None): + self.inValue = value + self.dataType = dataType + self.size = size + self.escapeParamType = determineEscapeParamType (dataType, value) + + def setValueFunc(self, valueFunc): + self.valueFunc = valueFunc + + def value(self): + return None if self.valueFunc is None else self.valueFunc() + + def __repr__(self): + return "InParam(value={}, dataType={})".format(self.inValue, self.dataType) class OutParam (): @@ -70,14 +88,44 @@ class InOutParam (OutParam): def __init__(self, value, name=None, dataType=None, size=None): OutParam.__init__(self, name, dataType, size) self.inValue = value + self.escapeParamType = determineEscapeParamType (dataType, value) def __repr__(self): return "InOutParam(value={}, name={}, dataType={}, size={})".format( self.inValue, self.name, self.dataType, self.size) -# Define exceptions +def determineEscapeParamType (datatype, value): + from .datatypes import Interval, Period + + if datatype is None or value is None: + return datatype + + if datatype.endswith ("AS LOCATOR") and isinstance (value, (bytes, bytearray)): + return datatype + + if datatype.startswith (("BYTE", "VARBYTE", "LONG VARBYTE")) and isinstance (value, (bytes, bytearray)): + return datatype + + 
if datatype in {"BYTEINT", "BIGINT", "INTEGER", "SMALLINT", "INT"} and isinstance (value, int): + return datatype + if datatype.startswith(("INTERVAL", "PERIOD", "DATE", "VARCHAR", "CHAR", "FLOAT", "NUMBER", "DECIMAL", "XML", "LONG VARCHAR")) and isinstance (value, str): + return datatype + if datatype.startswith ("INTERVAL") and isinstance (value, Interval): + return datatype + + if datatype.startswith ("PERIOD") and isinstance (value, Period): + return datatype + + if datatype.startswith ("TIME"): + return datatype + + return None + #end determineEscapeParamType + + +# Define exceptions class Warning(Exception): # @ReservedAssignment def __init__(self, msg): @@ -124,6 +172,7 @@ def __init__(self, code, msg): class ProgrammingError(DatabaseError): def __init__(self, code, msg): + DatabaseError.__init__(self, code, msg) self.value = (code, msg) self.args = (code, msg) @@ -152,6 +201,7 @@ def __init__(self, code, msg): class OperationalError(DatabaseError): def __init__(self, code, msg): + DatabaseError.__init__(self, code, msg) self.value = (code, msg) self.args = (code, msg) diff --git a/teradata/datatypes.py b/teradata/datatypes.py index e8c1649..6a56aff 100644 --- a/teradata/datatypes.py +++ b/teradata/datatypes.py @@ -44,8 +44,12 @@ hourToSecondIntervalRegEx = re.compile("^(-?)(\d+):(\d+):(\d+\.?\d*)$") minuteToSecondIntervalRegEx = re.compile("^(-?)(\d+):(\d+\.?\d*)$") secondIntervalRegEx = re.compile("^(-?)(\d+\.?\d*)$") -periodRegEx1 = re.compile("\('(.*)',\s*'(.*)'\)") -periodRegEx2 = re.compile("ResultStruct:PERIOD\(.*\)\[(.*),\s*(.*)\]") +periodRegEx = re.compile("(.*),\s*(.*)") + +T_BLOB_AS_LOCATOR = 408 +T_CLOB_AS_LOCATOR = 424 +T_JSON_AS_LOCATOR = 884 +T_XML_AS_LOCATOR = 860 NUMBER_TYPES = {"BYTEINT", "BIGINT", "DECIMAL", "DOUBLE", "DOUBLE PRECISION", "INTEGER", "NUMBER", "SMALLINT", "FLOAT", "INT", "NUMERIC", @@ -55,8 +59,14 @@ FLOAT_TYPES = {"FLOAT", "DOUBLE", "DOUBLE PRECISION", "REAL"} -BINARY_TYPES = {"BLOB", "BYTE", "VARBYTE"} +BINARY_TYPES = 
{"BLOB", "BYTE", "VARBYTE", "LONG VARBYTE"} +LOB_LOCATOR_TYPES = { + T_BLOB_AS_LOCATOR : "BLOB AS LOCATOR", + T_CLOB_AS_LOCATOR : "CLOB AS LOCATOR", + T_JSON_AS_LOCATOR : "JSON AS LOCATOR", + T_XML_AS_LOCATOR : "XML AS LOCATOR" +} def _getMs(m, num): ms = m.group(num) @@ -134,6 +144,7 @@ def _convertInterval(dataType, value, regEx, *args): def convertInterval(dataType, value): value = value.strip() + dataType = re.sub (" ?\(([0-9, ]+)\)", "", dataType) if dataType == "INTERVAL YEAR": return _convertScalarInterval(dataType, value, "years") elif dataType == "INTERVAL YEAR TO MONTH": @@ -172,9 +183,7 @@ def convertInterval(dataType, value): def convertPeriod(dataType, value): - m = periodRegEx1.match(value) - if not m: - m = periodRegEx2.match(value) + m = periodRegEx.match(value) if m: if "TIMESTAMP" in dataType: start = convertTimestamp(m.group(1)) @@ -187,13 +196,38 @@ def convertPeriod(dataType, value): end = convertDate(m.group(2)) else: raise InterfaceError("INVALID_PERIOD", - "Unknown PERIOD data type: {}".format( + "Unknown PERIOD data type: {} {}".format( dataType, value)) else: raise InterfaceError( "INVALID_PERIOD", "{} format invalid: {}".format(dataType, value)) return Period(start, end) + #end convertPeriod +def removeTrailingZerosFromPeriod (value): + if value is None: + return value + m = re.compile("\('(.*)',\s*'(.*)'\)").match(str(value)) + if m is not None and len (m.groups()) == 2: + value = "{},{}".format (removeTrailingZeros (m.group (1)), removeTrailingZeros (m.group (2))) + return value + #end convertInParamPeriod + +def removeTrailingZerosFromTimeAndTimestamp (value): + if value is None: + return value + return removeTrailingZeros (str(value)) + +def removeTrailingZeros (value): + seconds = re.compile(".*(\.[0-9]*).*").match (str(value)) + + if seconds is not None: + sSecond = seconds.group (1).rstrip ('0') + sSecond = "" if len (sSecond) == 1 else sSecond + value = re.sub ('\.[0-9]*', sSecond, str(value)) + + return value + #end 
removeTrailingZeros def zeroIfNone(value): if value is None: @@ -205,13 +239,13 @@ class DataTypeConverter: """Handles conversion of result set data types into python objects.""" - def convertValue(self, dbType, dataType, typeCode, value): + def convertValue(self, dataType, typeCode, value): """Converts the value returned by the database into the desired python object.""" raise NotImplementedError( "convertValue must be implemented by sub-class") - def convertType(self, dbType, dataType): + def convertType(self, dataType): """Converts the data type to a python type code.""" raise NotImplementedError( "convertType must be implemented by sub-class") @@ -224,62 +258,57 @@ class DefaultDataTypeConverter (DataTypeConverter): def __init__(self, useFloat=False): self.useFloat = useFloat - def convertValue(self, dbType, dataType, typeCode, value): + def convertValue(self, dataType, typeCode, value): """Converts the value returned by the database into the desired python object.""" logger.trace( "Converting \"%s\" to (%s, %s).", value, dataType, typeCode) if value is not None: if typeCode == NUMBER: - try: - return NUMBER(value) - except: - # Handle infinity and NaN for older ODBC drivers. 
- if value == "1.#INF": - return NUMBER('Infinity') - elif value == "-1.#INF": - return NUMBER('-Infinity') - else: - return NUMBER('NaN') + return NUMBER(value) elif typeCode == float: - return value if not util.isString else float(value) + return value if not isinstance(value, str) else float(value) elif typeCode == Timestamp: - if util.isString(value): + if isinstance(value, str): return convertTimestamp(value) + elif isinstance (value, datetime.date): + return (value) else: return datetime.datetime.fromtimestamp( value // SECS_IN_MILLISECS).replace( microsecond=value % SECS_IN_MILLISECS * MILLISECS_IN_MICROSECS) elif typeCode == Time: - if util.isString(value): + if isinstance(value, str): return convertTime(value) + elif isinstance (value, datetime.time): + return (value) else: return datetime.datetime.fromtimestamp( value // SECS_IN_MILLISECS).replace( microsecond=value % SECS_IN_MILLISECS * MILLISECS_IN_MICROSECS).time() elif typeCode == Date: - if util.isString(value): + if isinstance(value, str): return convertDate(value) - else: + elif type (value) is int: return datetime.datetime.fromtimestamp( value // SECS_IN_MILLISECS).replace( microsecond=value % SECS_IN_MILLISECS * MILLISECS_IN_MICROSECS).date() elif typeCode == BINARY: - if util.isString(value): + if isinstance(value, str): return bytearray.fromhex(value) elif dataType.startswith("INTERVAL"): return convertInterval(dataType, value) - elif dataType.startswith("JSON") and util.isString(value): + elif dataType.startswith("JSON") and isinstance(value, str): return json.loads(value, parse_int=decimal.Decimal, parse_float=decimal.Decimal) elif dataType.startswith("PERIOD"): return convertPeriod(dataType, value) return value - def convertType(self, dbType, dataType): + def convertType(self, dataType): """Converts the data type to a python type code.""" typeCode = STRING if dataType in NUMBER_TYPES: @@ -414,6 +443,8 @@ def __str__(self): _appendInterval(s, self.seconds, separator=":") if self.negative: 
s.insert(0, "-") + else: + s.insert(0, " ") return "".join(s) def __repr__(self): @@ -436,6 +467,7 @@ class Period: def __init__(self, start, end): self.start = start self.end = end + s = "('" + str(start) + "', '" + str(end) + "')" def __str__(self): return "('" + str(self.start) + "', '" + str(self.end) + "')" diff --git a/teradata/pulljson.py b/teradata/pulljson.py index 8b801db..5c1ff1a 100644 --- a/teradata/pulljson.py +++ b/teradata/pulljson.py @@ -28,10 +28,6 @@ import json import logging from . import util # @UnusedImport # noqa -if sys.version_info[0] == 2: - from StringIO import StringIO # @UnresolvedImport #@UnusedImport -else: - from io import StringIO # @UnresolvedImport @UnusedImport @Reimport # noqa logger = logging.getLogger(__name__) diff --git a/teradata/tdodbc.py b/teradata/tdodbc.py deleted file mode 100644 index cfabf16..0000000 --- a/teradata/tdodbc.py +++ /dev/null @@ -1,1159 +0,0 @@ -"""An implementation of the Python Database API Specification v2.0 - using Teradata ODBC.""" - -# The MIT License (MIT) -# -# Copyright (c) 2015 by Teradata -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import atexit -import collections -import ctypes -import platform -import re -import sys -import threading - -from . import util, datatypes -from .api import * # @UnusedWildImport # noqa - - -logger = logging.getLogger(__name__) - -# ODBC Constants -SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3 -SQL_ATTR_QUERY_TIMEOUT, SQL_ATTR_AUTOCOMMIT = 0, 102 -SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3 -SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1, -SQL_ERROR, SQL_INVALID_HANDLE = -1, -2 -SQL_NEED_DATA, SQL_NO_DATA = 99, 100 -SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS = 0, 2, 3 -SQL_PARAM_TYPE_UNKNOWN = 0 -SQL_PARAM_INPUT, SQL_PARAM_INPUT_OUTPUT, SQL_PARAM_OUTPUT = 1, 2, 4 -SQL_ATTR_PARAM_BIND_TYPE = 18 -SQL_ATTR_ROWS_FETCHED_PTR, SQL_ATTR_ROW_STATUS_PTR = 26, 25 -SQL_ATTR_ROW_ARRAY_SIZE = 27 -SQL_ATTR_PARAMS_PROCESSED_PTR, SQL_ATTR_PARAM_STATUS_PTR = 21, 20 -SQL_ATTR_PARAMSET_SIZE = 22 -SQL_PARAM_BIND_BY_COLUMN = 0 -SQL_NULL_DATA, SQL_NTS = -1, -3 -SQL_IS_POINTER, SQL_IS_UINTEGER, SQL_IS_INTEGER = -4, -5, -6 -SQL_FETCH_NEXT, SQL_FETCH_FIRST, SQL_FETCH_LAST = 1, 2, 4 - -SQL_SIGNED_OFFSET = -20 -SQL_C_BINARY, SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY = -2, -2, -3, -4 -SQL_C_WCHAR, SQL_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR = -8, -8, -9, -10 -SQL_C_SBIGINT = -5 + SQL_SIGNED_OFFSET -SQL_FLOAT = 6 -SQL_C_FLOAT = SQL_REAL = 7 -SQL_C_DOUBLE = SQL_DOUBLE = 8 -SQL_DESC_TYPE_NAME = 14 -SQL_COMMIT, SQL_ROLLBACK = 0, 1 - -SQL_STATE_DATA_TRUNCATED = '01004' -SQL_STATE_CONNECTION_NOT_OPEN = '08003' -SQL_STATE_INVALID_TRANSACTION_STATE = '25000' - -SQLLEN = ctypes.c_ssize_t -SQLULEN = ctypes.c_size_t -SQLUSMALLINT = ctypes.c_ushort -SQLSMALLINT = ctypes.c_short -SQLINTEGER = 
ctypes.c_int -SQLFLOAT = ctypes.c_float -SQLDOUBLE = ctypes.c_double -SQLBYTE = ctypes.c_ubyte -SQLCHAR = ctypes.c_char -SQLWCHAR = ctypes.c_wchar -SQLRETURN = SQLSMALLINT -SQLPOINTER = ctypes.c_void_p -SQLHANDLE = ctypes.c_void_p - -ADDR = ctypes.byref -PTR = ctypes.POINTER -ERROR_BUFFER_SIZE = 2 ** 10 -SMALL_BUFFER_SIZE = 2 ** 12 -LARGE_BUFFER_SIZE = 2 ** 20 -TRUE = 1 -FALSE = 0 - -odbc = None -hEnv = None -drivers = None -lock = threading.Lock() -pyVer = sys.version_info[0] -osType = platform.system() - -# The amount of seconds to wait when submitting non-user defined SQL (e.g. -# set query bands, etc). -QUERY_TIMEOUT = 120 - -if pyVer > 2: - unicode = str # @ReservedAssignment - -# Define OS specific methods for handling buffers and strings. -if osType == "Darwin" or osType == "Windows" or osType.startswith('CYGWIN'): - # Mac OSx and Windows - def _createBuffer(l): - return ctypes.create_unicode_buffer(l) - - def _inputStr(s, l=None): - if s is None: - return None - return ctypes.create_unicode_buffer( - (s if util.isString(s) else str(s)), l) - - def _outputStr(s): - return s.value - - def _convertParam(s): - if s is None: - return None - return s if util.isString(s) else str(s) -else: - # Unix/Linux - # Multiply by 3 as one UTF-16 character can require 3 UTF-8 bytes. 
- def _createBuffer(l): - return ctypes.create_string_buffer(l * 3) - - def _inputStr(s, l=None): - if s is None: - return None - return ctypes.create_string_buffer( - (s if util.isString(s) else str(s)).encode('utf8'), l) - - def _outputStr(s): - return unicode(s.raw.partition(b'\00')[0], 'utf8') - - def _convertParam(s): - if s is None: - return None - return (s if util.isString(s) else str(s)).encode('utf8') - - SQLWCHAR = ctypes.c_char - -connections = [] - - -def cleanupConnections(): - """Cleanup open connections.""" - if connections: - logger.warn( - "%s open connections found on exit, attempting to close...", - len(connections)) - for conn in list(connections): - conn.close() - - -def getDiagnosticInfo(handle, handleType=SQL_HANDLE_STMT): - """Gets diagnostic information associated with ODBC calls, particularly - when errors occur.""" - info = [] - infoNumber = 1 - while True: - sqlState = _createBuffer(6) - nativeError = SQLINTEGER() - messageBuffer = _createBuffer(ERROR_BUFFER_SIZE) - messageLength = SQLSMALLINT() - rc = odbc.SQLGetDiagRecW(handleType, handle, infoNumber, sqlState, - ADDR(nativeError), messageBuffer, - len(messageBuffer), ADDR(messageLength)) - if rc == SQL_SUCCESS_WITH_INFO and \ - messageLength.value > ctypes.sizeof(messageBuffer): - # Resize buffer to fit entire message. 
- messageBuffer = _createBuffer(messageLength.value) - continue - if rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO: - info.append( - (_outputStr(sqlState), _outputStr(messageBuffer), - abs(nativeError.value))) - infoNumber += 1 - elif rc == SQL_NO_DATA: - return info - elif rc == SQL_INVALID_HANDLE: - raise InterfaceError( - 'SQL_INVALID_HANDLE', - "Invalid handle passed to SQLGetDiagRecW.") - elif rc == SQL_ERROR: - if infoNumber > 1: - return info - raise InterfaceError( - "SQL_ERROR", "SQL_ERROR returned from SQLGetDiagRecW.") - else: - raise InterfaceError( - "UNKNOWN_RETURN_CODE", - "SQLGetDiagRecW returned an unknown return code: %s", rc) - - -def checkStatus(rc, hEnv=SQL_NULL_HANDLE, hDbc=SQL_NULL_HANDLE, - hStmt=SQL_NULL_HANDLE, method="Method", ignore=None): - """ Check return status code and log any information or error messages. - If error is returned, raise exception.""" - sqlState = [] - logger.trace("%s returned status code %s", method, rc) - if rc not in (SQL_SUCCESS, SQL_NO_DATA): - if hStmt != SQL_NULL_HANDLE: - info = getDiagnosticInfo(hStmt, SQL_HANDLE_STMT) - elif hDbc != SQL_NULL_HANDLE: - info = getDiagnosticInfo(hDbc, SQL_HANDLE_DBC) - else: - info = getDiagnosticInfo(hEnv, SQL_HANDLE_ENV) - for i in info: - sqlState.append(i[0]) - if rc == SQL_SUCCESS_WITH_INFO: - logger.debug( - u"{} succeeded with info: [{}] {}".format(method, - i[0], i[1])) - elif not ignore or i[0] not in ignore: - logger.debug((u"{} returned non-successful error code " - u"{}: [{}] {}").format(method, rc, i[0], i[1])) - msg = ", ".join(map(lambda m: m[1], info)) - if re.search(r'[^0-9\s]', msg) is None or i[0] == 'I': - msg = msg + (". 
Check that the ODBC driver is installed " - "and the ODBCINI or ODBCINST environment " - "variables are correctly set.") - raise DatabaseError(i[2], u"[{}] {}".format(i[0], msg), i[0]) - else: - logger.debug( - u"Ignoring return of {} from {}: [{}] {}".format(rc, - method, - i[0], - i[1])) - # Breaking here because this error is ignored and info could - # contain older error messages. - # E.g. if error was SQL_STATE_CONNECTION_NOT_OPEN, the next - # error would be the original connection error. - break - if not info: - logger.info( - "No information associated with return code %s from %s", - rc, method) - return sqlState - - -def prototype(func, *args): - """Setup function prototype""" - func.restype = SQLRETURN - func.argtypes = args - - -def initFunctionPrototypes(): - """Initialize function prototypes for ODBC calls.""" - prototype(odbc.SQLAllocHandle, SQLSMALLINT, SQLHANDLE, PTR(SQLHANDLE)) - prototype(odbc.SQLGetDiagRecW, SQLSMALLINT, SQLHANDLE, SQLSMALLINT, - PTR(SQLWCHAR), PTR(SQLINTEGER), PTR(SQLWCHAR), SQLSMALLINT, - PTR(SQLSMALLINT)) - prototype(odbc.SQLSetEnvAttr, SQLHANDLE, - SQLINTEGER, SQLPOINTER, SQLINTEGER) - prototype(odbc.SQLDriverConnectW, SQLHANDLE, SQLHANDLE, - PTR(SQLWCHAR), SQLSMALLINT, PTR(SQLWCHAR), SQLSMALLINT, - PTR(SQLSMALLINT), SQLUSMALLINT) - prototype(odbc.SQLFreeHandle, SQLSMALLINT, SQLHANDLE) - prototype(odbc.SQLExecDirectW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER) - prototype(odbc.SQLNumResultCols, SQLHANDLE, PTR(SQLSMALLINT)) - prototype(odbc.SQLDescribeColW, SQLHANDLE, SQLUSMALLINT, PTR(SQLWCHAR), - SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLSMALLINT), PTR(SQLULEN), - PTR(SQLSMALLINT), PTR(SQLSMALLINT)) - prototype(odbc.SQLColAttributeW, SQLHANDLE, SQLUSMALLINT, - SQLUSMALLINT, SQLPOINTER, SQLSMALLINT, PTR(SQLSMALLINT), - PTR(SQLLEN)) - prototype(odbc.SQLFetch, SQLHANDLE) - prototype(odbc.SQLGetData, SQLHANDLE, SQLUSMALLINT, - SQLSMALLINT, SQLPOINTER, SQLLEN, PTR(SQLLEN)) - prototype(odbc.SQLFreeStmt, SQLHANDLE, SQLUSMALLINT) - 
prototype(odbc.SQLPrepareW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER) - prototype(odbc.SQLNumParams, SQLHANDLE, PTR(SQLSMALLINT)) - prototype(odbc.SQLDescribeParam, SQLHANDLE, SQLUSMALLINT, PTR( - SQLSMALLINT), PTR(SQLULEN), PTR(SQLSMALLINT), PTR(SQLSMALLINT)) - prototype(odbc.SQLBindParameter, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT, - SQLSMALLINT, SQLSMALLINT, SQLULEN, SQLSMALLINT, SQLPOINTER, - SQLLEN, PTR(SQLLEN)) - prototype(odbc.SQLExecute, SQLHANDLE) - prototype(odbc.SQLSetStmtAttr, SQLHANDLE, - SQLINTEGER, SQLPOINTER, SQLINTEGER) - prototype(odbc.SQLMoreResults, SQLHANDLE) - prototype(odbc.SQLDisconnect, SQLHANDLE) - prototype(odbc.SQLSetConnectAttr, SQLHANDLE, - SQLINTEGER, SQLPOINTER, SQLINTEGER) - prototype(odbc.SQLEndTran, SQLSMALLINT, SQLHANDLE, SQLSMALLINT) - prototype(odbc.SQLRowCount, SQLHANDLE, PTR(SQLLEN)) - prototype(odbc.SQLBindCol, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT, - SQLPOINTER, SQLLEN, PTR(SQLLEN)) - prototype(odbc.SQLDrivers, SQLHANDLE, SQLUSMALLINT, PTR(SQLCHAR), - SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLCHAR), SQLSMALLINT, - PTR(SQLSMALLINT)) - - -def initOdbcLibrary(odbcLibPath=None): - """Initialize the ODBC Library.""" - global odbc - if odbc is None: - if osType == "Windows": - odbc = ctypes.windll.odbc32 - else: - if not odbcLibPath: - # If MAC OSx - if osType == "Darwin": - odbcLibPath = "libiodbc.dylib" - elif osType.startswith("CYGWIN"): - odbcLibPath = "odbc32.dll" - else: - odbcLibPath = 'libodbc.so' - logger.info("Loading ODBC Library: %s", odbcLibPath) - odbc = ctypes.cdll.LoadLibrary(odbcLibPath) - - -def initDriverList(): - global drivers - if drivers is None: - drivers = [] - description = ctypes.create_string_buffer(SMALL_BUFFER_SIZE) - descriptionLength = SQLSMALLINT() - attributesLength = SQLSMALLINT() - rc = SQL_SUCCESS - direction = SQL_FETCH_FIRST - while True: - rc = odbc.SQLDrivers(hEnv, direction, description, - len(description), ADDR(descriptionLength), - None, 0, attributesLength) - checkStatus(rc, hEnv=hEnv) - if rc == 
SQL_NO_DATA: - break - drivers.append(description.value.decode("utf-8")) - direction = SQL_FETCH_NEXT - logger.info("Available drivers: {}".format(", ".join(drivers))) - - -def initOdbcEnv(): - """Initialize ODBC environment handle.""" - global hEnv - if hEnv is None: - hEnv = SQLPOINTER() - rc = odbc.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(hEnv)) - checkStatus(rc, hEnv=hEnv) - atexit.register(cleanupOdbcEnv) - atexit.register(cleanupConnections) - # Set the ODBC environment's compatibility level to ODBC 3.0 - rc = odbc.SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0) - checkStatus(rc, hEnv=hEnv) - - -def cleanupOdbcEnv(): - """Cleanup ODBC environment handle.""" - if hEnv: - odbc.SQLFreeHandle(SQL_HANDLE_ENV, hEnv) - - -def init(odbcLibPath=None): - try: - lock.acquire() - initOdbcLibrary(odbcLibPath) - initFunctionPrototypes() - initOdbcEnv() - initDriverList() - finally: - lock.release() - - -def determineDriver(dbType, driver): - retval = driver - if driver is not None: - if driver not in drivers: - raise InterfaceError( - "DRIVER_NOT_FOUND", - "No driver found with name '{}'. " - " Available drivers: {}".format(driver, ",".join(drivers))) - else: - matches = [] - for driver in drivers: - if dbType in driver: - matches.append(driver) - if not matches: - raise InterfaceError( - "DRIVER_NOT_FOUND", - "No driver found for '{}'. " - "Available drivers: {}".format(dbType, ",".join(drivers))) - else: - retval = matches[len(matches) - 1] - if len(matches) > 1: - logger.warning( - "More than one driver found " - "for '{}'. Using '{}'." 
- " Specify the 'driver' option to " - "select a specific driver.".format(dbType, retval)) - return retval - - -class OdbcConnection: - - """Represents a Connection to Teradata using ODBC.""" - - def __init__(self, dbType="Teradata", system=None, - username=None, password=None, autoCommit=False, - transactionMode=None, queryBands=None, odbcLibPath=None, - dataTypeConverter=datatypes.DefaultDataTypeConverter(), - driver=None, **kwargs): - """Creates an ODBC connection.""" - self.hDbc = SQLPOINTER() - self.cursorCount = 0 - self.sessionno = 0 - self.cursors = [] - self.dbType = dbType - self.converter = dataTypeConverter - - # Initialize connection handle - init(odbcLibPath) - - # Build connect string - extraParams = set(k.lower() for k in kwargs) - connectParams = collections.OrderedDict() - if "dsn" not in extraParams: - connectParams["DRIVER"] = determineDriver(dbType, driver) - if system: - connectParams["DBCNAME"] = system - if username: - connectParams["UID"] = username - if password: - connectParams["PWD"] = password - if transactionMode: - connectParams["SESSIONMODE"] = "Teradata" \ - if transactionMode == "TERA" else transactionMode - connectParams.update(kwargs) - connectString = u";".join(u"{}={}".format(key, value) - for key, value in connectParams.items()) - - rc = odbc.SQLAllocHandle(SQL_HANDLE_DBC, hEnv, ADDR(self.hDbc)) - checkStatus(rc, hEnv=hEnv, method="SQLAllocHandle") - - # Create connection - logger.debug("Creating connection using ODBC ConnectString: %s", - re.sub("PWD=.*?(;|$)", "PWD=XXX;", connectString)) - try: - lock.acquire() - rc = odbc.SQLDriverConnectW(self.hDbc, 0, _inputStr(connectString), - SQL_NTS, None, 0, None, 0) - finally: - lock.release() - try: - checkStatus(rc, hDbc=self.hDbc, method="SQLDriverConnectW") - except: - rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc) - self.hDbc = None - raise - connections.append(self) - - # Setup autocommit, query bands, etc. 
- try: - logger.debug("Setting AUTOCOMMIT to %s", - "True" if util.booleanValue(autoCommit) else "False") - rc = odbc.SQLSetConnectAttr( - self.hDbc, SQL_ATTR_AUTOCOMMIT, - TRUE if util.booleanValue(autoCommit) else FALSE, 0) - checkStatus( - rc, hDbc=self.hDbc, - method="SQLSetConnectAttr - SQL_ATTR_AUTOCOMMIT") - if dbType == "Teradata": - with self.cursor() as c: - self.sessionno = c.execute( - "SELECT SESSION", - queryTimeout=QUERY_TIMEOUT).fetchone()[0] - logger.debug("SELECT SESSION returned %s", self.sessionno) - if queryBands: - c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format( - u";".join(u"{}={}".format(util.toUnicode(k), - util.toUnicode(v)) - for k, v in queryBands.items())), - queryTimeout=QUERY_TIMEOUT) - self.commit() - logger.debug("Created session %s.", self.sessionno) - except Exception: - self.close() - raise - - def close(self): - """CLoses an ODBC Connection.""" - if self.hDbc: - if self.sessionno: - logger.debug("Closing session %s...", self.sessionno) - for cursor in list(self.cursors): - cursor.close() - rc = odbc.SQLDisconnect(self.hDbc) - sqlState = checkStatus( - rc, hDbc=self.hDbc, method="SQLDisconnect", - ignore=[SQL_STATE_CONNECTION_NOT_OPEN, - SQL_STATE_INVALID_TRANSACTION_STATE]) - if SQL_STATE_INVALID_TRANSACTION_STATE in sqlState: - logger.warning("Rolling back open transaction for session %s " - "so it can be closed.", self.sessionno) - rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK) - checkStatus( - rc, hDbc=self.hDbc, - method="SQLEndTran - SQL_ROLLBACK - Disconnect") - rc = odbc.SQLDisconnect(self.hDbc) - checkStatus(rc, hDbc=self.hDbc, method="SQLDisconnect") - rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc) - if rc != SQL_INVALID_HANDLE: - checkStatus(rc, hDbc=self.hDbc, method="SQLFreeHandle") - connections.remove(self) - self.hDbc = None - if self.sessionno: - logger.debug("Session %s closed.", self.sessionno) - - def commit(self): - """Commits a transaction.""" - logger.debug("Committing 
transaction...") - rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_COMMIT) - checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_COMMIT") - - def rollback(self): - """Rollsback a transaction.""" - logger.debug("Rolling back transaction...") - rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK) - checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_ROLLBACK") - - def cursor(self): - """Returns a cursor.""" - cursor = OdbcCursor( - self, self.dbType, self.converter, self.cursorCount) - self.cursorCount += 1 - return cursor - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, t, value, traceback): - self.close() - - def __repr__(self): - return "OdbcConnection(sessionno={})".format(self.sessionno) - -connect = OdbcConnection - - -class OdbcCursor (util.Cursor): - - """Represents an ODBC Cursor.""" - - def __init__(self, connection, dbType, converter, num): - util.Cursor.__init__(self, connection, dbType, converter) - self.num = num - self.moreResults = None - if num > 0: - logger.debug( - "Creating cursor %s for session %s.", self.num, - self.connection.sessionno) - self.hStmt = SQLPOINTER() - rc = odbc.SQLAllocHandle( - SQL_HANDLE_STMT, connection.hDbc, ADDR(self.hStmt)) - checkStatus(rc, hStmt=self.hStmt) - connection.cursors.append(self) - - def callproc(self, procname, params, queryTimeout=0): - self._checkClosed() - query = "CALL {} (".format(procname) - for i in range(0, len(params)): - if i > 0: - query += ", " - query += "?" 
- query += ")" - logger.debug("Executing Procedure: %s", query) - self.execute(query, params, queryTimeout=queryTimeout) - return util.OutParams(params, self.dbType, self.converter) - - def close(self): - if self.hStmt: - if self.num > 0: - logger.debug( - "Closing cursor %s for session %s.", self.num, - self.connection.sessionno) - rc = odbc.SQLFreeHandle(SQL_HANDLE_STMT, self.hStmt) - checkStatus(rc, hStmt=self.hStmt) - self.connection.cursors.remove(self) - self.hStmt = None - - def _setQueryTimeout(self, queryTimeout): - rc = odbc.SQLSetStmtAttr( - self.hStmt, SQL_ATTR_QUERY_TIMEOUT, SQLPOINTER(queryTimeout), - SQL_IS_UINTEGER) - checkStatus( - rc, hStmt=self.hStmt, - method="SQLSetStmtStmtAttr - SQL_ATTR_QUERY_TIMEOUT") - - def execute(self, query, params=None, queryTimeout=0): - self._checkClosed() - if params: - self.executemany(query, [params, ], queryTimeout) - else: - if self.connection.sessionno: - logger.debug( - "Executing query on session %s using SQLExecDirectW: %s", - self.connection.sessionno, query) - self._free() - self._setQueryTimeout(queryTimeout) - rc = odbc.SQLExecDirectW( - self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS) - checkStatus(rc, hStmt=self.hStmt, method="SQLExecDirectW") - self._handleResults() - return self - - def executemany(self, query, params, batch=False, queryTimeout=0): - self._checkClosed() - self._free() - # Prepare the query - rc = odbc.SQLPrepareW( - self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS) - checkStatus(rc, hStmt=self.hStmt, method="SQLPrepare") - self._setQueryTimeout(queryTimeout) - # Get the number of parameters in the SQL statement. - numParams = SQLSMALLINT() - rc = odbc.SQLNumParams(self.hStmt, ADDR(numParams)) - checkStatus(rc, hStmt=self.hStmt, method="SQLNumParams") - numParams = numParams.value - # The argument types. 
- dataTypes = [] - for paramNum in range(0, numParams): - dataType = SQLSMALLINT() - parameterSize = SQLULEN() - decimalDigits = SQLSMALLINT() - nullable = SQLSMALLINT() - rc = odbc.SQLDescribeParam( - self.hStmt, paramNum + 1, ADDR(dataType), ADDR(parameterSize), - ADDR(decimalDigits), ADDR(nullable)) - checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeParams") - dataTypes.append(dataType.value) - if batch: - logger.debug( - "Executing query on session %s using batched SQLExecute: %s", - self.connection.sessionno, query) - self._executeManyBatch(params, numParams, dataTypes) - else: - logger.debug( - "Executing query on session %s using SQLExecute: %s", - self.connection.sessionno, query) - rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMSET_SIZE, 1, 0) - checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr") - paramSetNum = 0 - for p in params: - paramSetNum += 1 - logger.trace("ParamSet %s: %s", paramSetNum, p) - if len(p) != numParams: - raise InterfaceError( - "PARAMS_MISMATCH", "The number of supplied parameters " - "({}) does not match the expected number of " - "parameters ({}).".format(len(p), numParams)) - paramArray = [] - lengthArray = [] - for paramNum in range(0, numParams): - val = p[paramNum] - inputOutputType = _getInputOutputType(val) - valueType, paramType = _getParamValueType( - dataTypes[paramNum]) - param, length, null = _getParamValue(val, valueType, False) - paramArray.append(param) - if param is not None: - if valueType == SQL_C_BINARY: - bufSize = SQLLEN(length) - lengthArray.append(SQLLEN(length)) - columnSize = SQLULEN(length) - elif valueType == SQL_C_DOUBLE: - bufSize = SQLLEN(length) - lengthArray.append(SQLLEN(length)) - columnSize = SQLULEN(length) - param = ADDR(param) - else: - bufSize = SQLLEN(ctypes.sizeof(param)) - lengthArray.append(SQLLEN(SQL_NTS)) - columnSize = SQLULEN(length) - if null: - # Handle INOUT parameter with NULL input value. 
- lengthArray.pop(-1) - lengthArray.append(SQLLEN(SQL_NULL_DATA)) - else: - bufSize = SQLLEN(0) - columnSize = SQLULEN(0) - lengthArray.append(SQLLEN(SQL_NULL_DATA)) - logger.trace("Binding parameter %s...", paramNum + 1) - rc = odbc.SQLBindParameter( - self.hStmt, paramNum + 1, inputOutputType, valueType, - paramType, columnSize, 0, param, bufSize, - ADDR(lengthArray[paramNum])) - checkStatus( - rc, hStmt=self.hStmt, method="SQLBindParameter") - logger.debug("Executing prepared statement.") - rc = odbc.SQLExecute(self.hStmt) - for paramNum in range(0, numParams): - val = p[paramNum] - if isinstance(val, OutParam): - val.size = lengthArray[paramNum].value - checkStatus(rc, hStmt=self.hStmt, method="SQLExecute") - self._handleResults() - return self - - def _executeManyBatch(self, params, numParams, dataTypes): - # Get the number of parameter sets. - paramSetSize = len(params) - # Set the SQL_ATTR_PARAM_BIND_TYPE statement attribute to use - # column-wise binding. - rc = odbc.SQLSetStmtAttr( - self.hStmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, 0) - checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr") - # Specify the number of elements in each parameter array. - rc = odbc.SQLSetStmtAttr( - self.hStmt, SQL_ATTR_PARAMSET_SIZE, paramSetSize, 0) - checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr") - # Specify a PTR to get the number of parameters processed. - # paramsProcessed = SQLULEN() - # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMS_PROCESSED_PTR, - # ADDR(paramsProcessed), SQL_IS_POINTER) - # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr") - # Specify a PTR to get the status of the parameters processed. - # paramsStatus = (SQLUSMALLINT * paramSetSize)() - # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAM_STATUS_PTR, - # ADDR(paramsStatus), SQL_IS_POINTER) - # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr") - # Bind the parameters. 
- paramArrays = [] - lengthArrays = [] - paramSetSize = len(params) - paramSetNum = 0 - debugEnabled = logger.isEnabledFor(logging.DEBUG) - for p in params: - paramSetNum += 1 - if debugEnabled: - logger.debug("ParamSet %s: %s", paramSetNum, p) - if len(p) != numParams: - raise InterfaceError( - "PARAMS_MISMATCH", "The number of supplied parameters " - "({}) does not match the expected number of parameters " - "({}).".format(len(p), numParams)) - for paramNum in range(0, numParams): - p = [] - valueType, paramType = _getParamValueType(dataTypes[paramNum]) - maxLen = 0 - for paramSetNum in range(0, paramSetSize): - param, length, null = _getParamValue( # @UnusedVariable - params[paramSetNum][paramNum], valueType, True) - if length > maxLen: - maxLen = length - p.append(param) - if debugEnabled: - logger.debug("Max length for parameter %s is %s.", - paramNum + 1, maxLen) - if valueType == SQL_C_BINARY: - valueSize = SQLLEN(maxLen) - paramArrays.append((SQLBYTE * (paramSetSize * maxLen))()) - elif valueType == SQL_C_DOUBLE: - valueSize = SQLLEN(maxLen) - paramArrays.append((SQLDOUBLE * paramSetSize)()) - else: - maxLen += 1 - valueSize = SQLLEN(ctypes.sizeof(SQLWCHAR) * maxLen) - paramArrays.append(_createBuffer(paramSetSize * maxLen)) - lengthArrays.append((SQLLEN * paramSetSize)()) - for paramSetNum in range(0, paramSetSize): - index = paramSetNum * maxLen - if p[paramSetNum] is not None: - if valueType == SQL_C_DOUBLE: - paramArrays[paramNum][paramSetNum] = p[paramSetNum] - else: - for c in p[paramSetNum]: - paramArrays[paramNum][index] = c - index += 1 - if valueType == SQL_C_BINARY: - lengthArrays[paramNum][ - paramSetNum] = len(p[paramSetNum]) - else: - lengthArrays[paramNum][ - paramSetNum] = SQLLEN(SQL_NTS) - paramArrays[paramNum][ - index] = _convertParam("\x00")[0] - else: - lengthArrays[paramNum][paramSetNum] = SQLLEN(SQL_NULL_DATA) - if valueType == SQL_C_WCHAR: - paramArrays[paramNum][index] = _convertParam("\x00")[0] - if debugEnabled: - 
logger.debug("Binding parameter %s...", paramNum + 1) - rc = odbc.SQLBindParameter(self.hStmt, paramNum + 1, - SQL_PARAM_INPUT, valueType, paramType, - SQLULEN(maxLen), 0, - paramArrays[paramNum], valueSize, - lengthArrays[paramNum]) - checkStatus(rc, hStmt=self.hStmt, method="SQLBindParameter") - # Execute the SQL statement. - if debugEnabled: - logger.debug("Executing prepared statement.") - rc = odbc.SQLExecute(self.hStmt) - checkStatus(rc, hStmt=self.hStmt, method="SQLExecute") - - def _handleResults(self): - # Rest cursor attributes. - self.description = None - self.rowcount = -1 - self.rownumber = None - self.columns = {} - self.types = [] - self.moreResults = None - # Get column count in result set. - columnCount = SQLSMALLINT() - rc = odbc.SQLNumResultCols(self.hStmt, ADDR(columnCount)) - checkStatus(rc, hStmt=self.hStmt, method="SQLNumResultCols") - rowCount = SQLLEN() - rc = odbc.SQLRowCount(self.hStmt, ADDR(rowCount)) - checkStatus(rc, hStmt=self.hStmt, method="SQLRowCount") - self.rowcount = rowCount.value - # Get column meta data and create row iterator. 
- if columnCount.value > 0: - self.description = [] - nameBuf = _createBuffer(SMALL_BUFFER_SIZE) - nameLength = SQLSMALLINT() - dataType = SQLSMALLINT() - columnSize = SQLULEN() - decimalDigits = SQLSMALLINT() - nullable = SQLSMALLINT() - for col in range(0, columnCount.value): - rc = odbc.SQLDescribeColW( - self.hStmt, col + 1, nameBuf, len(nameBuf), - ADDR(nameLength), ADDR(dataType), ADDR(columnSize), - ADDR(decimalDigits), ADDR(nullable)) - checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeColW") - columnName = _outputStr(nameBuf) - odbc.SQLColAttributeW( - self.hStmt, col + 1, SQL_DESC_TYPE_NAME, ADDR(nameBuf), - len(nameBuf), None, None) - checkStatus(rc, hStmt=self.hStmt, method="SQLColAttributeW") - typeName = _outputStr(nameBuf) - typeCode = self.converter.convertType(self.dbType, typeName) - self.columns[columnName.lower()] = col - self.types.append((typeName, typeCode, dataType.value)) - self.description.append(( - columnName, typeCode, None, columnSize.value, - decimalDigits.value, None, nullable.value)) - self.iterator = rowIterator(self) - - def nextset(self): - self._checkClosed() - if self.moreResults is None: - self._checkForMoreResults() - if self.moreResults: - self._handleResults() - return True - - def _checkForMoreResults(self): - rc = odbc.SQLMoreResults(self.hStmt) - checkStatus(rc, hStmt=self.hStmt, method="SQLMoreResults") - self.moreResults = rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO - return self.moreResults - - def _free(self): - rc = odbc.SQLFreeStmt(self.hStmt, SQL_CLOSE) - checkStatus(rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_CLOSE") - rc = odbc.SQLFreeStmt(self.hStmt, SQL_RESET_PARAMS) - checkStatus( - rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_RESET_PARAMS") - - def _checkClosed(self): - if not self.hStmt: - raise InterfaceError("CURSOR_CLOSED", - "Operations cannot be performed on a " - "closed cursor.") - - -def _convertLineFeeds(query): - return "\r".join(util.linesplit(query)) - - -def 
_getInputOutputType(val): - inputOutputType = SQL_PARAM_INPUT - if isinstance(val, InOutParam): - inputOutputType = SQL_PARAM_INPUT_OUTPUT - elif isinstance(val, OutParam): - inputOutputType = SQL_PARAM_OUTPUT - return inputOutputType - - -def _getParamValueType(dataType): - valueType = SQL_C_WCHAR - paramType = SQL_WVARCHAR - if dataType in (SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY): - valueType = SQL_C_BINARY - paramType = dataType - elif dataType == SQL_WLONGVARCHAR: - paramType = SQL_WLONGVARCHAR - elif dataType in (SQL_FLOAT, SQL_DOUBLE, SQL_REAL): - valueType = SQL_C_DOUBLE - paramType = SQL_DOUBLE - return valueType, paramType - - -def _getParamBufferSize(val): - return SMALL_BUFFER_SIZE if val.size is None else val.size - - -def _getParamValue(val, valueType, batch): - length = 0 - null = False - if val is None: - param = None - elif valueType == SQL_C_BINARY: - ba = val - if isinstance(val, InOutParam): - ba = val.inValue - if val.inValue is None: - null = True - ba = bytearray(_getParamBufferSize(val)) - elif isinstance(val, OutParam): - ba = bytearray(_getParamBufferSize(val)) - if ba is not None and not isinstance(ba, bytearray): - raise InterfaceError("Expected bytearray for BINARY parameter.") - length = len(ba) - if batch: - param = ba - else: - byteArr = SQLBYTE * length - param = byteArr.from_buffer(ba) - if isinstance(val, OutParam): - val.setValueFunc(lambda: ba[:val.size]) - elif valueType == SQL_C_DOUBLE: - f = val - if isinstance(val, InOutParam): - f = val.inValue - if f is None: - null = True - f = float(0) - elif isinstance(val, OutParam): - f = float(0) - param = SQLDOUBLE(f if not util.isString(f) else float(f)) - length = ctypes.sizeof(param) - if isinstance(val, OutParam): - val.setValueFunc(lambda: param.value) - else: - if batch: - param = _convertParam(val) - length = len(param) - elif isinstance(val, InOutParam): - length = _getParamBufferSize(val) - if val.inValue is not None: - param = _inputStr(val.inValue, length) - else: - 
param = _createBuffer(length) - null = True - val.setValueFunc(lambda: _outputStr(param)) - elif isinstance(val, OutParam): - length = _getParamBufferSize(val) - param = _createBuffer(length) - val.setValueFunc(lambda: _outputStr(param)) - else: - param = _inputStr(val) - length = len(param) - return param, length, null - - -def _getFetchSize(cursor): - """Gets the fetch size associated with the cursor.""" - fetchSize = cursor.fetchSize - for dataType in cursor.types: - if dataType[2] in (SQL_LONGVARBINARY, SQL_WLONGVARCHAR): - fetchSize = 1 - break - return fetchSize - - -def _getBufSize(cursor, colIndex): - bufSize = cursor.description[colIndex - 1][3] + 1 - dataType = cursor.types[colIndex - 1][0] - if dataType in datatypes.BINARY_TYPES: - pass - elif dataType in datatypes.FLOAT_TYPES: - bufSize = ctypes.sizeof(ctypes.c_double) - elif dataType in datatypes.INT_TYPES: - bufSize = 30 - elif cursor.types[colIndex - 1][2] in (SQL_WCHAR, SQL_WVARCHAR, - SQL_WLONGVARCHAR): - pass - elif dataType.startswith("DATE"): - bufSize = 20 - elif dataType.startswith("TIMESTAMP"): - bufSize = 40 - elif dataType.startswith("TIME"): - bufSize = 30 - elif dataType.startswith("INTERVAL"): - bufSize = 80 - elif dataType.startswith("PERIOD"): - bufSize = 80 - elif dataType.startswith("DECIMAL"): - bufSize = 42 - else: - bufSize = 2 ** 16 + 1 - return bufSize - - -def _setupColumnBuffers(cursor, buffers, bufSizes, dataTypes, indicators, - lastFetchSize): - """Sets up the column buffers for retrieving multiple rows of a result set - at a time""" - fetchSize = _getFetchSize(cursor) - # If the fetchSize hasn't changed since the last time setupBuffers - # was called, then we can reuse the previous buffers. 
- if fetchSize != lastFetchSize: - logger.debug("FETCH_SIZE: %s" % fetchSize) - rc = odbc.SQLSetStmtAttr( - cursor.hStmt, SQL_ATTR_ROW_ARRAY_SIZE, fetchSize, 0) - checkStatus(rc, hStmt=cursor.hStmt, - method="SQLSetStmtAttr - SQL_ATTR_ROW_ARRAY_SIZE") - for col in range(1, len(cursor.description) + 1): - dataType = SQL_C_WCHAR - buffer = None - bufSize = _getBufSize(cursor, col) - lob = False - if cursor.types[col - 1][2] == SQL_LONGVARBINARY: - lob = True - bufSize = LARGE_BUFFER_SIZE - buffer = (ctypes.c_byte * bufSize)() - dataType = SQL_LONGVARBINARY - elif cursor.types[col - 1][2] == SQL_WLONGVARCHAR: - lob = True - buffer = _createBuffer(LARGE_BUFFER_SIZE) - bufSize = ctypes.sizeof(buffer) - dataType = SQL_WLONGVARCHAR - elif cursor.description[col - 1][1] == BINARY: - dataType = SQL_C_BINARY - buffer = (ctypes.c_byte * bufSize * fetchSize)() - elif cursor.types[col - 1][0] in datatypes.FLOAT_TYPES: - dataType = SQL_C_DOUBLE - buffer = (ctypes.c_double * fetchSize)() - else: - buffer = _createBuffer(bufSize * fetchSize) - bufSize = int(ctypes.sizeof(buffer) / fetchSize) - dataTypes.append(dataType) - buffers.append(buffer) - bufSizes.append(bufSize) - logger.debug("Buffer size for column %s: %s", col, bufSize) - indicators.append((SQLLEN * fetchSize)()) - if not lob: - rc = odbc.SQLBindCol(cursor.hStmt, col, dataType, buffer, - bufSize, indicators[col - 1]) - checkStatus(rc, hStmt=cursor.hStmt, method="SQLBindCol") - return fetchSize - - -def _getLobData(cursor, colIndex, buf, binary): - """ Get LOB Data """ - length = SQLLEN() - dataType = SQL_C_WCHAR - bufSize = ctypes.sizeof(buf) - if binary: - dataType = SQL_C_BINARY - rc = odbc.SQLGetData( - cursor.hStmt, colIndex, dataType, buf, bufSize, ADDR(length)) - sqlState = checkStatus(rc, hStmt=cursor.hStmt, method="SQLGetData") - val = None - if length.value != SQL_NULL_DATA: - if SQL_STATE_DATA_TRUNCATED in sqlState: - logger.debug( - "Data truncated. 
Calling SQLGetData to get next part " - "of data for column %s of size %s.", - colIndex, length.value) - if dataType == SQL_C_BINARY: - val = bytearray(length.value) - val[0:bufSize] = buf - newBufSize = len(val) - bufSize - newBuffer = (ctypes.c_byte * newBufSize).from_buffer( - val, bufSize) - rc = odbc.SQLGetData( - cursor.hStmt, colIndex, dataType, newBuffer, - newBufSize, ADDR(length)) - checkStatus( - rc, hStmt=cursor.hStmt, method="SQLGetData2") - else: - val = [_outputStr(buf), ] - while SQL_STATE_DATA_TRUNCATED in sqlState: - rc = odbc.SQLGetData( - cursor.hStmt, colIndex, dataType, buf, bufSize, - ADDR(length)) - sqlState = checkStatus( - rc, hStmt=cursor.hStmt, method="SQLGetData2") - val.append(_outputStr(buf)) - val = "".join(val) - else: - if dataType == SQL_C_BINARY: - val = bytearray( - (ctypes.c_byte * length.value).from_buffer(buf)) - else: - val = _outputStr(buf) - return val - - -def _getRow(cursor, buffers, bufSizes, dataTypes, indicators, rowIndex): - """Reads a row of data from the fetched input buffers. 
If the column - type is a BLOB or CLOB, then that data is obtained via calls to - SQLGetData.""" - row = [] - for col in range(1, len(cursor.description) + 1): - val = None - buf = buffers[col - 1] - bufSize = bufSizes[col - 1] - dataType = dataTypes[col - 1] - length = indicators[col - 1][rowIndex] - if length != SQL_NULL_DATA: - if dataType == SQL_C_BINARY: - val = bytearray((ctypes.c_byte * length).from_buffer( - buf, bufSize * rowIndex)) - elif dataType == SQL_C_DOUBLE: - val = ctypes.c_double.from_buffer(buf, - bufSize * rowIndex).value - elif dataType == SQL_WLONGVARCHAR: - val = _getLobData(cursor, col, buf, False) - elif dataType == SQL_LONGVARBINARY: - val = _getLobData(cursor, col, buf, True) - else: - chLen = (int)(bufSize / ctypes.sizeof(SQLWCHAR)) - chBuf = (SQLWCHAR * chLen) - val = _outputStr(chBuf.from_buffer(buf, - bufSize * rowIndex)) - row.append(val) - return row - - -def rowIterator(cursor): - buffers = [] - bufSizes = [] - dataTypes = [] - indicators = [] - rowCount = SQLULEN() - lastFetchSize = None - rc = odbc.SQLSetStmtAttr( - cursor.hStmt, SQL_ATTR_ROWS_FETCHED_PTR, ADDR(rowCount), 0) - checkStatus(rc, hStmt=cursor.hStmt, - method="SQLSetStmtAttr - SQL_ATTR_ROWS_FETCHED_PTR") - while cursor.description is not None: - lastFetchSize = _setupColumnBuffers(cursor, buffers, bufSizes, - dataTypes, indicators, - lastFetchSize) - rc = odbc.SQLFetch(cursor.hStmt) - checkStatus(rc, hStmt=cursor.hStmt, method="SQLFetch") - if rc == SQL_NO_DATA: - break - for rowIndex in range(0, rowCount.value): - yield _getRow(cursor, buffers, bufSizes, dataTypes, - indicators, rowIndex) - if not cursor._checkForMoreResults(): - cursor._free() diff --git a/teradata/tdrest.py b/teradata/tdrest.py deleted file mode 100644 index 7dd8e68..0000000 --- a/teradata/tdrest.py +++ /dev/null @@ -1,467 +0,0 @@ -"""An implementation of the Python Database API Specification v2.0 using -Teradata REST.""" - -# The MIT License (MIT) -# -# Copyright (c) 2015 by Teradata -# -# 
Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import atexit -import base64 -import json -import ssl -import sys -import time -import io - -from . 
import pulljson, util, datatypes -from .api import * # @UnusedWildImport # noqa - - -if sys.version_info[0] == 2: - import httplib as httplib # @UnresolvedImport #@UnusedImport -else: - import http.client as httplib # @UnresolvedImport @UnusedImport @Reimport - unicode = str - -logger = logging.getLogger(__name__) - -REST_ERROR = "REST_ERROR" -HTTP_STATUS_DATABASE_ERROR = 420 -ERROR_USER_GENERATED_TRANSACTION_ABORT = 3514 -MAX_CONNECT_RETRIES = 5 - -connections = [] - - -def cleanup(): - for conn in connections: - conn.close() -atexit.register(cleanup) - - -class RestConnection: - - """ Represents a Connection to Teradata using the REST API for - Teradata Database """ - - def __init__(self, dbType="Teradata", host=None, system=None, - username=None, password=None, protocol='http', port=None, - webContext='/tdrest', autoCommit=False, implicit=False, - transactionMode='TERA', queryBands=None, charset=None, - verifyCerts=True, sslContext=None, database=None, - authentication=None, - dataTypeConverter=datatypes.DefaultDataTypeConverter()): - self.dbType = dbType - self.system = system - self.sessionId = None - self.implicit = implicit - self.transactionMode = transactionMode - self.dataTypeConverter = dataTypeConverter - self.cursors = [] - # Support TERA and Teradata as transaction mode to be consistent with - # ODBC. 
- if transactionMode == "Teradata": - self.transactionMode = "TERA" - self.autoCommit = False - if port is None: - if protocol == 'http': - port = 1080 - elif protocol == 'https': - port = 1443 - else: - raise InterfaceError( - CONFIG_ERROR, "Unsupported protocol: {}".format(protocol)) - if host is None: - raise InterfaceError(0, - "\"host\" is a required field, " - "set to location of " - "TDREST server.") - self.template = RestTemplate( - protocol, host, int(port), webContext, username, password, - accept='application/vnd.com.teradata.rest-v1.0+json', - verifyCerts=util.booleanValue(verifyCerts), sslContext=sslContext) - with self.template.connect() as conn: - if not self.implicit: - options = {} - options['autoCommit'] = autoCommit - options['transactionMode'] = transactionMode - if queryBands: - options['queryBands'] = queryBands - if charset: - options['charSet'] = charset - if database: - options['defaultDatabase'] = database - if authentication: - options['logMech'] = authentication - try: - session = conn.post( - '/systems/{0}/sessions'.format(self.system), - options).readObject() - self.sessionId = session['sessionId'] - connections.append(self) - logger.info("Created explicit session: %s", session) - except (pulljson.JSONParseError) as e: - raise InterfaceError( - e.code, "Error reading JSON response: " + e.msg) - - def close(self): - """ Closes an Explicit Session using the REST API for Teradata - Database """ - if hasattr(self, 'sessionId') and self.sessionId is not None: - with self.template.connect() as conn: - try: - conn.delete( - '/systems/{0}/sessions/{1}'.format( - self.system, self.sessionId)) - except InterfaceError as e: - # Ignore if the session is already closed. 
- if e.code != 404: - raise - logger.info("Closing session: %s", self.sessionId) - self.sessionId = None - connections.remove(self) - for cursor in list(self.cursors): - cursor.close() - - def commit(self): - with self.cursor() as cursor: - if self.transactionMode == 'ANSI': - cursor.execute("COMMIT") - else: - cursor.execute("ET") - - def rollback(self): - with self.cursor() as cursor: - try: - cursor.execute("ROLLBACK") - except DatabaseError as e: - if e.code != ERROR_USER_GENERATED_TRANSACTION_ABORT: - raise - - def cursor(self): - return RestCursor(self) - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, t, value, traceback): - self.close() - -connect = RestConnection - - -class RestCursor (util.Cursor): - - def __init__(self, connection): - self.conn = None - util.Cursor.__init__( - self, connection, connection.dbType, connection.dataTypeConverter) - self.conn = connection.template.connect() - connection.cursors.append(self) - - def callproc(self, procname, params, queryTimeout=None): - inparams = None - outparams = None - count = 0 - query = "CALL {} (".format(procname) - if params is not None: - inparams = [[]] - outparams = [] - for p in params: - if count > 0: - query += ", " - if isinstance(p, InOutParam): - inparams[0].append(p.inValue) - outparams.append(p.inValue) - elif isinstance(p, OutParam): - outparams.append(None) - else: - inparams[0].append(p) - count += 1 - query += "?" 
- query += ")" - outparams = self._handleResults(self._execute( - query, inparams, outparams, queryTimeout=queryTimeout), - len(outparams) > 0) - return util.OutParams(params, self.dbType, self.converter, outparams) - - def close(self): - if self.conn: - self.conn.close() - - def execute(self, query, params=None, queryTimeout=None): - if params is not None: - params = [params] - self._handleResults( - self._execute(query, params, queryTimeout=queryTimeout)) - return self - - def executemany(self, query, params, batch=False, queryTimeout=None): - self._handleResults( - self._execute(query, params, batch=batch, - queryTimeout=queryTimeout)) - return self - - def _handleResults(self, results, hasOutParams=False): - self.results = results - try: - results.expectObject() - self.queueDuration = results.expectField( - "queueDuration", pulljson.NUMBER) - self.queryDuration = results.expectField( - "queryDuration", pulljson.NUMBER) - logger.debug("Durations reported by REST service: Queue Duration: " - "%s, Query Duration: %s", self.queueDuration, - self.queryDuration) - results.expectField("results", pulljson.ARRAY) - results.expectObject() - return self._handleResultSet(results, hasOutParams) - except (pulljson.JSONParseError) as e: - raise InterfaceError( - e.code, "Error reading JSON response: " + e.msg) - - def _execute(self, query, params=None, outParams=None, batch=False, - queryTimeout=None): - options = {} - options['query'] = query - options['format'] = 'array' - options['includeColumns'] = 'true' - options['rowLimit'] = 0 - if params is not None: - options['params'] = list( - list(_convertParam(p) for p in paramSet) - for paramSet in params) - options['batch'] = batch - if outParams is not None: - options['outParams'] = outParams - if not self.connection.implicit: - options['session'] = str(self.connection.sessionId) - if queryTimeout is not None: - options['queryTimeout'] = queryTimeout - options['queueTimeout'] = queryTimeout - return 
self.conn.post('/systems/{0}/queries'.format( - self.connection.system), options) - - def _handleResultSet(self, results, hasOutParams=False): - outParams = None - if hasOutParams: - outParams = results.expectField( - "outParams", pulljson.ARRAY, readAll=True) - self.resultSet = None - else: - try: - self.resultSet = results.expectField( - "resultSet", pulljson.BOOLEAN) - except pulljson.JSONParseError: - # Workaround for Batch mode and Stored procedures which doens't - # include a resultSet. - self.resultSet = None - if self.resultSet: - index = 0 - self.columns = {} - self.description = [] - self.types = [] - self.rowcount = -1 - self.rownumber = None - for column in results.expectField("columns", pulljson.ARRAY): - self.columns[column["name"].lower()] = index - type_code = self.converter.convertType( - self.dbType, column["type"]) - self.types.append((column["type"], type_code)) - self.description.append( - (column["name"], type_code, None, None, None, None, None)) - index += 1 - self.iterator = results.expectField("data", pulljson.ARRAY) - else: - self.columns = None - self.description = None - self.rownumber = None - self.rowcount = -1 - if self.resultSet is not None: - self.rowcount = results.expectField("count") - return outParams - - def nextset(self): - for row in self: # @UnusedVariable - pass - for event in self.results: - if event.type == pulljson.START_OBJECT: - self._handleResultSet(self.results) - return True - - -def _convertParam(p): - if util.isString(p) or p is None: - return p - elif isinstance(p, bytearray): - return ''.join('{:02x}'.format(x) for x in p) - else: - return unicode(p) - - -class RestTemplate: - - def __init__(self, protocol, host, port, webContext, username, password, - sslContext=None, verifyCerts=True, accept=None): - self.protocol = protocol - self.host = host - self.port = port - self.webContext = webContext - self.headers = {} - self.headers['Content-Type'] = 'application/json' - if accept is not None: - 
self.headers['Accept'] = accept - self.headers['Authorization'] = 'Basic ' + \ - base64.b64encode( - (username + ":" + password).encode('utf_8')).decode('ascii') - self.sslContext = sslContext - if sslContext is None and not verifyCerts: - self.sslContext = ssl.create_default_context() - self.sslContext.check_hostname = False - self.sslContext.verify_mode = ssl.CERT_NONE - - def connect(self): - return HttpConnection(self) - - -class HttpConnection: - - def __init__(self, template): - self.template = template - if template.protocol.lower() == "http": - self.conn = httplib.HTTPConnection(template.host, template.port) - elif template.protocol.lower() == "https": - self.conn = httplib.HTTPSConnection( - template.host, template.port, context=template.sslContext) - else: - raise InterfaceError( - REST_ERROR, "Unknown protocol: %s" % template.protocol) - failureCount = 0 - while True: - try: - self.conn.connect() - break - except Exception as e: - eofError = "EOF occurred in violation of protocol" in str(e) - failureCount += 1 - if not eofError or failureCount > MAX_CONNECT_RETRIES: - raise InterfaceError( - REST_ERROR, - "Error accessing {}:{}. 
ERROR: {}".format( - template.host, template.port, e)) - else: - logger.debug( - "Received an \"EOF occurred in violation of " - "protocol\" error, retrying connection.") - - def close(self): - if self.conn: - self.conn.close() - - def post(self, uri, data={}): - return self.send(uri, 'POST', data) - - def delete(self, uri): - self.send(uri, 'DELETE', None) - - def get(self, uri): - return self.send(uri, 'GET', None) - - def __enter__(self): - return self - - def __exit__(self, t, value, traceback): - self.close() - - def send(self, uri, method, data): - response = None - url = self.template.webContext + uri - try: - start = time.time() - payload = json.dumps(data).encode('utf8') if data else None - logger.trace("%s: %s, %s", method, url, payload) - self.conn.request(method, url, payload, self.template.headers) - response = self.conn.getresponse() - duration = time.time() - start - logger.debug("Roundtrip Duration: %.3f seconds", duration) - except Exception as e: - raise InterfaceError( - REST_ERROR, 'Error accessing {}. ERROR: {}'.format(url, e)) - if response.status < 300: - return pulljson.JSONPullParser( - HttpResponseAsUnicodeStream(response)) - if response.status < 400: - raise InterfaceError( - response.status, - "HTTP Status: {}. 
ERROR: Redirection not supported.") - else: - msg = response.read().decode("utf8") - try: - errorDetails = json.loads(msg) - except Exception: - raise InterfaceError( - response.status, "HTTP Status: " + str(response.status) + - ", URL: " + url + ", Details: " + str(msg)) - if response.status == HTTP_STATUS_DATABASE_ERROR: - raise DatabaseError( - int(errorDetails['error']), errorDetails['message']) - else: - raise InterfaceError(response.status, "HTTP Status: " + str( - response.status) + ", URL: " + url + - ", Details: " + str(errorDetails)) - - -class HttpResponseAsUnicodeStream: - - def __init__(self, buf): - self.stream = io.TextIOWrapper( - HttpResponseIOWrapper(buf), encoding="utf8") - - def read(self, size): - data = "" - if not self.stream.closed: - data = self.stream.read(size) - return data - - -class HttpResponseIOWrapper: - - def __init__(self, buf): - self.buf = buf - self.closed = False - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return False - - def read1(self, n=-1): - return self.read(n) - - def read(self, size): - return self.buf.read(size) diff --git a/teradata/tdsql.py b/teradata/tdsql.py new file mode 100644 index 0000000..78b6f9c --- /dev/null +++ b/teradata/tdsql.py @@ -0,0 +1,606 @@ +"""An implementation of the Python Database API Specification v2.0 + using Teradata Python Driver.""" + +# The MIT License (MIT) +# +# Copyright (c) 2015 by Teradata +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all 
copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import atexit +import collections +import ctypes +import inspect +import json +import platform +import re +import sys +import threading + +import logging + +import teradatasql + +from . import util, datatypes +from .api import * # @UnusedWildImport # noqa + + +logger = logging.getLogger(__name__) + +NATIVE_SQL_AUTOCOMMIT_ON = "{fn teradata_nativesql}{fn teradata_autocommit_on}" +NATIVE_SQL_AUTOCOMMIT_OFF = "{fn teradata_nativesql}{fn teradata_autocommit_off}" +NATIVE_SQL_SESSION_NUMBER = "{fn teradata_nativesql}{fn teradata_session_number}" +TERADATA_FAKE_RESULT_SETS = "{fn teradata_fake_result_sets}" + +#FAKE RESULTS +NATIVE_SQL = 0 +WARNING_CODE = 5 +WARNING_MSG = 6 +COLUMN_METADATA = 7 + +lock = threading.Lock() + +# The amount of seconds to wait when submitting non-user defined SQL (e.g. +# set query bands, etc). Currently not being used. 
+QUERY_TIMEOUT = 120 + +connections = [] + +def cleanupConnections(): + """Cleanup open connections.""" + if connections: + logger.warning( + "%s open connections found on exit, attempting to close...", + len(connections)) + for conn in list(connections): + conn.close() + + +class TeradataSqlConnection: + + """Represents a Connection to Teradata using Teradata Python Driver.""" + + def __init__(self, system=None, + username=None, autoCommit=True, + transactionMode=None, queryBands=None, + dataTypeConverter=datatypes.DefaultDataTypeConverter(), + charset=None, **kwargs): + """Creates a teradatasql connection.""" + + if charset is not None and charset != 'UTF8': + raise InterfaceError(util.INVALID_ARGUMENT, + "Connection charset {} is not valid only UTF8 is supported".format(charset)) + + if "host" not in kwargs and system is not None: + kwargs ["host"] = system + + if "user" not in kwargs and username is not None: + kwargs ["user"] = username + + if "tmode" not in kwargs and transactionMode is not None: + kwargs ["tmode"] = transactionMode + + sConParams = json.dumps(kwargs) + logger.trace ('> enter __init__ {} sConParams={} kwargs={}'.format (self.__class__.__name__, sConParams, kwargs)) + try: + self.cursorCount = 0 + self.sessionno = 0 + self.cursors = [] + self.converter = dataTypeConverter + bAutoCommit = util.booleanValue(autoCommit) + + # Create connection + logger.debug("Creating connection using teradatasql parameters are: %s", + re.sub("password=.*?(;|$)", "password=XXX;", sConParams)) + + try: + lock.acquire() + try: + self.conn = teradatasql.connect (sConParams) + except Exception as e: + raise (_convertError (e)) + finally: + lock.release() + connections.append(self) + + # Setup autocommit, query bands, etc. 
+ try: + with self.conn.cursor () as c: + if not bAutoCommit: + logger.debug("Turning off AUTOCOMMIT") + c.execute (NATIVE_SQL_AUTOCOMMIT_OFF) + self.sessionno = c.execute (NATIVE_SQL_SESSION_NUMBER).fetchone () [0] + logger.debug("SELECT SESSION returned %s", self.sessionno) + if queryBands: + c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format( + u";".join(u"{}={}".format(k,v) + for k, v in queryBands.items()))) + + if not bAutoCommit: + self.commit() + logger.debug("Created session %s.", self.sessionno) + except Exception: + self.close() + raise + finally: + logger.trace ('> leave __init__ {}'.format (self)) + # end __init__ + + def close(self): + """Closes a teradatasql Connection.""" + logger.trace ('> enter close {}'.format (self)) + try: + if self.sessionno: + logger.debug("Closing session %s...", self.sessionno) + for cursor in list(self.cursors): + cursor.close() + self.conn.close () + connections.remove(self) + if self.sessionno: + logger.debug("Session %s closed.", self.sessionno) + self.sessionno = 0 + finally: + logger.trace ('< leave close {}'.format (self)) + #end close + + def commit (self): + """Commits a transaction.""" + logger.debug ("Committing transaction...") + self.conn.commit () + + def rollback(self): + """Rollsback a transaction.""" + logger.debug("Rolling back transaction...") + self.conn.rollback () + + def cursor(self): + """Returns a cursor.""" + cursor = TeradataSqlCursor (self, self.conn, self.converter, self.cursorCount) + self.cursorCount += 1 + return cursor + + def __del__(self): + self.close() + + def __enter__(self): + return self + + def __exit__(self, t, value, traceback): + self.close() + + def __repr__(self): + return "{} sessionno={}".format(self.__class__.__name__, self.sessionno) + + # end class TeradataSqlConnection + +connect = TeradataSqlConnection + + +class TeradataSqlCursor: + + """Represents a teradatasql cursor.""" + + def __init__(self, connection, tdConn, converter, num): + self.connection = connection + 
self.converter = converter + self.results = None + self.arraysize = 1 + self.fetchSize = None + self.rowcount = -1 + self.description = None + self.columns = None + self.types = None + self.aLobLocators = {} + self.iterator = None + self.rownumber = None + + self.num = num + self.moreResults = None + if num > 0: + logger.debug( + "Creating cursor %s for session %s.", self.num, + self.connection.sessionno) + self.tdConn = tdConn + self.cur = tdConn.cursor () + connection.cursors.append(self) + self.bClosed = False + # end __init__ + + def callproc(self, procname, params, queryTimeout=0): + logger.trace ('> enter callproc ({}) {}'.format (procname, self)) + try: + if params is None: + sCall = "{call " + procname + "()}" + else: + asPars = ["?" if type (params [i]) is not OutParam else "p%s" % i for i in range (len(params))] + + sCall = "{call " + procname + "(" + ', '.join (asPars) + ")}" + sEscParamTypes = "" + nIndex = 1 + aoParams = [] + for nParam in range (0, len (params)): + if isinstance(params [nParam], InOutParam) or isinstance (params [nParam], InParam): + if params [nParam].escapeParamType is not None: + sCall = "{fn teradata_parameter(%s, %s)} " % (nIndex, params [nParam].escapeParamType) + sCall + if params [nParam].dataType is not None and params [nParam].dataType.startswith("PERIOD"): + params [nParam].setValueFunc (lambda: datatypes.removeTrailingZerosFromPeriod(params [nParam].inValue)) + elif params [nParam].dataType is not None and params [nParam].dataType.startswith("TIME"): + params [nParam].setValueFunc (lambda: datatypes.removeTrailingZerosFromTimeAndTimestamp(params [nParam].inValue)) + else: + params [nParam].setValueFunc (lambda: params [nParam].inValue) + logger.debug ("appending values {}".format (params [nParam].value ())) + aoParams.append (params [nParam].value ()) + + elif not isinstance(params [nParam], OutParam): + aoParams.append (params [nParam]) + if not isinstance (params [nParam], OutParam) or isinstance(params [nParam], 
InOutParam): + nIndex += 1 + + logger.debug("Executing Procedure: %s", sCall) + self.executemany(sCall, params=aoParams, queryTimeout=queryTimeout) + return util.OutParams(params, self.converter, outparams=self._getRow()) + finally: + logger.trace ('< leave callproc {}'.format (self)) + # end callproc + + def close(self): + logger.trace ('> enter close {}'.format (self)) + try: + if not self.bClosed: + self.bClosed = True + if self.num > 0: + logger.debug( + "Closing cursor %s for session %s.", self.num, + self.connection.sessionno) + self.cur.close () + self.connection.cursors.remove(self) + finally: + logger.trace ('< leave close {}'.format (self)) + # end close + + def _setQueryTimeout(self, queryTimeout): + pass + + def execute(self, query, params=None, queryTimeout=0): + logger.trace ('> enter execute {}'.format (self)) + try: + return self.executemany (query, params, queryTimeout) + finally: + logger.trace ('> leave execute {}'.format (self)) + # end execute + + def executemany (self, query, params, batch=False, ignoreErrors = None, queryTimeout=0): + logger.trace ('> enter executemany {} : {}'.format (self, query)) + try: + self._setQueryTimeout(queryTimeout) + + self.bFakeResult = True + if (params): + + if isinstance (params, tuple): + params = list (params) + + # Need to convert interval and period types to their string values + for i in range (0, len (params)): + if isinstance (params [i], list): + for j in range (0, len (params [i])): + if isinstance (params [i][j], (datatypes.Interval, datatypes.Period)): + params [i][j] = str (params [i][j]) + elif isinstance (params [i], (datatypes.Interval, datatypes.Period)): + params [i] = str (params [i]) + + # If batch is false and a batch request was submitted, process each request one by one + if not batch and len (params) > 0 and type (params [0]) in [list, tuple]: + logger.debug("Executing each query in the batch one by one") + try: + for p in params: + self.cur.execute (TERADATA_FAKE_RESULT_SETS + query, p) 
+ self._handleResults() + except Exception as e: + raise (_convertError (e)) + return self + + try: + if self.connection.sessionno: + logger.debug( + "Executing query on session %s using execute: %s", + self.connection.sessionno, query) + self.cur.execute (TERADATA_FAKE_RESULT_SETS + query, params, ignoreErrors) + except Exception as e: + raise (_convertError (e)) + self._handleResults() + return self + finally: + logger.trace ('> leave executeMany {}'.format (self)) + # end executeMany + + def _handleResults(self): + logger.trace ('> enter _handleResults {}'.format (self)) + try: + self.columnCount = 0 + self.moreResults = False + + self._obtainResultMetaData () + # After obtaining result set metadata, check if a resuslt set was returned + if not self.moreResults: + return + self.columnCount = len (self.cur.description) + self.rowcount = self.cur.rowcount + logger.debug ("Row count {}, column count {}".format (self.rowcount, self.columnCount)) + self.iterator = self.rowIterator() + # Processing current result, set more results to false to force self.nextset to call self.cur.nextset + self.moreResults = False + finally: + logger.trace ('< leave _handleResults {}'.format (self)) + # end _handleResults + + def _obtainResultMetaData (self): + logger.trace ('> enter _obtainResultMetaData {}'.format (self)) + try: + # It is possible to use cur.description returned from teradata.sql but we + # need to identify lobs + + row = self.cur.fetchone () + + if not self.bFakeResult: + self.description = self.cur.description + # Don't move past current result + self.moreResults = True + return + + if logger.isEnabledFor (logging.DEBUG): + [ logger.debug (" Column {} {:15} = {}".format (i + 1, self.cur.description [i][0], row [i])) for i in range (0, len (row)) ] + + if int (row [WARNING_CODE]) > 0: + logger.warning ("{} succeeded with warning: [code ]{} message {}".format (row [NATIVE_SQL], row [WARNING_CODE], row [WARNING_MSG])) + + aJsonColMetadata = json.loads (row 
[COLUMN_METADATA]) + if aJsonColMetadata is None: + # No column metadata returned get empty result set + self.moreResults = self.cur.nextset () + return + + columnCount = len (aJsonColMetadata) + # Get column meta data + if columnCount > 0: + self.description = [] + self.columns = {} + self.types = [] + self.aLobLocators = {} + for col in range(0, columnCount): + columnName = aJsonColMetadata [col] ['Title'] if aJsonColMetadata [col] ['Title'] is not None else aJsonColMetadata [col] ['Name'] + sTypeName = aJsonColMetadata [col] ['TypeName'] + pythonType = self.converter.convertType(sTypeName) + columnSize = aJsonColMetadata [col] ['ByteCount'] + decimalDigits = aJsonColMetadata [col] ['Precision'] + nullable = aJsonColMetadata [col] ['Nullable'] + nCookedType = aJsonColMetadata [col] ['CookedDataType'] + if nCookedType in datatypes.LOB_LOCATOR_TYPES: + self.aLobLocators [col + 1] = datatypes.LOB_LOCATOR_TYPES [nCookedType] + self.columns[columnName.lower()] = col + self.types.append((sTypeName, pythonType)) + self.description.append(( + columnName, pythonType, None, columnSize, + decimalDigits, None, nullable)) + # Move past metadata result set + self.moreResults = self.cur.nextset () + finally: + logger.trace ('< leave _obtainResultMetaData {}'.format (self)) + # end _obtainResultMetaData + + def fetchone(self): + self.fetchSize = 1 + return next(self, None) + + def fetchmany(self, size=None): + logger.trace ('> enter fetchmany {}'.format (self)) + try: + if size is None: + size = self.arraysize + self.fetchSize = size + rows = [] + count = 0 + for row in self: + rows.append(row) + count += 1 + if count == size: + break + return rows + finally: + logger.trace ('< leave fetchmany {}'.format (self)) + + def fetchall(self): + logger.trace ('> enter fetchall {}'.format (self)) + try: + self.fetchSize = self.arraysize + rows = [] + for row in self: + rows.append(row) + return rows + finally: + logger.trace ('< leave fetchall {}'.format (self)) + #end fetchall + + def 
nextset(self): + logger.trace ('> enter nextset {}'.format (self)) + try: + if not self.moreResults: + self.moreResults = self.cur.nextset () + + if self.moreResults: + self._handleResults() + return True + finally: + logger.trace ('< leave nextset {}'.format (self)) + # end nextset + + def setinputsizes(self, sizes): + pass + + def setoutputsize(self, size, column=None): + pass + + def __iter__(self): + return self + + def __next__(self): + logger.trace ("> enter __next__ {}".format (self)) + try: + self.fetchSize = self.arraysize + if self.iterator: + if self.rownumber is None: + self.rownumber = 0 + else: + self.rownumber += 1 + values = next(self.iterator) + for i in range(0, len(values)): + values[i] = self.converter.convertValue( + self.types[i][0], self.types[i][1], values[i]) + row = Row(self.columns, values, self.rownumber + 1) + if logger.isEnabledFor (logging.DEBUG): + [ logger.debug (" Column {} {:15} = {}".format (i + 1, self.cur.description [i][0], row [i])) for i in range (0, len (row)) ] + + return row + raise StopIteration() + finally: + logger.trace ('< leave __next__ {}'.format (self)) + #end __next__ + + def next(self): + return self.__next__() + + def __enter__(self): + return self + + def __exit__(self, t, value, traceback): + self.close() + + def __repr__(self): + return "{} sessionno={} cursor={} bClosed={}".format (self.__class__.__name__, + self.connection.sessionno, self.num, self.bClosed) + + def rowIterator (self): + for rowIndex in range(0, self.rowcount): + yield self._getRow() + + def _getRow(self): + """Reads a row of data. 
If the column type is a LOB, + then that data is obtained via a call to _readLobValue.""" + logger.trace ('> enter _getRow {}'.format (self)) + try: + aPyRow = self.cur.fetchone () + row = [] + oColVal = None + for col in range(1, len(self.cur.description) + 1): + + if aPyRow [col - 1] is not None and col in self.aLobLocators: + logger.debug ("retrieving lob value col = {}".format (col)) + row.append (self._readLobValue (aPyRow [col - 1], self.aLobLocators [col])) + else: + row.append (aPyRow [col - 1]) + + return row + finally: + logger.trace ('< leave _getRow {}'.format (self)) + # end _getRow + + def _readLobValue (self, abyInputLocator, sDataType): + logger.trace ("> enter _readLobValue {}".format (self)) + try: + if type (abyInputLocator) is not bytes: + raise Error ("abyInputLocator must be bytes not {}".format (type (abyInputLocator))) + + with self.tdConn.cursor () as c: + olobValue = c.execute ("{fn teradata_parameter(1," + sDataType + ")}select ?", [abyInputLocator]).fetchone () [0] + self.connection.commit() + + return (olobValue) + finally: + logger.trace ('< leave _readLobValue {}'.format (self)) + # end _readLobValue + + # end class TeradataSqlCursor + + +def _convertError (e): + nErrCode = 0 + sErrMsg = "{}".format (e) + + mat = re.compile ("\\[Error (\\d+)\\]").search (sErrMsg) + if mat and mat.lastindex == 1: + nErrCode = int (mat.group (1)) + + if "[Teradata Database]" in sErrMsg: + return DatabaseError (nErrCode, sErrMsg) + + if e.__class__.__name__ == "ProgrammingError": + return ProgrammingError (nErrCode, sErrMsg) + + return OperationalError (nErrCode, sErrMsg) + #end _convertError + + +class Row (object): + + """Represents a table row.""" + + def __init__(self, columns, values, rowNum): + super(Row, self).__setattr__("columns", columns) + super(Row, self).__setattr__("values", values) + super(Row, self).__setattr__("rowNum", rowNum) + + def __getattr__(self, name): + try: + index = self.columns[name.lower()] + return self.values[index] + 
except KeyError: + raise AttributeError("No such attribute: " + name) + + def __setattr__(self, name, value): + try: + self.values[self.columns[name.lower()]] = value + except KeyError: + raise AttributeError("No such attribute: " + name) + + def __setitem__(self, key, value): + try: + self.values[key] = value + except TypeError: + self.values[self.columns[key.lower()]] = value + + def __getitem__(self, key): + try: + if isinstance (key, int) and key < 0 or key >= len (self.values): + raise ProgrammingError (0, "Invalid key index {} cannot be less than 0 or greater than or equal to size of results {} ".format (key, len (self.values))) + return self.values[key] + except TypeError: + index = self.columns[key.lower()] + return self.values[index] + + def __len__(self): + return len(self.values) + + def __str__(self): + return "Row " + str(self.rowNum) + ": [" + \ + ", ".join(map(str, self.values)) + "]" + + def __iter__(self): + return self.values.__iter__() + + diff --git a/teradata/udaexec.py b/teradata/udaexec.py index d4d602d..4ae1795 100644 --- a/teradata/udaexec.py +++ b/teradata/udaexec.py @@ -1,6 +1,6 @@ """ A Python Database API Specification v2.0 implementation that provides configuration loading, variable substitution, logging, query banding, - etc and options to use either ODBC or REST""" + etc and options to use either Teradata Python Driver""" # The MIT License (MIT) # @@ -27,8 +27,10 @@ import atexit import codecs import collections +import configparser import datetime import getpass +import locale import logging import os.path import platform @@ -37,24 +39,14 @@ import sys import time -from . import tdodbc, util, api, datatypes -from . import tdrest # @UnresolvedImport -from .util import toUnicode +from . import tdsql, util, api, datatypes from .version import __version__ # @UnresolvedImport # The module logger -logger = logging.getLogger(__name__) - -METHOD_REST = "rest" -METHOD_ODBC = "odbc" - -# Implement python version specific setup. 
-if sys.version_info[0] == 2: - import ConfigParser as configparser # @UnresolvedImport #@UnusedImport -else: - import configparser # @UnresolvedImport @UnusedImport @Reimport +logger = logging.getLogger (__name__) +METHOD_TERADATASQL = "tdsql" def handleUncaughtException(exc_type, exc_value, exc_traceback): """Make sure that uncaught exceptions are logged""" @@ -92,7 +84,6 @@ def __init__(self, appName="${appName}", parseCmdLineArgs=True, gitPath="${gitPath}", production="${production}", - odbcLibPath="${odbcLibPath}", dataTypeConverter=datatypes.DefaultDataTypeConverter()): """ Initializes the UdaExec framework """ # Load configuration files. @@ -125,17 +116,17 @@ def __init__(self, appName="${appName}", int(self.config.resolve(logRetention, default="90")), logMsgs) # Log messages that were collected prior to logging being configured. for (level, msg) in logMsgs: - logger.log(level, toUnicode(msg)) + logger.log(level, msg) self._initVersion(self.config.resolve( version, default=""), self.config.resolve(gitPath, default="")) self._initQueryBands(self.config.resolve(production, default="false")) self._initCheckpoint(checkpointFile) - self.odbcLibPath = self.config.resolve(odbcLibPath, default="") self.dataTypeConverter = dataTypeConverter logger.info(self) logger.debug(self.config) # Register exit function. d atexit.register(exiting) + # end __init__ def connect(self, externalDSN=None, dataTypeConverter=None, **kwargs): """Creates a database connection""" @@ -155,11 +146,9 @@ def connect(self, externalDSN=None, dataTypeConverter=None, **kwargs): if externalDSN: paramsToLog['externalDSN'] = externalDSN logger.info("Creating connection: %s", paramsToLog) - # Determine connection method. 
- method = None + method = METHOD_TERADATASQL if 'method' in args: method = args.pop('method') - util.raiseIfNone('method', method) if 'queryBands' in args: queryBands = args.pop('queryBands') self.queryBands.update(queryBands) @@ -170,17 +159,11 @@ def connect(self, externalDSN=None, dataTypeConverter=None, **kwargs): # Create the connection try: start = time.time() - if method.lower() == METHOD_REST: + if method.lower() == METHOD_TERADATASQL: conn = UdaExecConnection( - self, tdrest.connect(queryBands=self.queryBands, - dataTypeConverter=dataTypeConverter, - **args)) - elif method.lower() == METHOD_ODBC: - conn = UdaExecConnection( - self, tdodbc.connect(queryBands=self.queryBands, - odbcLibPath=self.odbcLibPath, - dataTypeConverter=dataTypeConverter, - **args)) + self, tdsql.connect(queryBands=self.queryBands, + dataTypeConverter=dataTypeConverter, + **args)) else: raise api.InterfaceError( api.CONFIG_ERROR, @@ -193,56 +176,72 @@ def connect(self, externalDSN=None, dataTypeConverter=None, **kwargs): except Exception: logger.exception("Unable to create connection: %s", paramsToLog) raise + # end connect def checkpoint(self, checkpointName=None): """ Sets or clears the current checkpoint.""" - if checkpointName is None: - logger.info("Clearing checkpoint....") - self.currentCheckpoint = None - self.skip = False - if self.checkpointManager: - self.checkpointManager.clearCheckpoint() - else: - self.currentCheckpoint = checkpointName - if self.skip: - if self.resumeFromCheckpoint == self.currentCheckpoint: - logger.info( - "Reached resume checkpoint: \"%s\". 
" - "Resuming execution...", checkpointName) - self.skip = False - else: - logger.info("Reached checkpoint: \"%s\"", checkpointName) + logger.trace ('> enter checkpoint {}'.format (self._log())) + try: + if checkpointName is None: + logger.info("Clearing checkpoint....") + self.currentCheckpoint = None + self.skip = False if self.checkpointManager: - self.checkpointManager.saveCheckpoint(checkpointName) + self.checkpointManager.clearCheckpoint() + else: + self.currentCheckpoint = checkpointName + if self.skip: + if self.resumeFromCheckpoint == self.currentCheckpoint: + logger.info( + "Reached resume checkpoint: \"%s\". " + "Resuming execution...", checkpointName) + self.skip = False + else: + logger.info("Reached checkpoint: \"%s\"", checkpointName) + if self.checkpointManager: + self.checkpointManager.saveCheckpoint(checkpointName) + finally: + logger.trace ('< leave checkpoint {}'.format (self._log())) + # end checkpoint def setCheckpointManager(self, checkpointManager): """ Sets a custom Checkpoint Manager. 
""" - util.raiseIfNone("checkpointManager", checkpointManager) - logger.info("Setting custom checkpoint manager: %s", checkpointManager) - self.checkpointManager = checkpointManager - logger.info("Loading resume checkpoint from checkpoint manager...") - self.setResumeCheckpoint(checkpointManager.loadCheckpoint()) + logger.trace ('> enter setCheckpointManager {}'.format (self._log())) + try: + util.raiseIfNone("checkpointManager", checkpointManager) + logger.info("Setting custom checkpoint manager: %s", checkpointManager) + self.checkpointManager = checkpointManager + logger.info("Loading resume checkpoint from checkpoint manager...") + self.setResumeCheckpoint(checkpointManager.loadCheckpoint()) + finally: + logger.trace ('< leave setCheckpointManager {}'.format (self._log())) + # end setCheckpointManager def setResumeCheckpoint(self, resumeCheckpoint): """ Sets the checkpoint that must be hit for executes to not be skipped.""" - self.resumeFromCheckpoint = resumeCheckpoint - if resumeCheckpoint: - logger.info( - "Resume checkpoint changed to \"%s\". Skipping all calls to " - "execute until checkpoint is reached.", - self.resumeFromCheckpoint) - self.skip = True - else: - self.resumeFromCheckpoint = None - if self.skip: - self.skip = False + logger.trace ('> enter setResumeCheckpoint {}'.format (self._log())) + try: + self.resumeFromCheckpoint = resumeCheckpoint + if resumeCheckpoint: logger.info( - "Resume checkpoint cleared. Execute calls will " - "no longer be skipped.") + "Resume checkpoint changed to \"%s\". Skipping all calls to " + "execute until checkpoint is reached.", + self.resumeFromCheckpoint) + self.skip = True else: - logger.info( - "No resume checkpoint set, continuing execution...") + self.resumeFromCheckpoint = None + if self.skip: + self.skip = False + logger.info( + "Resume checkpoint cleared. 
Execute calls will " + "no longer be skipped.") + else: + logger.info( + "No resume checkpoint set, continuing execution...") + finally: + logger.trace ('< leave setResumeCheckpoint {}'.format (self._log())) + # end setResumeCheckpoint def _initLogging(self, logDir, logFile, logConsole, level, logRetention, logMsgs): @@ -256,31 +255,41 @@ def _initLogging(self, logDir, logFile, logConsole, level, logRetention, "%(asctime)s - %(name)s - %(levelname)s - %(message)s") fh = logging.FileHandler(self.logFile, mode="a", encoding="utf8") fh.setFormatter(formatter) - sh = logging.StreamHandler(sys.stdout) - sh.setFormatter(formatter) + #sh = logging.StreamHandler(sys.stdout) + #sh.setFormatter(formatter) root = logging.getLogger() if level != logging.NOTSET: root.setLevel(level) root.addHandler(fh) if logConsole: + stream = codecs.StreamWriter(sys.stdout, errors="replace") + stream.encode = lambda msg, errors="strict": (msg.encode(locale.getpreferredencoding(False), errors).decode(), msg) + sh = logging.StreamHandler(stream) + sh.setFormatter(formatter) root.addHandler(sh) sys.excepthook = handleUncaughtException + # end _initLogging def _cleanupLogs(self, logDir, logRetention, logMsgs): """Cleanup older log files.""" - logMsgs.append( - (logging.INFO, - "Cleaning up log files older than {} days.".format(logRetention))) - cutoff = time.time() - (logRetention * 86400) - count = 0 - for f in os.listdir(logDir): - f = os.path.join(logDir, f) - if os.stat(f).st_mtime < cutoff: - logMsgs.append( - (logging.DEBUG, "Removing log file: {}".format(f))) - os.remove(f) - count += 1 - logMsgs.append((logging.INFO, "Removed {} log files.".format(count))) + logger.trace ('> enter _cleanupLogs {}'.format (self._log())) + try: + logMsgs.append( + (logging.INFO, + "Cleaning up log files older than {} days.".format(logRetention))) + cutoff = time.time() - (logRetention * 86400) + count = 0 + for f in os.listdir(logDir): + f = os.path.join(logDir, f) + if os.stat(f).st_mtime < cutoff: + 
logMsgs.append( + (logging.DEBUG, "Removing log file: {}".format(f))) + os.remove(f) + count += 1 + logMsgs.append((logging.INFO, "Removed {} log files.".format(count))) + finally: + logger.trace ('< leave _cleanupLogs {}'.format (self._log())) + # end _cleanupLogs def _initRunNumber(self, runNumberFile, runNumber, logMsgs): """Initialize the run number unique to this particular execution.""" @@ -350,6 +359,7 @@ def _initCheckpoint(self, checkpointFile): self.checkpointManager = None self.resumeFromCheckpoint = None logger.info("Checkpoint file disabled.") + # end _initCheckpoint def _initVersion(self, version, gitPath): """Initialize the version and GIT revision.""" @@ -384,6 +394,7 @@ def _initVersion(self, version, gitPath): "passed in as a parameter, specified in a config file, " "or pulled from a git repository.") self.config['version'] = version + # end _initVersion def _initQueryBands(self, production): """Initialize the Query Band that will be set on future connections.""" @@ -401,29 +412,30 @@ def _initQueryBands(self, production): self.queryBands['gitDirty'] = self.gitDirty self.queryBands['UtilityName'] = 'PyTd' self.queryBands['UtilityVersion'] = __version__ + # end _initQueryBands def __str__(self): value = u"Execution Details:\n/" value += u'*' * 80 value += u"\n" value += u" * Application Name: {}\n".format( - toUnicode(self.config['appName'])) + self.config['appName']) value += u" * Version: {}\n".format( - toUnicode(self.config['version'])) - value += u" * Run Number: {}\n".format(toUnicode(self.runNumber)) + self.config['version']) + value += u" * Run Number: {}\n".format(self.runNumber) value += u" * Host: {}\n".format( - toUnicode(platform.node())) + platform.node()) value += u" * Platform: {}\n".format( platform.platform(aliased=True)) value += u" * OS User: {}\n".format( - toUnicode(getpass.getuser())) + getpass.getuser()) value += u" * Python Version: {}\n".format(platform.python_version()) value += u" * Python Compiler: {}\n".format( 
platform.python_compiler()) value += u" * Python Build: {}\n".format(platform.python_build()) value += u" * UdaExec Version: {}\n".format(__version__) - value += u" * Program Name: {}\n".format(toUnicode(sys.argv[0])) - value += u" * Working Dir: {}\n".format(toUnicode(os.getcwd())) + value += u" * Program Name: {}\n".format(sys.argv[0]) + value += u" * Working Dir: {}\n".format(os.getcwd()) if self.gitRevision: value += u" * Git Version: {}\n".format(self.gitVersion) value += u" * Git Revision: {}\n".format(self.gitRevision) @@ -432,24 +444,28 @@ def __str__(self): ",".join(self.modifiedFiles) + "]") if self.configureLogging: value += u" * Log Dir: {}\n".format( - toUnicode(self.logDir)) + self.logDir) value += u" * Log File: {}\n".format( - toUnicode(self.logFile)) + self.logFile) value += u" * Config Files: {}\n".format( - toUnicode(self.config.configFiles)) + self.config.configFiles) value += u" * Query Bands: {}\n".format( - u";".join(u"{}={}".format(toUnicode(k), toUnicode(v)) + u";".join(u"{}={}".format(k, v) for k, v in self.queryBands.items())) value += '*' * 80 value += '/' return value + def _log (self): + return "{} appName={}".format(self.__class__.__name__, self.config['appName']) + + #end class UdaExec def _appendConfigFiles(configFiles, *args): for arg in args: if arg is None: continue - if util.isString(arg): + if isinstance(arg, str): configFiles.append(arg) else: configFiles.extend(arg) @@ -486,14 +502,14 @@ def __init__(self, f): def loadCheckpoint(self): resumeFromCheckpoint = None if os.path.isfile(self.file): - logger.info(u"Found checkpoint file: \"%s\"", toUnicode(self.file)) + logger.info(u"Found checkpoint file: \"%s\"", self.file) with open(self.file, "r") as f: resumeFromCheckpoint = f.readline() if not resumeFromCheckpoint: logger.warn( - u"No checkpoint found in %s.", toUnicode(self.file)) + u"No checkpoint found in %s.", self.file) else: - logger.info(u"Checkpoint file not found: %s", toUnicode(self.file)) + logger.info(u"Checkpoint 
file not found: %s", self.file) return resumeFromCheckpoint def saveCheckpoint(self, checkpointName): @@ -523,8 +539,8 @@ def __init__(self, configFiles, encoding, configSection, parseCmdLineArgs, configParser = configparser.ConfigParser() configParser.optionxform = str configFiles = [os.path.expanduser(f) for f in configFiles] - self.configFiles = [toUnicode(os.path.abspath( - f)) + (": Found" if os.path.isfile(f) else ": Not Found") + self.configFiles = [os.path.abspath( + f) + (": Found" if os.path.isfile(f) else ": Not Found") for f in configFiles] logMsgs.append( (logging.INFO, @@ -546,8 +562,8 @@ def __init__(self, configFiles, encoding, configSection, parseCmdLineArgs, key = key[2:] logMsgs.append( (logging.DEBUG, u"Configuration value was set via " - "command line: {}={}".format(toUnicode(key), - toUnicode(val)))) + "command line: {}={}".format(key, + val))) self.sections[configSection][key] = val def __iter__(self): @@ -560,7 +576,7 @@ def resolveDict(self, d, sections=None): if sections is None: sections = [self.configSection] for key, value in d.items(): - if util.isString(value): + if isinstance(value, str): d[key] = self._resolve(value, sections, None, None) return d @@ -570,7 +586,7 @@ def resolve(self, value, sections=None, default=None, errorMsg=None): raise api.InterfaceError(api.CONFIG_ERROR, errorMsg) else: util.raiseIfNone("value", value) - if not util.isString(value): + if not isinstance(value, str): return value if sections is None: sections = [self.configSection] @@ -627,9 +643,8 @@ def __str__(self): value += u'*' * 80 value += u"\n" for key in sorted(self.sections[self.configSection]): - value += u" * {}: {}\n".format(toUnicode(key.rjust(length)), - toUnicode( - self.resolve("${" + key + "}")) + value += u" * {}: {}\n".format(key.rjust(length), + self.resolve("${" + key + "}") if 'password' not in key.lower() else u'XXXX') value += '*' * 80 @@ -639,7 +654,7 @@ def __str__(self): class UdaExecConnection: - """A UdaExec connection wrapper for 
ODBC or REST connections.""" + """A UdaExec connection wrapper for Teradata Python Driver.""" def __init__(self, udaexec, conn): self.udaexec = udaexec @@ -682,7 +697,7 @@ def executemany(self, query, params, **kwargs): class UdaExecCursor: - """A UdaExec cursor wrapper for ODBC or REST cursors.""" + """A UdaExec cursor wrapper for teradatasql cursors.""" def __init__(self, udaexec, cursor): self.udaexec = udaexec @@ -741,7 +756,7 @@ def execute(self, query=None, params=None, file=None, fileType=None, if file is None: util.raiseIfNone("query", query) if query is not None: - if util.isString(query): + if isinstance(query, str): self._execute(self.cursor.execute, query, params, **kwargs) else: for q in query: diff --git a/teradata/util.py b/teradata/util.py index 1cb7214..a951aeb 100644 --- a/teradata/util.py +++ b/teradata/util.py @@ -24,7 +24,6 @@ import sys import re -import codecs import argparse import inspect import copy @@ -34,8 +33,8 @@ INVALID_ARGUMENT = "INVALID_ARGUMENT" # Create new trace log level -TRACE = 5 -logging.addLevelName(TRACE, "TRACE") +TRACE = 15 +logging.addLevelName(TRACE, 'TRACE') def trace(self, message, *args, **kws): @@ -45,30 +44,7 @@ def trace(self, message, *args, **kws): logging.TRACE = TRACE logging.Logger.trace = trace -logger = logging.getLogger(__name__) - -if sys.version_info[0] == 2: - openfile = codecs.open -else: - openfile = open - - -def isString(value): - # Implement python version specific setup. 
- if sys.version_info[0] == 2: - return isinstance(value, basestring) # @UndefinedVariable - else: - return isinstance(value, str) # @UndefinedVariable - - -def toUnicode(string): - if not isString(string): - string = str(string) - if sys.version_info[0] == 2: - if isinstance(string, str): - string = string.decode("utf8") - return string - +logger = logging.getLogger (__name__) def raiseIfNone(name, value): if not value: @@ -78,158 +54,15 @@ def raiseIfNone(name, value): def booleanValue(value): retval = value - if isString(value): + if isinstance (value, str): retval = value.lower() in ["1", "on", "true", "yes"] return retval - -class Cursor: - - """An abstract cursor for encapsulating shared functionality of connection - specific implementations (e.g. ODBC, REST)""" - - def __init__(self, connection, dbType, dataTypeConverter): - self.connection = connection - self.converter = dataTypeConverter - self.dbType = dbType - self.results = None - self.arraysize = 1 - self.fetchSize = None - self.rowcount = -1 - self.description = None - self.types = None - self.iterator = None - self.rownumber = None - - def callproc(self, procname, params): - # Abstract method, defined by convention only - raise NotImplementedError("Subclass must implement abstract method") - - def close(self): - pass - - def execute(self, query, params=None): - # Abstract method, defined by convention only - raise NotImplementedError("Subclass must implement abstract method") - - def executemany(self, query, params, batch=False): - # Abstract method, defined by convention only - raise NotImplementedError("Subclass must implement abstract method") - - def fetchone(self): - self.fetchSize = 1 - return next(self, None) - - def fetchmany(self, size=None): - if size is None: - size = self.arraysize - self.fetchSize = size - rows = [] - count = 0 - for row in self: - rows.append(row) - count += 1 - if count == size: - break - return rows - - def fetchall(self): - self.fetchSize = self.arraysize - rows = [] 
- for row in self: - rows.append(row) - return rows - - def nextset(self): - # Abstract method, defined by convention only - raise NotImplementedError("Subclass must implement abstract method") - - def setinputsizes(self, sizes): - pass - - def setoutputsize(self, size, column=None): - pass - - def __iter__(self): - return self - - def __next__(self): - self.fetchSize = self.arraysize - if self.iterator: - if self.rownumber is None: - self.rownumber = 0 - else: - self.rownumber += 1 - values = next(self.iterator) - for i in range(0, len(values)): - values[i] = self.converter.convertValue( - self.dbType, self.types[i][0], self.types[i][1], values[i]) - row = Row(self.columns, values, self.rownumber + 1) - # logger.debug("%s", row) - return row - raise StopIteration() - - def next(self): - return self.__next__() - - def __enter__(self): - return self - - def __exit__(self, t, value, traceback): - self.close() - - -class Row (object): - - """Represents a table row.""" - - def __init__(self, columns, values, rowNum): - super(Row, self).__setattr__("columns", columns) - super(Row, self).__setattr__("values", values) - super(Row, self).__setattr__("rowNum", rowNum) - - def __getattr__(self, name): - try: - index = self.columns[name.lower()] - return self.values[index] - except KeyError: - raise AttributeError("No such attribute: " + name) - - def __setattr__(self, name, value): - try: - self.values[self.columns[name.lower()]] = value - except KeyError: - raise AttributeError("No such attribute: " + name) - - def __setitem__(self, key, value): - try: - self.values[key] = value - except TypeError: - self.values[self.columns[key.lower()]] = value - - def __getitem__(self, key): - try: - return self.values[key] - except TypeError: - index = self.columns[key.lower()] - return self.values[index] - - def __len__(self): - return len(self.values) - - def __str__(self): - return "Row " + str(self.rowNum) + ": [" + \ - ", ".join(map(str, self.values)) + "]" - - def __iter__(self): 
- return self.values.__iter__() - - class OutParams (object): """ Represents a set of Output parameters. """ - def __init__(self, params, dbType, dataTypeConverter, outparams=None): + def __init__(self, params, dataTypeConverter, outparams=None): names = {} copy = [] for p in params: @@ -239,10 +72,11 @@ def __init__(self, params, dbType, dataTypeConverter, outparams=None): else: value = p.value() if p.dataType is not None: - typeCode = dataTypeConverter.convertType( - dbType, p.dataType) + typeCode = dataTypeConverter.convertType(p.dataType) value = dataTypeConverter.convertValue( - dbType, p.dataType, typeCode, value) + p.dataType, typeCode, value) + if isinstance (p, OutParam) and value is not None and p.size is not None and isinstance (value, (str, bytes, bytearray)): + value = value [:p.size] copy.append(value) if p.name is not None: names[p.name] = value @@ -285,7 +119,7 @@ class SqlScript: def __init__(self, filename, delimiter=";", encoding=None): self.delimiter = delimiter - with openfile(filename, mode='r', encoding=encoding) as f: + with open(filename, mode='r', encoding=encoding) as f: self.sql = f.read() def __iter__(self): @@ -298,7 +132,7 @@ class BteqScript: def __init__(self, filename, encoding=None): self.file = filename - with openfile(self.file, mode='r', encoding=encoding) as f: + with open(self.file, mode='r', encoding=encoding) as f: self.lines = f.readlines() def __iter__(self): @@ -309,7 +143,7 @@ def sqlsplit(sql, delimiter=";"): """A generator function for splitting out SQL statements according to the specified delimiter. 
Ignores delimiter when in strings or comments.""" tokens = re.split("(--|'|\n|" + re.escape(delimiter) + "|\"|/\*|\*/)", - sql if isString(sql) else delimiter.join(sql)) + sql if isinstance(sql, str) else delimiter.join(sql)) statement = [] inComment = False inLineComment = False @@ -354,7 +188,7 @@ def linesplit(sql, newline="\n"): """A generator function for splitting out SQL statements according to the specified delimiter. Ignores delimiter when in strings or comments.""" tokens = re.split("(--|'|" + re.escape(newline) + "|\"|/\*|\*/)", - sql if isString(sql) else newline.join(sql)) + sql if isinstance(sql, str) else newline.join(sql)) statement = [] inComment = False inLineComment = False @@ -457,7 +291,7 @@ def createTestCasePerDSN(testCase, baseCls, dataSourceNames): def setupTestUser(udaExec, dsn, user=None, passwd=None, perm=100000000): - """A utility method for creating a test user to be use by unittests.""" + """A utility method for creating a test user to be used by unittests.""" if user is None: user = "py%s_%std_%s_test" % ( sys.version_info[0], sys.version_info[1], getpass.getuser()) @@ -471,12 +305,22 @@ def setupTestUser(udaExec, dsn, user=None, passwd=None, perm=100000000): if e.code == 3802: conn.execute( "CREATE USER " + user + - " FROM DBC AS PERM = %s, PASSWORD = %s" % (perm, passwd)) + " AS PERM = %s, PASSWORD = %s" % (perm, passwd)) conn.execute("GRANT UDTTYPE ON SYSUDTLIB to %s" % user) conn.execute( "GRANT CREATE PROCEDURE ON %s to %s" % (user, user)) return user +def cleanupTestUser (udaExec, dsn, user=None, passwd=None): + """A utility method for dropping a test user used by unittests.""" + if user is None: + user = "py%s_%std_%s_test" % ( + sys.version_info[0], sys.version_info[1], getpass.getuser()) + if passwd is None: + passwd = user + with udaExec.connect(dsn) as conn: + conn.execute("DELETE DATABASE " + user) + conn.execute("DROP USER " + user) class CommandLineArgumentParser: diff --git a/test/testClobSp.sql b/test/testClobSp.sql 
index e7bc5ba..23d2a73 100644 --- a/test/testClobSp.sql +++ b/test/testClobSp.sql @@ -76,5 +76,5 @@ BEGIN ) ; -END +END; ;; diff --git a/test/test_pulljson.py b/test/test_pulljson.py index 1727c55..46703dd 100644 --- a/test/test_pulljson.py +++ b/test/test_pulljson.py @@ -22,11 +22,7 @@ from teradata import pulljson import unittest import sys - -if sys.version_info[0] == 2: - from StringIO import StringIO # @UnresolvedImport #@UnusedImport -else: - from io import StringIO # @UnresolvedImport @UnusedImport @Reimport +from io import StringIO # @UnresolvedImport @UnusedImport @Reimport class TestJSONPullParser (unittest.TestCase): diff --git a/test/test_tdrest.py b/test/test_tdrest.py deleted file mode 100644 index 99ed03f..0000000 --- a/test/test_tdrest.py +++ /dev/null @@ -1,151 +0,0 @@ -# The MIT License (MIT) -# -# Copyright (c) 2015 by Teradata -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-import unittest -import os -import teradata -from teradata import tdrest, util - - -class TdRestTest (unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.username = cls.password = util.setupTestUser(udaExec, dsn) - - def testGlobals(self): - self.assertEqual(tdrest.apilevel, "2.0") - self.assertEqual(tdrest.threadsafety, 1) - self.assertEqual(tdrest.paramstyle, "qmark") - - def testBadHost(self): - badHost = "badhostname" - with self.assertRaises(tdrest.InterfaceError) as cm: - tdrest.connect( - host=badHost, system=system, username=self.username, - password=self.password) - self.assertEqual(cm.exception.code, tdrest.REST_ERROR) - self.assertTrue(badHost in cm.exception.msg, - '{} not found in "{}"'.format( - badHost, cm.exception.msg)) - - def testSystemNotFound(self): - with self.assertRaises(tdrest.InterfaceError) as cm: - tdrest.connect( - host=host, system="unknown", username=self.username, - password=self.password) - self.assertEqual(cm.exception.code, 404) - # print(cm.exception) - self.assertTrue( - "404" in cm.exception.msg, - '404 not found in "{}"'.format(cm.exception.msg)) - - def testBadCredentials(self): - with self.assertRaises(tdrest.DatabaseError) as cm: - tdrest.connect( - host=host, system=system, username="bad", password="bad") - # print(cm.exception) - self.assertEqual(cm.exception.code, 8017, cm.exception.msg) - - def testConnect(self): - conn = tdrest.connect( - host=host, system=system, username=self.username, - password=self.password) - self.assertIsNotNone(conn) - conn.close() - - def testCursorBasics(self): - with tdrest.connect(host=host, system=system, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - cursor = conn.cursor() - count = 0 - for row in cursor.execute("SELECT * FROM DBC.DBCInfo"): - self.assertEqual(len(row), 2) - self.assertIsNotNone(row[0]) - self.assertIsNotNone(row['InfoKey']) - self.assertIsNotNone(row['infokey']) - self.assertIsNotNone(row.InfoKey) - 
self.assertIsNotNone(row.infokey) - self.assertIsNotNone(row[1]) - self.assertIsNotNone(row['InfoData']) - self.assertIsNotNone(row['infodata']) - self.assertIsNotNone(row.infodata) - self.assertIsNotNone(row.InfoData) - - row[0] = "test1" - self.assertEqual(row[0], "test1") - self.assertEqual(row['InfoKey'], "test1") - self.assertEqual(row.infokey, "test1") - - row['infokey'] = "test2" - self.assertEqual(row[0], "test2") - self.assertEqual(row['InfoKey'], "test2") - self.assertEqual(row.infokey, "test2") - - row.infokey = "test3" - self.assertEqual(row[0], "test3") - self.assertEqual(row['InfoKey'], "test3") - self.assertEqual(row.InfoKey, "test3") - count += 1 - - self.assertEqual(cursor.description[0][0], "InfoKey") - self.assertEqual(cursor.description[0][1], tdrest.STRING) - self.assertEqual(cursor.description[1][0], "InfoData") - self.assertEqual(cursor.description[1][1], tdrest.STRING) - self.assertEqual(count, 3) - - def testExecuteWithParamsMismatch(self): - with self.assertRaises(teradata.InterfaceError) as cm: - with tdrest.connect(host=host, system=system, - username=self.username, - password=self.password, - autoCommit=True) as conn: - self.assertIsNotNone(conn) - with conn.cursor() as cursor: - cursor.execute( - "CREATE TABLE testExecuteWithParamsMismatch (id INT, " - "name VARCHAR(128), dob TIMESTAMP)") - cursor.execute( - "INSERT INTO testExecuteWithParamsMismatch " - "VALUES (?, ?, ?)", (1, "TEST", )) - self.assertEqual(cm.exception.code, 400, cm.exception.msg) - - def testSessionAlreadyClosed(self): - with tdrest.connect(host=host, system=system, username=self.username, - password=self.password, autoCommit=True) as conn: - self.assertIsNotNone(conn) - with conn.template.connect() as http: - http.delete( - "/systems/{}/sessions/{}".format(conn.system, - conn.sessionId)) - -configFiles = [os.path.join(os.path.dirname(__file__), 'udaexec.ini')] -udaExec = teradata.UdaExec(configFiles=configFiles, configureLogging=False) -dsn = 'HTTP' -restConfig = 
udaExec.config.section(dsn) -host = restConfig['host'] -system = restConfig['system'] -super_username = restConfig['username'] -super_password = restConfig['password'] - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_tdodbc.py b/test/test_teradatasql.py similarity index 71% rename from test/test_tdodbc.py rename to test/test_teradatasql.py index 4d2c619..7959688 100644 --- a/test/test_tdodbc.py +++ b/test/test_teradatasql.py @@ -22,47 +22,51 @@ import unittest import os import teradata -from teradata import tdodbc, util +from teradata import tdsql, util -class TdOdbcTest (unittest.TestCase): +class TdSqlTest (unittest.TestCase): @classmethod def setUpClass(cls): cls.username = cls.password = util.setupTestUser(udaExec, dsn) + @classmethod + def tearDownClass(cls): + util.cleanupTestUser(udaExec, dsn) + def testGlobals(self): - self.assertEqual(tdodbc.apilevel, "2.0") - self.assertEqual(tdodbc.threadsafety, 1) - self.assertEqual(tdodbc.paramstyle, "qmark") + self.assertEqual(tdsql.apilevel, "2.0") + self.assertEqual(tdsql.threadsafety, 1) + self.assertEqual(tdsql.paramstyle, "qmark") def testSystemNotFound(self): - with self.assertRaises(tdodbc.DatabaseError) as cm: - tdodbc.connect(system="continuum.td.teradata.com", + with self.assertRaises(tdsql.OperationalError) as cm: + tdsql.connect(system="hostNotFound.td.teradata.com", username=self.username, password=self.password) - self.assertTrue("08004" in cm.exception.msg, cm.exception) + self.assertTrue("Hostname lookup failed" in str(cm.exception), cm.exception) def testBadCredentials(self): - with self.assertRaises(tdodbc.DatabaseError) as cm: - tdodbc.connect(system=system, username="bad", password="bad") + with self.assertRaises(tdsql.DatabaseError) as cm: + tdsql.connect(system=system, username="bad", password="bad") self.assertEqual(cm.exception.code, 8017, cm.exception.msg) + with self.assertRaises(tdsql.InterfaceError) as cm: + tdsql.connect(system=system, + username=self.username, 
password=self.password, charset="UTF16") + self.assertTrue("Connection charset" in str(cm.exception), cm.exception) + def testConnect(self): - conn = tdodbc.connect( + conn1 = tdsql.connect( system=system, username=self.username, password=self.password) - self.assertIsNotNone(conn) - conn.close() - - def testConnectBadDriver(self): - with self.assertRaises(tdodbc.InterfaceError) as cm: - tdodbc.connect( - system=system, username=self.username, - password=self.password, - driver="BadDriver") - self.assertEqual(cm.exception.code, "DRIVER_NOT_FOUND") + self.assertIsNotNone(conn1) + conn2 = tdsql.connect( + system=system, username=self.username, password=self.password) + self.assertIsNotNone(conn2) + tdsql.cleanupConnections () def testCursorBasics(self): - with tdodbc.connect(system=system, username=self.username, + with tdsql.connect(system=system, username=self.username, password=self.password, autoCommit=True) as conn: self.assertIsNotNone(conn) with conn.cursor() as cursor: @@ -97,14 +101,14 @@ def testCursorBasics(self): count += 1 self.assertEqual(cursor.description[0][0], "InfoKey") - self.assertEqual(cursor.description[0][1], tdodbc.STRING) + self.assertEqual(cursor.description[0][1], tdsql.STRING) self.assertEqual(cursor.description[1][0], "InfoData") - self.assertEqual(cursor.description[1][1], tdodbc.STRING) + self.assertEqual(cursor.description[1][1], tdsql.STRING) self.assertEqual(count, 3) def testExecuteWithParamsMismatch(self): - with self.assertRaises(teradata.InterfaceError) as cm: - with tdodbc.connect(system=system, username=self.username, + with self.assertRaises(teradata.DatabaseError) as cm: + with tdsql.connect(system=system, username=self.username, password=self.password, autoCommit=True) as conn: self.assertIsNotNone(conn) @@ -116,15 +120,13 @@ def testExecuteWithParamsMismatch(self): "INSERT INTO testExecuteWithParamsMismatch " "VALUES (?, ?, ?)", (1, "TEST", )) self.assertEqual( - cm.exception.code, "PARAMS_MISMATCH", cm.exception.msg) + 
cm.exception.code, 3939, cm.exception.msg) configFiles = [os.path.join(os.path.dirname(__file__), 'udaexec.ini')] udaExec = teradata.UdaExec(configFiles=configFiles, configureLogging=False) -dsn = 'ODBC' -odbcConfig = udaExec.config.section(dsn) -system = odbcConfig['system'] -super_username = odbcConfig['username'] -super_password = odbcConfig['password'] +dsn = 'TERADATASQL' +tdsqlConfig = udaExec.config.section(dsn) +system = tdsqlConfig['system'] if __name__ == '__main__': unittest.main() diff --git a/test/test_udaexec_config.py b/test/test_udaexec_config.py index 7bfd9cd..71f7c44 100644 --- a/test/test_udaexec_config.py +++ b/test/test_udaexec_config.py @@ -86,7 +86,7 @@ def testResumeFromCheckPoint(self): udaExec = teradata.UdaExec( configFiles=configFiles, configureLogging=False) self.assertEqual(udaExec.resumeFromCheckpoint, checkpoint) - with udaExec.connect("ODBC") as session: + with udaExec.connect("TERADATASQL") as session: self.assertIsNone(session.execute( "SELECT 1").fetchone(), "Query was executed but should have been skipped.") @@ -105,7 +105,7 @@ def testResumeFromCheckPoint(self): self.assertEqual(udaExec.resumeFromCheckpoint, checkpoint) def testVariableResolutionEscapeCharacter(self): - with self.udaExec.connect("ODBC") as session: + with self.udaExec.connect("TERADATASQL") as session: self.assertEqual( session.execute( "SELECT '$${ThisShouldBeTreatedAsALiteral}'").fetchone()[ diff --git a/test/test_udaexec_datatypes.py b/test/test_udaexec_datatypes.py index c99953b..4fb05fe 100644 --- a/test/test_udaexec_datatypes.py +++ b/test/test_udaexec_datatypes.py @@ -19,9 +19,11 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+import codecs import datetime import decimal import json +import locale import logging import math import os @@ -41,41 +43,61 @@ def setUpClass(cls): cls.username = cls.password = util.setupTestUser(udaExec, cls.dsn) cls.failure = False + @classmethod + def tearDownClass(cls): + util.cleanupTestUser(udaExec, cls.dsn) + def testCharacterLimits(self): - # REST-310 - REST does not support CLOB inserts more than 64k - # characters. - if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - cursor = conn.execute( - """CREATE TABLE testCharacterLimits (id INTEGER, - a CHAR CHARACTER SET UNICODE, - b CHAR(4) CHARACTER SET UNICODE, - c VARCHAR(100) CHARACTER SET UNICODE, - d VARCHAR(16000) CHARACTER SET UNICODE, - e CLOB (2000000) CHARACTER SET UNICODE)""") - cursor.arraysize = 10 - params = [ - (101, u"\u3456", u"\u3456" * 4, u"\u3456" * 100, - u"\u3456" * 10666, u"\u3456" * 2000000), - (102, None, None, None, None, None)] - for p in params: - conn.execute( - "INSERT INTO testCharacterLimits " - "VALUES (?, ?, ?, ?, ?, ?)", p) - cursor = conn.execute("SELECT * FROM testCharacterLimits") - for desc in cursor.description: - print(desc) - for desc in cursor.types: - print(desc) - rowIndex = 0 - for row in cursor: - colIndex = 0 - for col in row: - self.assertEqual(col, params[rowIndex][colIndex]) - colIndex += 1 - rowIndex += 1 + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + cursor = conn.execute( + """CREATE TABLE testCharacterLimits (id INTEGER, + a CHAR CHARACTER SET UNICODE, + b CHAR(4) CHARACTER SET UNICODE, + c VARCHAR(100) CHARACTER SET UNICODE, + d VARCHAR(16000) CHARACTER SET UNICODE, + e CLOB (2000000) CHARACTER SET UNICODE)""") + cursor.arraysize = 10 + params = [ + (101, u"\u3456", u"\u3456" * 4, u"\u3456" * 100, + u"\u3456" * 10666, u"\u3456" * 2000000), + (102, None, None, None, None, None)] + for p 
in params:
+                conn.execute(
+                    "INSERT INTO testCharacterLimits "
+                    "VALUES (?, ?, ?, ?, ?, ?)", p)
+            cursor = conn.execute("SELECT * FROM testCharacterLimits")
+            aoDesc = [['id', decimal.Decimal, None, 4 , 10, None, True],
+                      ['a' , str , None, 3 , 0, None, True],
+                      ['b' , str , None, 12 , 0, None, True],
+                      ['c' , str , None, 300 , 0, None, True],
+                      ['d' , str , None, 48000 , 0, None, True],
+                      ['e' , str , None, 6000000, 0, None, True]]
+
+            aoType = [['INTEGER', decimal.Decimal],
+                      ['CHAR' , str],
+                      ['CHAR' , str],
+                      ['VARCHAR', str],
+                      ['VARCHAR', str],
+                      ['CLOB' , str]]
+            nRowIndex = 0
+            for desc in cursor.description:
+                for nCol in range (len (desc)):
+                    self.assertEqual (desc [nCol], aoDesc [nRowIndex][nCol])
+                nRowIndex += 1
+            nRowIndex = 0
+            for oType in cursor.types:
+                for nCol in range (len (oType)):
+                    self.assertEqual (oType [nCol], aoType [nRowIndex][nCol])
+                nRowIndex += 1
+            rowIndex = 0
+            for row in cursor:
+                colIndex = 0
+                for col in row:
+                    self.assertEqual(col, params[rowIndex][colIndex])
+                    colIndex += 1
+                rowIndex += 1

     def testStringDataTypes(self):
         with udaExec.connect(self.dsn, username=self.username,
@@ -98,166 +120,172 @@ def testStringDataTypes(self):
                 batch=True)
             for row in conn.execute("SELECT * FROM testStringDataTypes "
                                     "ORDER BY id"):
-                # SEE REST-309 for more details about why the strip is
-                # required.
+                # The strip is required on the CHAR columns because they are fixed width
+                # and therefore have additional spaces when compared to the data inserted.
                 self.assertEqual(row.a.strip(), str(row.id % 10))
                 self.assertEqual(row.a2.strip(), str(row.id % 100))
                 self.assertEqual(row.b, str(row.id) * 10)
                 self.assertEqual(row.c, str(row.id) * 20)
                 self.assertIsNone(row.d)
-                # REST-310 - REST does not support CLOB inserts more than 64k
-                # characters.
- if self.dsn == "ODBC": - unicodeString = u"\u4EC5\u6062\u590D\u914D\u7F6E\u3002\u73B0" - "\u6709\u7684\u5386\u53F2\u76D1\u63A7\u6570\u636E\u5C06" - "\u4FDD\u7559\uFF0C\u4E0D\u4F1A\u4ECE\u5907\u4EFD\u4E2D" - "\u6062\u590D\u3002" - params = (101, None, None, None, unicodeString * 100000, None) - conn.execute( - "INSERT INTO testStringDataTypes " - "VALUES (?, ?, ?, ?, ?, ?)", params) - for row in conn.execute("SELECT * FROM testStringDataTypes " - "WHERE id = 101"): - self.assertEqual(row.c, params[4]) - conn.executemany("INSERT INTO testStringDataTypes " - "VALUES (?, ?, ?, ?, ?, ?)", - [(i, str(i % 10), str(i % 100), str(i) * 10, - str(i % 10) * 64000, None) - for i in range(102, 112)], - batch=True) - for row in conn.execute("SELECT * FROM testStringDataTypes " - "WHERE id > 101"): - self.assertEqual(row.c, str(row.id % 10) * 64000) + + unicodeString = u"\u4EC5\u6062\u590D\u914D\u7F6E\u3002\u73B0" + "\u6709\u7684\u5386\u53F2\u76D1\u63A7\u6570\u636E\u5C06" + "\u4FDD\u7559\uFF0C\u4E0D\u4F1A\u4ECE\u5907\u4EFD\u4E2D" + "\u6062\u590D\u3002" + params = (101, None, None, None, unicodeString * 100000, None) + conn.execute( + "INSERT INTO testStringDataTypes " + "VALUES (?, ?, ?, ?, ?, ?)", params) + for row in conn.execute("SELECT * FROM testStringDataTypes " + "WHERE id = 101"): + self.assertEqual(row.c, params[4]) + conn.executemany("INSERT INTO testStringDataTypes " + "VALUES (?, ?, ?, ?, ?, ?)", + [(i, str(i % 10), str(i % 100), str(i) * 10, + str(i % 10) * 64000, None) + for i in range(102, 112)], + batch=True) + for row in conn.execute("{fn teradata_lobselect(S)}" + "SELECT * FROM testStringDataTypes " + "WHERE id > 101"): + self.assertEqual(row.c, (str(row.id % 10) * 64000)) def testBinaryLimits(self): - # REST Does not support binary data types at this time. 
- if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - cursor = conn.execute( - """CREATE TABLE testBinaryLimits (id INTEGER, - a BYTE, - c VARBYTE(10000), - e BLOB (2000000))""") - cursor.arraysize = 10 - params = [ - (101, bytearray(os.urandom(1)), - bytearray(os.urandom(10000)), - bytearray(os.urandom(2000000))), - (102, None, None, None)] - for p in params: - conn.execute( - "INSERT INTO testBinaryLimits " - "VALUES (?, ?, ?, ?)", p) - cursor = conn.execute("SELECT * FROM testBinaryLimits") - for desc in cursor.description: - print(desc) - for desc in cursor.types: - print(desc) - rowIndex = 0 - for row in cursor: - colIndex = 0 - for col in row: - self.assertEqual(col, params[rowIndex][colIndex]) - colIndex += 1 - rowIndex += 1 + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + cursor = conn.execute( + """CREATE TABLE testBinaryLimits (id INTEGER, + a BYTE, + c VARBYTE(10000), + e BLOB (2000000))""") + cursor.arraysize = 10 + params = [ + (101, bytearray(os.urandom(1)), + bytearray(os.urandom(10000)), + bytearray(os.urandom(2000000))), + (102, None, None, None)] + for p in params: + conn.execute( + "INSERT INTO testBinaryLimits " + "VALUES (?, ?, ?, ?)", p) + cursor = conn.execute("SELECT * FROM testBinaryLimits") + + aoDesc = [['id', decimal.Decimal, None, 4, 10, None, True], + ['a' , bytearray , None, 1, 0, None, True], + ['c' , bytearray , None, 10000, 0, None, True], + ['e' , bytearray , None, 2000000, 0, None, True]] + aoType = [['INTEGER', decimal.Decimal], + ['BYTE' , bytearray], + ['VARBYTE', bytearray], + ['BLOB' , bytearray]] + nRowIndex = 0 + for desc in cursor.description: + for nCol in range (len (desc)): + self.assertEqual (desc [nCol], aoDesc [nRowIndex][nCol]) + nRowIndex += 1 + nRowIndex = 0 + for oType in cursor.types: + for nCol in range (len (oType)): + self.assertEqual (oType 
[nCol], aoType [nRowIndex][nCol]) + nRowIndex += 1 + rowIndex = 0 + for row in cursor: + colIndex = 0 + for col in row: + self.assertEqual(col, params[rowIndex][colIndex]) + colIndex += 1 + rowIndex += 1 def testBinaryDataTypes(self): - # REST Does not support binary data types at this time. - if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - conn.execute( - "CREATE TABLE testByteDataTypes (id INTEGER, a BYTE, " - "b VARBYTE(6), c BYTE(4), d BLOB, e BLOB)") - conn.execute( - "INSERT INTO testByteDataTypes VALUES (1, 'FF'XBF, " - "'AABBCCDDEEFF'XBV, 'AABBCCDD'XBF, " - "'010203040506070809AABBCCDDEEFF'XBV, NULL)") - conn.execute("INSERT INTO testByteDataTypes " - "VALUES (2, ?, ?, ?, ?, ?)", - (bytearray([0xFF]), - bytearray([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF]), - bytearray([0xAA, 0xBB, 0xCC, 0xDD]), - bytearray([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - 0x07, 0x08, 0x09, 0xAA, 0xBB, 0xCC, - 0xDD, 0xEE, 0xFF]), None)) - for row in conn.execute("SELECT * FROM testByteDataTypes " - "ORDER BY id"): - self.assertEqual(row.a, bytearray([0xFF])) - self.assertEqual( - row.b, bytearray([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])) - self.assertEqual( - row.c, bytearray([0xAA, 0xBB, 0xCC, 0xDD])) - self.assertEqual(row.d, bytearray( - [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x8, 0x9, - 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])) - self.assertIsNone(row.e) - params = (3, bytearray(os.urandom(1)), - bytearray(os.urandom(6)), - bytearray(os.urandom(4)), - bytearray(os.urandom(10000000)), None) - conn.execute( - "INSERT INTO testByteDataTypes VALUES (?, ?, ?, ?, ?, ?)", - params) - for row in conn.execute("SELECT * FROM testByteDataTypes " - "WHERE id > 2 ORDER BY id"): - self.assertEqual(row.a, params[1]) - self.assertEqual(row.b, params[2]) - self.assertEqual(row.c, params[3]) - self.assertEqual(row.d, params[4]) - self.assertIsNone(row.e) - conn.execute("DELETE FROM testByteDataTypes WHERE id > 
2") - params = [(i, bytearray(os.urandom(1)), - bytearray(os.urandom(6)), - bytearray(os.urandom(4)), - bytearray(os.urandom(10000)), None) - for i in range(3, 100)] - conn.executemany( - "INSERT INTO testByteDataTypes VALUES (?, ?, ?, ?, ?, ?)", - params, batch=True) - for row in conn.execute("SELECT * FROM testByteDataTypes " - "WHERE id > 3 ORDER BY id"): - param = params[int(row.id) - 3] - self.assertEqual(row.a, param[1]) - self.assertEqual(row.b, param[2]) - self.assertEqual(row.c, param[3]) - self.assertEqual(row.d, param[4]) - self.assertIsNone(row.e) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + "CREATE TABLE testByteDataTypes (id INTEGER, a BYTE, " + "b VARBYTE(6), c BYTE(4), d BLOB, e BLOB)") + conn.execute( + "INSERT INTO testByteDataTypes VALUES (1, 'FF'XBF, " + "'AABBCCDDEEFF'XBV, 'AABBCCDD'XBF, " + "'010203040506070809AABBCCDDEEFF'XBV, NULL)") + conn.execute("INSERT INTO testByteDataTypes " + "VALUES (2, ?, ?, ?, ?, ?)", + (bytearray([0xFF]), + bytearray([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF]), + bytearray([0xAA, 0xBB, 0xCC, 0xDD]), + bytearray([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0xAA, 0xBB, 0xCC, + 0xDD, 0xEE, 0xFF]), None)) + for row in conn.execute("SELECT * FROM testByteDataTypes " + "ORDER BY id"): + self.assertEqual(row.a, bytearray([0xFF])) + self.assertEqual( + row.b, bytearray([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])) + self.assertEqual( + row.c, bytearray([0xAA, 0xBB, 0xCC, 0xDD])) + self.assertEqual(row.d, bytearray( + [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x8, 0x9, + 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])) + self.assertIsNone(row.e) + params = (3, bytearray(os.urandom(1)), + bytearray(os.urandom(6)), + bytearray(os.urandom(4)), + bytearray(os.urandom(10000000)), None) + conn.execute( + "INSERT INTO testByteDataTypes VALUES (?, ?, ?, ?, ?, ?)", + params) + for row in conn.execute("SELECT * FROM testByteDataTypes " + "WHERE id > 2 
ORDER BY id"): + self.assertEqual(row.a, params[1]) + self.assertEqual(row.b, params[2]) + self.assertEqual(row.c, params[3]) + self.assertEqual(row.d, params[4]) + self.assertIsNone(row.e) + conn.execute("DELETE FROM testByteDataTypes WHERE id > 2") + params = [(i, bytearray(os.urandom(1)), + bytearray(os.urandom(6)), + bytearray(os.urandom(4)), + bytearray(os.urandom(10000)), None) + for i in range(3, 100)] + conn.executemany( + "INSERT INTO testByteDataTypes VALUES (?, ?, ?, ?, ?, ?)", + params, batch=True) + for row in conn.execute("SELECT * FROM testByteDataTypes " + "WHERE id > 3 ORDER BY id"): + param = params[int(row.id) - 3] + self.assertEqual(row.a, param[1]) + self.assertEqual(row.b, param[2]) + self.assertEqual(row.c, param[3]) + self.assertEqual(row.d, param[4]) + self.assertIsNone(row.e) def testMixedDataTypes(self): - # Test for GitHub issue #7 - # REST Does not support binary data types at this time. - if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - conn.execute( - "CREATE TABLE testByteDataType (id INTEGER, b BYTE(4), " - "c CHAR(8) CHARACTER SET LATIN NOT CASESPECIFIC NOT NULL, " - "d BYTE(4))") - conn.execute("INSERT INTO testByteDataType " - "VALUES (1, ?, ?, ?)", - (bytearray([0xAA, 0xBB, 0xCC, 0xDD]), "test", - bytearray([0xDD, 0xCC, 0xBB, 0xAA]))) - for row in conn.execute("SELECT * FROM testByteDataType " - "WHERE id = 1"): - self.assertEqual( - row.b, bytearray([0xAA, 0xBB, 0xCC, 0xDD])) - self.assertEqual(row.c.strip(), "test") - self.assertEqual( - row.d, bytearray([0xDD, 0xCC, 0xBB, 0xAA])) - conn.execute("UPDATE testByteDataType SET b = ? 
WHERE c = ?", - (bytearray([0xAA, 0xAA, 0xAA, 0xAA]), "test")) - for row in conn.execute("SELECT * FROM testByteDataType " - "WHERE id = 1"): - self.assertEqual( - row.b, bytearray([0xAA, 0xAA, 0xAA, 0xAA])) - self.assertEqual(row.c.strip(), "test") - self.assertEqual( - row.d, bytearray([0xDD, 0xCC, 0xBB, 0xAA])) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + "CREATE TABLE testByteDataType (id INTEGER, b BYTE(4), " + "c CHAR(8) CHARACTER SET LATIN NOT CASESPECIFIC NOT NULL, " + "d BYTE(4))") + conn.execute("INSERT INTO testByteDataType " + "VALUES (1, ?, ?, ?)", + (bytearray([0xAA, 0xBB, 0xCC, 0xDD]), "test", + bytearray([0xDD, 0xCC, 0xBB, 0xAA]))) + for row in conn.execute("SELECT * FROM testByteDataType " + "WHERE id = 1"): + self.assertEqual( + row.b, bytearray([0xAA, 0xBB, 0xCC, 0xDD])) + self.assertEqual(row.c.strip(), "test") + self.assertEqual( + row.d, bytearray([0xDD, 0xCC, 0xBB, 0xAA])) + conn.execute("UPDATE testByteDataType SET b = ? WHERE c = ?", + (bytearray([0xAA, 0xAA, 0xAA, 0xAA]), "test")) + for row in conn.execute("SELECT * FROM testByteDataType " + "WHERE id = 1"): + self.assertEqual( + row.b, bytearray([0xAA, 0xAA, 0xAA, 0xAA])) + self.assertEqual(row.c.strip(), "test") + self.assertEqual( + row.d, bytearray([0xDD, 0xCC, 0xBB, 0xAA])) def testNumberLimits(self): with udaExec.connect( @@ -281,21 +309,17 @@ def testNumberLimits(self): decimal.Decimal("-." + "1" * 37))) params.append((2, -2 ** 7, -2 ** 15, -2 ** 31, -2 ** 63, float(-2 ** 63), decimal.Decimal("." + "1" * 37))) - print(params) conn.executemany( "INSERT INTO testNumericLimits (?, ?, ?, ?, ?, ?, ?)", params) cursor = conn.execute( "SELECT * FROM testNumericLimits ORDER BY id") - py2 = sys.version_info[0] == 2 rowIndex = 0 for r in cursor: colIndex = 0 for col in r: - # Precious is lost with floats and REST. 
- if colIndex != 5 or self.dsn == 'ODBC' or not py2: - self.assertEqual( - col, params[rowIndex][colIndex]) + self.assertEqual( + col, params[rowIndex][colIndex]) colIndex += 1 rowIndex += 1 @@ -328,10 +352,7 @@ def testNumericDataTypes(self): "99999.99999, 99999.999999)") cursor = conn.execute( "SELECT * FROM testNumericDataTypes ORDER BY id") - # for t in cursor.types: - # print(t) for row in cursor: - # print(row) if row.id < 128: for col in row: self.assertEqual(col, row.id) @@ -342,7 +363,7 @@ def testNumericDataTypes(self): pass elif count < 6: self.assertEqual(col, 10 ** count - 1) - elif count < 9 or self.dsn != 'ODBC': + elif count < 9: self.assertEqual( col, decimal.Decimal("99999." + "9" * (count - 5))) @@ -352,45 +373,6 @@ def testNumericDataTypes(self): (count - 5))) count += 1 - def testInfinityAndNaN(self): - self.assertEqual(float('inf'), decimal.Decimal('Infinity')) - self.assertEqual(float('-inf'), decimal.Decimal('-Infinity')) - self.assertEqual( - math.isnan(float('NaN')), math.isnan(decimal.Decimal('NaN'))) - # Infinities are not support by REST. 
- if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - conn.execute("CREATE TABLE testInfinity (id INTEGER, " - "a FLOAT)") - for batch in (False, True): - offset = 6 if batch else 0 - conn.executemany( - "INSERT INTO testInfinity (?, ?)", - ((1 + offset, float('Inf')), - (2 + offset, decimal.Decimal('Infinity'))), - batch=batch) - for row in conn.execute("SELECT * FROM testInfinity " - "WHERE id > ?", (offset, )): - self.assertEqual(row[1], float('inf')) - conn.executemany( - "INSERT INTO testInfinity (?, ?)", - ((3 + offset, float('-Inf')), - (4 + offset, decimal.Decimal('-Infinity'))), - batch=batch) - for row in conn.execute("SELECT * FROM testInfinity " - "WHERE id > ?", (2 + offset, )): - self.assertEqual(row[1], float('-inf')) - conn.executemany( - "INSERT INTO testInfinity (?, ?)", - ((5 + offset, float('NaN')), - (6 + offset, decimal.Decimal('NaN'))), - batch=batch) - for row in conn.execute("SELECT * FROM testInfinity " - "WHERE id > ?", (4 + offset, )): - self.assertTrue(math.isnan(row[1])) - def testFloatTypes(self): for useFloat in (False, True): with udaExec.connect( @@ -411,19 +393,14 @@ def testFloatTypes(self): paramCount = 5 for i in range(2, paramCount): f = i / (i - 1) - if self.dsn == 'ODBC': - params.append( - [i, f, decimal.Decimal(f), f, str(f), f, - decimal.Decimal(f)]) - else: - # REST doesn't like large str conversion of - # decimal.Decimal(f) - params.append([i, f, f, str(f), f, f, f]) + params.append( + [i, f, decimal.Decimal(str(f)), f, str(f), f, + decimal.Decimal(str(f))]) params.append([paramCount, None, None, None, None, None, None]) f = math.sqrt(3) self.assertEqual(f, decimal.Decimal(f)) self.assertEqual(f, float(decimal.Decimal(f))) - params.append([paramCount + 1, f, f, f, f, f, f]) + params.append([paramCount + 1, f, decimal.Decimal(str(f)), f, str(f), f, decimal.Decimal(str(f))]) for batch in (False, True): conn.executemany( "INSERT 
INTO testFloatTypes (?, ?, ?, ?, ?, ?, ?)", @@ -431,12 +408,9 @@ def testFloatTypes(self): count = 0 for row in conn.execute("SELECT * FROM testFloatTypes " "ORDER BY id"): - # REST-312 - floating point number precision is lost - # when get float as a string from JDBC driver. - if self.dsn == 'ODBC': - self.assertEqual(row.a1, params[count][1]) - self.assertEqual(row.b1, params[count][1]) - self.assertEqual(row.c1, params[count][1]) + self.assertEqual(row.a1, params[count][1]) + self.assertEqual(row.b1, params[count][1]) + self.assertEqual(row.c1, params[count][1]) self.assertEqual(row.a1, row.a2) self.assertEqual(row.b1, row.b2) self.assertEqual(row.c1, row.c2) @@ -509,25 +483,21 @@ def testDateAndTimeDataTypes(self): self.assertEqual(t.month, 5) self.assertEqual(t.day, 18) if count != 4: - # Per REST-302 - Time is being returned in GMT. - if count != 1 or self.dsn == "ODBC": + if count != 1: self.assertEqual(t.hour, 12, "Count is {}".format(count)) self.assertEqual(t.minute, 34) self.assertEqual(t.second, 56) self.assertEqual(t.microsecond, 789000) count += 1 - # Time zone information is not coming back for REST per - # REST-302. 
- if self.dsn == "ODBC": - self.assertEqual( - row.timestampWithZone.tzinfo.utcoffset(None), - datetime.timedelta(hours=-5)) - self.assertEqual(row.timeWithZone.tzinfo.utcoffset( - None), datetime.timedelta(hours=10, minutes=30)) - self.assertEqual( - row.timestampWithZone, timestampWithZone) - self.assertEqual(row.timeWithZone, timeWithZone) + self.assertEqual( + row.timestampWithZone.tzinfo.utcoffset(None), + datetime.timedelta(hours=-5)) + self.assertEqual(row.timeWithZone.tzinfo.utcoffset( + None), datetime.timedelta(hours=10, minutes=30)) + self.assertEqual( + row.timestampWithZone, timestampWithZone) + self.assertEqual(row.timeWithZone, timeWithZone) self.assertEqual(row.timestamp, timestamp) self.assertEqual(row.time, time) self.assertEqual(row.date, date) @@ -769,17 +739,12 @@ def testArrayDataTypes(self): "NEW ${arrayPrefix}_test_int_array (11, 12, 13, 21, 22, 23, " "31, 32, 33, 41, 42, 43, 51, 52, 53, 61, 62, 63, 71, 72, 73, " "81, 82, 83))") - # REST-304 - REST Does not support array data types. - if self.dsn == "ODBC": - cursor = conn.execute( - "SELECT * FROM testArrayDataTypes ORDER BY id") - # for t in cursor.types: - # Type comes back as VARCHAR() =( - # print(t) - for row in cursor: - self.assertEqual( - row.integerArray, "(11,12,13,21,22,23,31,32,33,41,42," - "43,51,52,53,61,62,63,71,72,73,81,82,83)") + cursor = conn.execute( + "SELECT * FROM testArrayDataTypes ORDER BY id") + for row in cursor: + self.assertEqual( + row.integerArray, "(11,12,13,21,22,23,31,32,33,41,42," + "43,51,52,53,61,62,63,71,72,73,81,82,83)") def testJSONDataTypes(self): with udaExec.connect(self.dsn, username=self.username, @@ -824,102 +789,100 @@ def testJSONDataTypes(self): self.assertIsNone(row.data2) def testPeriodDataTypes(self): - # REST-304 - REST Does not support for period data types. 
- if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - conn.execute("""CREATE TABLE testPeriodDataTypes (id INTEGER, - a PERIOD(DATE), - b PERIOD(DATE) FORMAT 'YYYY-MM-DD', - c PERIOD(DATE) FORMAT 'YYYYMMDD', - d PERIOD(TIMESTAMP), - e PERIOD(TIMESTAMP WITH TIME ZONE), - f PERIOD(TIME), - g PERIOD(TIME WITH TIME ZONE))""") - - period = datatypes.Period( - datetime.date(1980, 4, 10), datetime.date(2015, 7, 2)) - conn.execute( - "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " - "(1, PERIOD(DATE '1980-04-10', DATE '2015-07-02'), " - "'(1980-04-10, 2015-07-02)'," - "'(1980-04-10, 2015-07-02))')") - conn.execute( - "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " - "(2, ?, ?, ?)", - ("('1980-04-10', '2015-07-02')", - '(1980-04-10, 2015-07-02)', - '(1980-04-10, 2015-07-02)')) - conn.execute( - "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " - "(3, ?, ?, ?)", (period, period, period)) - for row in conn.execute("SELECT * FROM testPeriodDataTypes " - "WHERE id IN (1,2,3) ORDER BY id"): - self.assertEqual(row.a, period) - self.assertEqual(row.b, period) - self.assertEqual(row.c, period) - - periodWithZone = datatypes.Period( - datetime.datetime(1980, 4, 10, 23, 45, 15, 0, - datatypes.TimeZone("+", 0, 0)), - datetime.datetime(2015, 7, 2, 17, 36, 33, 0, - datatypes.TimeZone("+", 0, 0))) - periodWithoutZone = datatypes.Period( - datetime.datetime(1980, 4, 10, 23, 45, 15), - datetime.datetime(2015, 7, 2, 17, 36, 33)) - conn.execute( - "INSERT INTO testPeriodDataTypes (id, d, e) VALUES " - "(4, PERIOD(TIMESTAMP '1980-04-10 23:45:15', " - "TIMESTAMP '2015-07-02 17:36:33'), " - "'(1980-04-10 23:45:15+00:00, " - "2015-07-02 17:36:33+00:00)')") - conn.execute( - "INSERT INTO testPeriodDataTypes (id, d, e) VALUES " - "(5, ?, ?)", (periodWithoutZone, periodWithZone)) - for row in conn.execute("SELECT * FROM testPeriodDataTypes " - "WHERE id IN (4,5) ORDER BY id"): - 
self.assertEqual( - row.d, periodWithoutZone, str(row.d) + "!=" + - str(periodWithoutZone)) - self.assertEqual( - row.e, periodWithZone, str(row.e) + "!=" + - str(periodWithZone)) - - timeWithZone = datatypes.Period( - datetime.time(17, 36, 33, 0, - datatypes.TimeZone("+", 0, 0)), - datetime.time(23, 45, 15, 0, - datatypes.TimeZone("+", 0, 0))) - timeWithoutZone = datatypes.Period( - datetime.time(17, 36, 33), datetime.time(23, 45, 15)) - conn.execute( - "INSERT INTO testPeriodDataTypes (id, f, g) VALUES " - "(6, PERIOD(TIME '17:36:33', TIME '23:45:15'), " - "'(17:36:33+00:00, 23:45:15+00:00)')") - conn.execute( - "INSERT INTO testPeriodDataTypes (id, f, g) VALUES " - "(7, ?, ?)", (timeWithoutZone, timeWithZone)) - for row in conn.execute("SELECT * FROM testPeriodDataTypes " - "WHERE id IN (6,7) ORDER BY id"): - self.assertEqual( - row.f, timeWithoutZone, str(row.f) + "!=" + - str(timeWithoutZone)) - self.assertEqual( - row.g, timeWithZone, str(row.g) + "!=" + - str(timeWithZone)) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute("""CREATE TABLE testPeriodDataTypes (id INTEGER, + a PERIOD(DATE), + b PERIOD(DATE) FORMAT 'YYYY-MM-DD', + c PERIOD(DATE) FORMAT 'YYYYMMDD', + d PERIOD(TIMESTAMP), + e PERIOD(TIMESTAMP WITH TIME ZONE), + f PERIOD(TIME), + g PERIOD(TIME WITH TIME ZONE))""") - periodUntilChange = datatypes.Period( - datetime.date(1980, 4, 10), datetime.date(9999, 12, 31)) - conn.execute( - "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " - "(8, PERIOD(DATE '1980-04-10', UNTIL_CHANGED), " - "PERIOD(DATE '1980-04-10', UNTIL_CHANGED), NULL)") - for row in conn.execute("SELECT * FROM testPeriodDataTypes " - "WHERE id IN (8) ORDER BY id"): - self.assertEqual(row.a, periodUntilChange) - self.assertEqual(row.b, periodUntilChange) - self.assertIsNone(row.c) + period = datatypes.Period( + datetime.date(1980, 4, 10), datetime.date(2015, 7, 2)) + conn.execute( + "INSERT INTO 
testPeriodDataTypes (id, a, b, c) VALUES " + "(1, PERIOD(DATE '1980-04-10', DATE '2015-07-02'), " + "'(1980-04-10, 2015-07-02)'," + "'(1980-04-10, 2015-07-02))')") + conn.execute( + "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " + "(2, ?, ?, ?)", + ("('1980-04-10', '2015-07-02')", + '(1980-04-10, 2015-07-02)', + '(1980-04-10, 2015-07-02)')) + conn.execute( + "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " + "(3, ?, ?, ?)", (period, period, period)) + for row in conn.execute("SELECT * FROM testPeriodDataTypes " + "WHERE id IN (1,2,3) ORDER BY id"): + self.assertEqual(row.a, period) + self.assertEqual(row.b, period) + self.assertEqual(row.c, period) + + periodWithZone = datatypes.Period( + datetime.datetime(1980, 4, 10, 23, 45, 15, 0, + datatypes.TimeZone("+", 0, 0)), + datetime.datetime(2015, 7, 2, 17, 36, 33, 0, + datatypes.TimeZone("+", 0, 0))) + periodWithoutZone = datatypes.Period( + datetime.datetime(1980, 4, 10, 23, 45, 15), + datetime.datetime(2015, 7, 2, 17, 36, 33)) + conn.execute( + "INSERT INTO testPeriodDataTypes (id, d, e) VALUES " + "(4, PERIOD(TIMESTAMP '1980-04-10 23:45:15', " + "TIMESTAMP '2015-07-02 17:36:33'), " + "'(1980-04-10 23:45:15+00:00, " + "2015-07-02 17:36:33+00:00)')") + conn.execute( + "INSERT INTO testPeriodDataTypes (id, d, e) VALUES " + "(5, ?, ?)", (periodWithoutZone, periodWithZone)) + for row in conn.execute("SELECT * FROM testPeriodDataTypes " + "WHERE id IN (4,5) ORDER BY id"): + self.assertEqual( + row.d, periodWithoutZone, str(row.d) + "!=" + + str(periodWithoutZone)) + self.assertEqual( + row.e, periodWithZone, str(row.e) + "!=" + + str(periodWithZone)) + + timeWithZone = datatypes.Period( + datetime.time(17, 36, 33, 0, + datatypes.TimeZone("+", 0, 0)), + datetime.time(23, 45, 15, 0, + datatypes.TimeZone("+", 0, 0))) + timeWithoutZone = datatypes.Period( + datetime.time(17, 36, 33), datetime.time(23, 45, 15)) + conn.execute( + "INSERT INTO testPeriodDataTypes (id, f, g) VALUES " + "(6, PERIOD(TIME '17:36:33', 
TIME '23:45:15'), " + "'(17:36:33+00:00, 23:45:15+00:00)')") + conn.execute( + "INSERT INTO testPeriodDataTypes (id, f, g) VALUES " + "(7, ?, ?)", (timeWithoutZone, timeWithZone)) + for row in conn.execute("SELECT * FROM testPeriodDataTypes " + "WHERE id IN (6,7) ORDER BY id"): + self.assertEqual( + row.f, timeWithoutZone, str(row.f) + "!=" + + str(timeWithoutZone)) + self.assertEqual( + row.g, timeWithZone, str(row.g) + "!=" + + str(timeWithZone)) + + periodUntilChange = datatypes.Period( + datetime.date(1980, 4, 10), datetime.date(9999, 12, 31)) + conn.execute( + "INSERT INTO testPeriodDataTypes (id, a, b, c) VALUES " + "(8, PERIOD(DATE '1980-04-10', UNTIL_CHANGED), " + "PERIOD(DATE '1980-04-10', UNTIL_CHANGED), NULL)") + for row in conn.execute("SELECT * FROM testPeriodDataTypes " + "WHERE id IN (8) ORDER BY id"): + self.assertEqual(row.a, periodUntilChange) + self.assertEqual(row.b, periodUntilChange) + self.assertIsNone(row.c) def testLargeTestView(self): with udaExec.connect(self.dsn, username=self.username, @@ -928,18 +891,1034 @@ def testLargeTestView(self): os.path.dirname(__file__), "testlargeview.sql") conn.execute(file=scriptFile) view = conn.execute("SHOW VIEW LARGE_TEST_VIEW").fetchone()[0] - # print(view) self.assertEqual(len(view), 30398) -# The unit tests in the UdaExecExecuteTest are execute once for each named -# data source below. 
+ def testBatchPeriodIntervalDataTypes(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute("""CREATE TABLE testBatchPeriodIntervalDataTypes ( + id INTEGER, + a PERIOD(DATE), + b PERIOD(TIMESTAMP), + c PERIOD(TIMESTAMP WITH TIME ZONE), + d PERIOD(TIME), + e PERIOD(TIME WITH TIME ZONE), + f INTERVAL YEAR TO MONTH, + g INTERVAL MONTH, + h INTERVAL DAY TO SECOND, + i INTERVAL HOUR TO SECOND, + j INTERVAL MINUTE TO SECOND)""") + + d = datetime.date + t = datetime.time + dt = datetime.datetime + tz = datatypes.TimeZone + period = datatypes.Period + interval = datatypes.Interval + + periodDate = period(d(1995, 4, 15), d(2020, 7, 15)) + + periodTS = period( + dt(1970, 1, 2, 3, 4, 5, 100000), + dt(1976, 7, 8, 9, 10, 11, 900000)) + + periodTSWithTZ = period ( + dt(1970, 1, 2, 3, 4, 5, 123000, tz("+", 5, 30)), + dt(1976, 7, 8, 9, 10, 11, 123000, tz("+", 5, 30))) + + periodTime = period( + t(11, 22, 33, 234560), + t(22, 33, 44, 345600)) + + periodTimeWithTZ = period( + t( 3, 4, 5, 600000, tz("+", 0, 30)), + t(12, 13, 14, 140000, tz("+", 5, 30))) + + intervalYearToMonth = interval(years=3, months=3) + intervalMonth = interval(months=3) + intervalDayToSec = interval(days=3, hours=3, minutes=3, seconds=3.03) + intervalHourToSec = interval(hours=3, minutes=3, seconds=3) + intervalMinToSec = interval(minutes=3, seconds=3) + + + conn.executemany( + "INSERT INTO testBatchPeriodIntervalDataTypes " + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + [[ + 1, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, + intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + ],[ + 2, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, + intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + + ],[ + 3, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, 
+ intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + + ]], batch=True) + + conn.executemany( + "INSERT INTO testBatchPeriodIntervalDataTypes " + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + [[ + 4, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, + intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + ],[ + 5, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, + intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + + ],[ + 6, + periodDate, + periodTS, + periodTSWithTZ, + periodTime, + periodTimeWithTZ, + intervalYearToMonth, + intervalMonth, + intervalDayToSec, + intervalHourToSec, + intervalMinToSec + + ]], batch=False) + + nRowNum = 1 + for row in conn.execute("SELECT * FROM testBatchPeriodIntervalDataTypes ORDER BY id"): + self.assertEqual(row.id, nRowNum) + self.assertEqual(row.a, periodDate, str(row.a) + "!=" + str(periodDate)) + self.assertEqual(row.b, periodTS, str(row.b) + "!=" + str(periodTS)) + self.assertEqual(row.c, periodTSWithTZ, str(row.c) + "!=" + str(periodTSWithTZ)) + self.assertEqual(row.d, periodTime, str(row.d) + "!=" + str(periodTime)) + self.assertEqual(row.e, periodTimeWithTZ, str(row.e) + "!=" + str(periodTimeWithTZ)) + self.assertEqual(row.f, intervalYearToMonth, str(row.f) + "!=" + str(intervalYearToMonth)) + self.assertEqual(row.g, intervalMonth, str(row.g) + "!=" + str(intervalMonth)) + self.assertEqual(row.h, intervalDayToSec, str(row.h) + "!=" + str(intervalDayToSec)) + self.assertEqual(row.i, intervalHourToSec, str(row.i) + "!=" + str(intervalHourToSec)) + self.assertEqual(row.j, intervalMinToSec, str(row.j) + "!=" + str(intervalMinToSec)) + nRowNum += 1 + #end testBatchPeriodDataTypes + + def testProcedurePeriodNulls(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + 
conn.execute( + """REPLACE PROCEDURE testProcedurePeriodNulls + ( + in p1 period (date) , inout p2 period (date) , out p3 period (date) , + in p4 period (time(0)) , inout p5 period (time(0)) , out p6 period (time(0)) , + in p7 period (time) , inout p8 period (time) , out p9 period (time) , + in p10 period (time(0) with time zone) , inout p11 period (time(0) with time zone) , out p12 period (time(0) with time zone) , + in p13 period (time (4) with time zone) , inout p14 period (time (4) with time zone) , out p15 period (time (4) with time zone) , + in p16 period (timestamp (0)) , inout p17 period (timestamp (0)) , out p18 period (timestamp (0)) , + in p19 period (timestamp) , inout p20 period (timestamp) , out p21 period (timestamp) , + in p22 period (timestamp (0) with time zone), inout p23 period (timestamp (0) with time zone), out p24 period (timestamp (0) with time zone), + in p25 period (timestamp (3) with time zone), inout p26 period (timestamp (3) with time zone), out p27 period (timestamp (3) with time zone) + ) begin + set p3 = p2 ; set p2 = p1 ; + set p6 = p5 ; set p5 = p4 ; + set p9 = p8 ; set p8 = p7 ; + set p12 = p11 ; set p11 = p10 ; + set p15 = p14 ; set p14 = p13 ; + set p18 = p17 ; set p17 = p16 ; + set p21 = p20 ; set p20 = p19 ; + set p24 = p23 ; set p23 = p22 ; + set p27 = p26 ; set p26 = p25 ; + END;""") + + try: + d = datetime.date + t = datetime.time + dt = datetime.datetime + tz = datatypes.TimeZone + period = datatypes.Period + + aaoParameters = [ + [ # Use period types in IN/INOUT parameters and cast as PERIOD types + teradata.InParam (None, dataType='PERIOD (DATE)'), # p1 period(date) + teradata.InOutParam(period(d(2000, 12, 22),d(2008, 10, 27)), "p2", dataType='PERIOD (DATE)'), # p2 period(date) + teradata.OutParam ("p3", dataType="PERIOD (DATE)"), # p3 period(date) + + teradata.InParam (period (t (9, 9, 9), t (10, 10, 10)), dataType='PERIOD (TIME (0))'), # p4 period(time (0)) + teradata.InOutParam(None, "p5", dataType = 'PERIOD (TIME 
(0))'), # p5 period(time (0)) + teradata.OutParam ("p6", dataType = 'PERIOD (TIME (0))'), # p6 period(time (0)) + + teradata.InParam(None, dataType='PERIOD (TIME)'), # p7 period(time) + teradata.InOutParam(period(t(8, 45, 59, 500600), t(12, 10, 45, 123000)), "p8", dataType = 'PERIOD (TIME)'), # p8 period(time) + teradata.OutParam("p9", dataType = 'PERIOD (TIME)'), # p9 period(time) + + teradata.InParam(period(t (2, 12, 12, 0, tz("+", 0, 30)), t (22, 3, 44, 0, tz("+", 5, 30))), dataType='PERIOD (TIME (0) WITH TIME ZONE)'), #p10 period(time) + teradata.InOutParam(None, "p11", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p11 period(time) + teradata.OutParam("p12", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p12 period(time) + + teradata.InParam(period(t (3, 4, 5, 60000, tz("+", 0, 30)), t (12, 13, 14, 561000, tz ("+", 5, 30))), dataType='PERIOD (TIME (4) WITH TIME ZONE)'), #p13 period(time (4) with time zone) + teradata.InOutParam(None, "p14", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p14 period(time (4) with time zone) + teradata.OutParam("p15", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p15 period(time (4) with time zone) + + teradata.InParam(None, dataType='PERIOD (TIMESTAMP (0))'), #p16 period(time (0) with time zone) + teradata.InOutParam(period(dt(1980, 5, 3, 3, 4, 5), dt (1986, 8, 7, 1, 10, 11)), "p17", dataType = 'PERIOD (TIMESTAMP (0))'), #p17 period(time (0) with time zone) + teradata.OutParam("p18", dataType = 'PERIOD (TIMESTAMP (0))'), #p18 period(time (0) with time zone) + + teradata.InParam(period(dt(1981, 6, 4, 4, 5, 6, 456000), dt(1986, 7, 8, 11, 10, 11, 135600)), dataType = 'PERIOD (TIMESTAMP)'), #p19 period(timestamp with time zone) + teradata.InOutParam(None, "p20", dataType = 'PERIOD (TIMESTAMP)'), #p20 period(timestamp with time zone) + teradata.OutParam("p21", dataType = 'PERIOD (TIMESTAMP)'), #p21 period(timestamp with time zone) + + teradata.InParam(period(dt(2000, 1, 1, 0, 1, 5, 0, tz("+", 5, 30)), dt(2000, 12, 31, 11, 
59, 0, 0, tz("+", 5, 30))), dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p22 period(timestamp (0) with time zone) + teradata.InOutParam(None, "p23", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p23 period(timestamp (0) with time zone) + teradata.OutParam("p24", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p24 period(timestamp (0) with time zone) + + teradata.InParam(None, dataType='PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p25 period(timestamp (3) with time zone) + teradata.InOutParam(period(dt(2003, 10, 27, 8, 10, 30, 123000, tz("+", 5, 30)), dt(2019, 5, 6, 10, 21, 0, 560000, tz ("+", 5, 30))), "p26", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p26 period(timestamp (3) with time zone) + teradata.OutParam("p27", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p27 period(timestamp (3) with time zone) + + ],[ # Same values as above but use string to represent periods and cast as period type + teradata.InParam (None, dataType='PERIOD (DATE)'), # p1 period(date) + teradata.InOutParam("2000-12-22,2008-10-27", "p2", dataType='PERIOD (DATE)'), # p2 period(date) + teradata.OutParam ("p3", dataType="PERIOD (DATE)"), # p3 period(date) + + teradata.InParam ("09:09:09,10:10:10", dataType='PERIOD (TIME (0))'), # p4 period(time (0)) + teradata.InOutParam(None, "p5", dataType = 'PERIOD (TIME (0))'), # p5 period(time (0)) + teradata.OutParam ("p6", dataType = 'PERIOD (TIME (0))'), # p6 period(time (0)) + + teradata.InParam (None, dataType='PERIOD (TIME)'), # p7 period(time) + teradata.InOutParam("08:45:59.5006,12:10:45.123", "p8", dataType = 'PERIOD (TIME)'), # p8 period(time) + teradata.OutParam ("p9", dataType = 'PERIOD (TIME)'), # p9 period(time) + + teradata.InParam ("02:12:12+00:30,22:03:44+05:30", dataType='PERIOD (TIME (0) WITH TIME ZONE)'), #p10 period(time) + teradata.InOutParam(None, "p11", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p11 period(time) + teradata.OutParam ("p12", dataType = 'PERIOD (TIME (0) WITH TIME 
ZONE)'), #p12 period(time) + + teradata.InParam ("03:04:05.06+00:30,12:13:14.561+05:30", dataType='PERIOD (TIME (4) WITH TIME ZONE)'), #p13 period(time (4) with time zone) + teradata.InOutParam(None, "p14", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p14 period(time (4) with time zone) + teradata.OutParam ("p15", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p15 period(time (4) with time zone) + + teradata.InParam (None, dataType='PERIOD (TIMESTAMP (0))'), #p16 period(time (0) with time zone) + teradata.InOutParam("1980-05-03 03:04:05,1986-08-07 01:10:11", "p17", dataType = 'PERIOD (TIMESTAMP (0))'), #p17 period(time (0) with time zone) + teradata.OutParam ("p18", dataType = 'PERIOD (TIMESTAMP (0))'), #p18 period(time (0) with time zone) + + teradata.InParam ("1981-06-04 04:05:06.456000,1986-07-08 11:10:11.135600", dataType = 'PERIOD (TIMESTAMP)'), #p19 period(timestamp with time zone) + teradata.InOutParam(None, "p20", dataType = 'PERIOD (TIMESTAMP)'), #p20 period(timestamp with time zone) + teradata.OutParam ("p21", dataType = 'PERIOD (TIMESTAMP)'), #p21 period(timestamp with time zone) + + teradata.InParam ("2000-01-01 00:01:05+05:30,2000-12-31 11:59:00+05:30", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p22 period(timestamp (0) with time zone) + teradata.InOutParam(None, "p23", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p23 period(timestamp (0) with time zone) + teradata.OutParam ("p24", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p24 period(timestamp (0) with time zone) + + teradata.InParam (None, dataType='PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p25 period(timestamp (3) with time zone) + teradata.InOutParam("2003-10-27 08:10:30.123+05:30,2019-05-06 10:21:00.56+05:30", "p26", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p26 period(timestamp (3) with time zone) + teradata.OutParam ("p27", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p27 period(timestamp (3) with time zone) + ]] + + for i in range (len 
(aaoParameters)): + result = conn.callproc("testProcedurePeriodNulls", aaoParameters [i]) + self.assertEqual (len (result), len (aaoParameters [i])) + nParam = 2 + for p in range (0, int (len (result) * 2/3)): + # Output is returned as period types so always compare against 1st param set + self.assertEqual (result ["p{}".format(nParam)], aaoParameters [0][nParam - 2].inValue) + nParam += (p + 2) % 2 + 1 + finally: + conn.execute ("DROP PROCEDURE testProcedurePeriodNulls") + # end testProcedurePeriodNulls + + def testProcedureIntervalNulls(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + """REPLACE PROCEDURE testProcedureIntervalNulls + ( + in p1 interval year , inout p2 interval year , out p3 interval year, + in p4 interval year(4) , inout p5 interval year(4) , out p6 interval year(4), + in p7 interval year to month , inout p8 interval year to month , out p9 interval year to month, + in p10 interval year(4) to month , inout p11 interval year(4) to month , out p12 interval year(4) to month, + in p13 interval month , inout p14 interval month , out p15 interval month , + in p16 interval month(4) , inout p17 interval month(4) , out p18 interval month(4), + in p19 interval day , inout p20 interval day , out p21 interval day, + in p22 interval day(4) , inout p23 interval day(4) , out p24 interval day(4), + in p25 interval day to hour , inout p26 interval day to hour , out p27 interval day to hour, + in p28 interval day(4) to hour , inout p29 interval day(4) to hour , out p30 interval day(4) to hour, + in p31 interval day to minute , inout p32 interval day to minute , out p33 interval day to minute, + in p34 interval day(4) to minute , inout p35 interval day(4) to minute , out p36 interval day(4) to minute, + in p37 interval day to second(0) , inout p38 interval day to second(0) , out p39 interval day to second(0), + in p40 interval day to second , inout p41 interval day to 
second , out p42 interval day to second, + in p43 interval day(4) to second(0) , inout p44 interval day(4) to second(0) , out p45 interval day(4) to second(0), + in p46 interval day(4) to second , inout p47 interval day(4) to second , out p48 interval day(4) to second, + in p49 interval hour , inout p50 interval hour , out p51 interval hour, + in p52 interval hour(4) , inout p53 interval hour(4) , out p54 interval hour(4), + in p55 interval hour to minute , inout p56 interval hour to minute , out p57 interval hour to minute, + in p58 interval hour(4) to minute , inout p59 interval hour(4) to minute , out p60 interval hour(4) to minute, + in p61 interval hour to second(0) , inout p62 interval hour to second(0) , out p63 interval hour to second(0), + in p64 interval hour to second , inout p65 interval hour to second , out p66 interval hour to second, + in p67 interval hour(4) to second(0) , inout p68 interval hour(4) to second(0) , out p69 interval hour(4) to second(0), + in p70 interval hour(4) to second , inout p71 interval hour(4) to second , out p72 interval hour(4) to second, + in p73 interval minute , inout p74 interval minute , out p75 interval minute, + in p76 interval minute(4) , inout p77 interval minute(4) , out p78 interval minute(4), + in p79 interval minute to second(0) , inout p80 interval minute to second(0) , out p81 interval minute to second(0), + in p82 interval minute to second , inout p83 interval minute to second , out p84 interval minute to second, + in p85 interval minute(4) to second(0), inout p86 interval minute(4) to second(0), out p87 interval minute(4) to second(0), + in p88 interval minute(4) to second , inout p89 interval minute(4) to second , out p90 interval minute(4) to second, + in p91 interval second(2,0) , inout p92 interval second(2,0) , out p93 interval second(2,0), + in p94 interval second , inout p95 interval second , out p96 interval second, + in p97 interval second(4,0) , inout p98 interval second(4,0) , out p99 interval 
second(4,0), + in p100 interval second(4) , inout p101 interval second(4) , out p102 interval second(4) + ) begin + set p3 = p2 ; set p2 = p1 ; + set p6 = p5 ; set p5 = p4 ; + set p9 = p8 ; set p8 = p7 ; + set p12 = p11 ; set p11 = p10 ; + set p15 = p14 ; set p14 = p13 ; + set p18 = p17 ; set p17 = p16 ; + set p21 = p20 ; set p20 = p19 ; + set p24 = p23 ; set p23 = p22 ; + set p27 = p26 ; set p26 = p25 ; + set p30 = p29 ; set p29 = p28 ; + set p33 = p32 ; set p32 = p31 ; + set p36 = p35 ; set p35 = p34 ; + set p39 = p38 ; set p38 = p37 ; + set p42 = p41 ; set p41 = p40 ; + set p45 = p44 ; set p44 = p43 ; + set p48 = p47 ; set p47 = p46 ; + set p51 = p50 ; set p50 = p49 ; + set p54 = p53 ; set p53 = p52 ; + set p57 = p56 ; set p56 = p55 ; + set p60 = p59 ; set p59 = p58 ; + set p63 = p62 ; set p62 = p61 ; + set p66 = p65 ; set p65 = p64 ; + set p69 = p68 ; set p68 = p67 ; + set p72 = p71 ; set p71 = p70 ; + set p75 = p74 ; set p74 = p73 ; + set p78 = p77 ; set p77 = p76 ; + set p81 = p80 ; set p80 = p79 ; + set p84 = p83 ; set p83 = p82 ; + set p87 = p86 ; set p86 = p85 ; + set p90 = p89 ; set p89 = p88 ; + set p93 = p92 ; set p92 = p91 ; + set p96 = p95 ; set p95 = p94 ; + set p99 = p98 ; set p98 = p97 ; + set p102 = p101 ; set p101 = p100 ; + end;""") + + try: + interval = datatypes.Interval + aaoInputParameters = [[ + # Use interval types in IN/INOUT parameters and cast as PERIOD types + + teradata.InParam (None, dataType='INTERVAL YEAR'), #p1 interval year + teradata.InOutParam(interval(negative=True, years=12), "p2", dataType='INTERVAL YEAR'), #p2 interval year + teradata.OutParam ("p3", dataType='INTERVAL YEAR'), #p3 interval year + + teradata.InParam (interval(years=1234), dataType='INTERVAL YEAR(4)'), #p4 interval year(4) + teradata.InOutParam(None, "p5", dataType='INTERVAL YEAR(4)'), #p5 interval year(4) + teradata.OutParam ("p6", dataType='INTERVAL YEAR(4)'), #p6 interval year(4) + + teradata.InParam (None, dataType='INTERVAL YEAR TO MONTH'), #p7 interval 
year to month + teradata.InOutParam(interval(negative=True, years=12, months=10), "p8", dataType='INTERVAL YEAR TO MONTH'), #p8 interval year to month + teradata.OutParam ("p9", dataType='INTERVAL YEAR TO MONTH'), #p9 interval year + + teradata.InParam (interval(years=1234, months=10), dataType='INTERVAL YEAR(4) TO MONTH'), #p10 interval year(4) to month + teradata.InOutParam(None, "p11", dataType='INTERVAL YEAR(4) TO MONTH'), #p11 interval year(4) to month + teradata.OutParam ("p12", dataType='INTERVAL YEAR(4) TO MONTH'), #p12 interval year + + teradata.InParam (interval(months= 12), dataType='INTERVAL MONTH'), #p13 interval month + teradata.InOutParam(interval(negative=True, months=12), "p14", dataType='INTERVAL MONTH'), #p14 interval month + teradata.OutParam ("p15", dataType='INTERVAL MONTH'), #p15 interval year + + teradata.InParam (None, dataType='INTERVAL MONTH(4)'), #p16 interval month(4) + teradata.InOutParam(interval(negative=True, months=1234), "p17", dataType='INTERVAL MONTH(4)'), #p17 interval month(4) + teradata.OutParam ("p18", dataType='INTERVAL MONTH(4)'), #p18 interval year + + teradata.InParam (interval(days=11), dataType='INTERVAL DAY'), #p19 interval day + teradata.InOutParam(None, "p20", dataType='INTERVAL DAY'), #p20 interval day + teradata.OutParam ("p21", dataType='INTERVAL DAY'), #p21 interval day + + teradata.InParam (interval(days= 1234), dataType='INTERVAL DAY(4)'), #p22 interval day(4) + teradata.InOutParam(interval(negative=True, days=1234), "p23", dataType='INTERVAL DAY(4)'), #p23 interval day(4) + teradata.OutParam ("p24", dataType='INTERVAL DAY(4)'), #p24 interval day(4) + + teradata.InParam (None, dataType='INTERVAL DAY TO HOUR'), #p25 interval day to hour + teradata.InOutParam(interval(negative=True, days=12, hours=11), "p26", dataType='INTERVAL DAY TO HOUR'), #p26 interval day to hour + teradata.OutParam ("p27", dataType='INTERVAL DAY TO HOUR'), #p27 interval day to hour + + teradata.InParam (interval(days=1234, hours=11), 
dataType='INTERVAL DAY(4) TO HOUR'), #p28 interval day(4) to hour + teradata.InOutParam(None, "p29", dataType='INTERVAL DAY(4) TO HOUR'), #p29 interval day(4) to hour + teradata.OutParam ("p30", dataType='INTERVAL DAY(4) TO HOUR'), #p30 interval day(4) to hour + + teradata.InParam (interval(days=12, hours=11, minutes=22), dataType='INTERVAL DAY TO MINUTE'), #p31 interval day to minute + teradata.InOutParam(interval(negative=True, days=12, hours=11, minutes=22), "p32", dataType='INTERVAL DAY TO MINUTE'), #p32 interval day to minute + teradata.OutParam ("p33", dataType='INTERVAL DAY TO MINUTE'), #p33 interval day to minute + + teradata.InParam (None, dataType='INTERVAL DAY(4) TO MINUTE'), #p34 interval day(4) to minute + teradata.InOutParam(interval(negative=True, days=1234, hours=11, minutes=22), "p35", dataType='INTERVAL DAY(4) TO MINUTE'), #p35 interval day(4) to minute + teradata.OutParam ("p36", dataType='INTERVAL DAY(4) TO MINUTE'), #p36 interval day (4) to minute + + teradata.InParam (interval(days=12, hours=11, minutes=22, seconds=33), dataType='INTERVAL DAY TO SECOND(0)'), #p37 interval day to second(0) + teradata.InOutParam(None, "p38", dataType='INTERVAL DAY TO SECOND(0)'), #p38 interval day to second(0) + teradata.OutParam ("p39", dataType='INTERVAL DAY TO SECOND(0)'), #p39 interval year + + teradata.InParam (interval(days=12, hours=11, minutes=22, seconds=33.120001), dataType='INTERVAL DAY TO SECOND'), #p40 interval day to second + teradata.InOutParam(interval(negative=True, days=12, hours=11, minutes=22, seconds=33.987654), "p41", dataType='INTERVAL DAY TO SECOND'), #p41 interval day to second + teradata.OutParam ("p42", dataType='INTERVAL DAY TO SECOND'), #p42 interval year + + teradata.InParam (None, dataType='INTERVAL DAY(4) TO SECOND(0)'), #p43 interval day(4) to second(0) + teradata.InOutParam(interval(negative=True, days=1234, hours=11, minutes=22, seconds=33), "p44", dataType='INTERVAL DAY(4) TO SECOND(0)'), #p44 interval day(4) to second(0) + 
teradata.OutParam ("p45", dataType='INTERVAL DAY(4) TO SECOND(0)'), #p45 interval day(4) to second(0) + + teradata.InParam (interval(days=1234, hours=11, minutes=22, seconds=33.124321), dataType='INTERVAL DAY(4) TO SECOND'), #p46 interval day(4) to second + teradata.InOutParam(None, "p47", dataType='INTERVAL DAY(4) TO SECOND'), #p47 interval day(4) to second + teradata.OutParam ("p48", dataType='INTERVAL DAY(4) TO SECOND'), #p48 interval year + + teradata.InParam (interval(hours= 12), dataType='INTERVAL HOUR'), #p49 interval hour + teradata.InOutParam(interval(negative=True, hours=12), "p50", dataType='INTERVAL HOUR'), #p50 interval hour + teradata.OutParam ("p51", dataType='INTERVAL HOUR'), #p51 interval year + + teradata.InParam (None, dataType='INTERVAL HOUR(4)'), #p52 interval hour(4) + teradata.InOutParam(interval(negative=True, hours=1234), "p53", dataType='INTERVAL HOUR(4)'), #p53 interval hour(4) + teradata.OutParam ("p54", dataType='INTERVAL HOUR(4)'), #p54 interval hour(4) + + teradata.InParam (interval(hours=12, minutes=22), dataType='INTERVAL HOUR TO MINUTE'), #p55 interval hour to minute + teradata.InOutParam(None, "p56", dataType='INTERVAL HOUR TO MINUTE'), #p56 interval hour to minute + teradata.OutParam ("p57", dataType='INTERVAL HOUR TO MINUTE'), #p57 interval hour to minute + + teradata.InParam (interval(hours=1234, minutes=22), dataType='INTERVAL HOUR(4) TO MINUTE'), #p58 interval hour(4) to minute + teradata.InOutParam(interval(negative=True, hours=1234, minutes=22), "p59", dataType='INTERVAL HOUR(4) TO MINUTE'), #p59 interval hour(4) to minute + teradata.OutParam ("p60", dataType='INTERVAL HOUR(4) TO MINUTE'), #p60 interval hour(4) to minute + + teradata.InParam (None, dataType='INTERVAL HOUR TO SECOND (0)'), #p61 interval hour to second(0) + teradata.InOutParam(interval(negative=True, hours=12, minutes=22, seconds=33), "p62", dataType='INTERVAL HOUR TO SECOND (0)'), #p62 interval hour to second(0) + teradata.OutParam ("p63", dataType='INTERVAL 
HOUR TO SECOND (0)'), #p63 interval hour to second(0) + + teradata.InParam (interval(negative=True, hours=12, minutes=22, seconds=33.145655), dataType='INTERVAL HOUR TO SECOND'), #p64 interval hour to second + teradata.InOutParam(None, "p65", dataType='INTERVAL HOUR TO SECOND'), #p65 interval hour to second + teradata.OutParam ("p66", dataType='INTERVAL HOUR TO SECOND'), #p66 interval year + + teradata.InParam (interval(hours= 1234, minutes=22, seconds=33), dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p67 interval hour(4) to second(0) + teradata.InOutParam(interval(negative=True, hours=1234, minutes=22, seconds=33), "p68", dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p68 interval hour(4) to second(0) + teradata.OutParam ("p69", dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p69 interval hour(4) to second(0) + + teradata.InParam (None, dataType='INTERVAL HOUR(4) TO SECOND'), #p70 interval hour(4) to second + teradata.InOutParam(interval(negative=True, hours=1234, minutes=22, seconds=33.145666), "p71", dataType='INTERVAL HOUR(4) TO SECOND'), #p71 interval hour(4) to second + teradata.OutParam ("p72", dataType='INTERVAL HOUR(4) TO SECOND'), #p72 interval hour(4) to second + + teradata.InParam (interval(minutes=12), dataType='INTERVAL MINUTE'), #p73 interval minute + teradata.InOutParam(None, "p74", dataType='INTERVAL MINUTE'), #p74 interval minute + teradata.OutParam ("p75", dataType='INTERVAL MINUTE'), #p75 interval minute + + teradata.InParam (interval(minutes=1234), dataType='INTERVAL MINUTE(4)'), #p76 interval minute(4) + teradata.InOutParam(interval(negative=True, minutes=1234), "p77", dataType='INTERVAL MINUTE(4)'), #p77 interval minute(4) + teradata.OutParam ("p78", dataType='INTERVAL MINUTE(4)'), #p78 interval minute(4) + + teradata.InParam (None, dataType='INTERVAL MINUTE TO SECOND(0)'), #p79 interval minute to second(0) + teradata.InOutParam(interval(negative=True, minutes=12, seconds=33), "p80", dataType='INTERVAL MINUTE TO SECOND(0)'), #p80 interval minute to 
second(0) + teradata.OutParam ("p81", dataType='INTERVAL MINUTE TO SECOND(0)'), #p81 interval second(0) + + teradata.InParam (interval(minutes=12, seconds=33.400004), dataType='INTERVAL MINUTE TO SECOND'), #p82 interval minute to second + teradata.InOutParam(None, "p83", dataType='INTERVAL MINUTE TO SECOND'), #p83 interval minute to second + teradata.OutParam ("p84", dataType='INTERVAL MINUTE TO SECOND'), #p84 interval minute to second + + teradata.InParam (interval(minutes=1234, seconds=33), dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p85 interval minute(4) to second(0) + teradata.InOutParam(interval(negative=True, minutes=1234, seconds=33), "p86", dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p86 interval minute(4) to second(0) + teradata.OutParam ("p87", dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p87 interval minute(4) to second(0) + + teradata.InParam (None, dataType='INTERVAL MINUTE(4) TO SECOND'), #p88 interval minute(4) to second + teradata.InOutParam(interval(negative=True, minutes=1234, seconds=33.002001), "p89", dataType='INTERVAL MINUTE(4) TO SECOND'), #p89 interval minute(4) to second + teradata.OutParam ("p90", dataType='INTERVAL MINUTE(4) TO SECOND'), #p90 interval minute(4) to second + + teradata.InParam (interval(seconds=12), dataType='INTERVAL SECOND(2,0)'), #p91 interval second(2,0) + teradata.InOutParam(None, "p92", dataType='INTERVAL SECOND(2,0)'), #p92 interval second(2,0) + teradata.OutParam ("p93", dataType='INTERVAL SECOND(2,0)'), #p93 interval second(2,0) + + teradata.InParam (interval(seconds=12.123456), dataType='INTERVAL SECOND'), #p94 interval second + teradata.InOutParam(interval(negative=True, seconds=12.123456), "p95", dataType='INTERVAL SECOND'), #p95 interval second + teradata.OutParam ("p96", dataType='INTERVAL SECOND'), #p96 interval second + + teradata.InParam (None, dataType='INTERVAL SECOND(4,0)'), #p97 interval second(4,0) + teradata.InOutParam(interval(negative=True, seconds=1234), "p98", dataType='INTERVAL 
SECOND(4,0)'), #p98 interval second(4,0) + teradata.OutParam ("p99", dataType='INTERVAL SECOND(4,0)'), #p99 interval second(4,0) + + teradata.InParam (interval(seconds=1234.123456), dataType='INTERVAL SECOND(4)'), #p100 interval second(4) + teradata.InOutParam(None, "p101", dataType='INTERVAL SECOND(4)'), #p101 interval second(4) + teradata.OutParam ("p102", dataType='INTERVAL SECOND(4)') #p102 interval second(4) + + ], [ # Same values as above but use string interval values and cast as interval types + teradata.InParam (None, dataType='INTERVAL YEAR'), #p1 interval year + teradata.InOutParam("-12", "p2", dataType='INTERVAL YEAR'), #p2 interval year + teradata.OutParam ("p3", dataType='INTERVAL YEAR'), #p3 interval year + + teradata.InParam (" 1234", dataType='INTERVAL YEAR(4)'), #p4 interval year(4) + teradata.InOutParam(None, "p5", dataType='INTERVAL YEAR(4)'), #p5 interval year(4) + teradata.OutParam ("p6", dataType='INTERVAL YEAR(4)'), #p6 interval year(4) + + teradata.InParam (None, dataType='INTERVAL YEAR TO MONTH'), #p7 interval year to month + teradata.InOutParam("-12-10", "p8", dataType='INTERVAL YEAR TO MONTH'), #p8 interval year to month + teradata.OutParam ("p9", dataType='INTERVAL YEAR TO MONTH'), #p9 interval year + + teradata.InParam (" 1234-10", dataType='INTERVAL YEAR(4) TO MONTH'), #p10 interval year(4) to month + teradata.InOutParam(None, "p11", dataType='INTERVAL YEAR(4) TO MONTH'), #p11 interval year(4) to month + teradata.OutParam ("p12", dataType='INTERVAL YEAR(4) TO MONTH'), #p12 interval year + + teradata.InParam (" 12", dataType='INTERVAL MONTH'), #p13 interval month + teradata.InOutParam("-12", "p14", dataType='INTERVAL MONTH'), #p14 interval month + teradata.OutParam ("p15", dataType='INTERVAL MONTH'), #p15 interval year + + teradata.InParam (None, dataType='INTERVAL MONTH(4)'), #p16 interval month(4) + teradata.InOutParam("-1234", "p17", dataType='INTERVAL MONTH(4)'), #p17 interval month(4) + teradata.OutParam ("p18", dataType='INTERVAL 
MONTH(4)'), #p18 interval year + + teradata.InParam (" 11", dataType='INTERVAL DAY'), #p19 interval day + teradata.InOutParam(None, "p20", dataType='INTERVAL DAY'), #p20 interval day + teradata.OutParam ("p21", dataType='INTERVAL DAY'), #p21 interval day + + teradata.InParam (" 1234", dataType='INTERVAL DAY(4)'), #p22 interval day(4) + teradata.InOutParam("-1234", "p23", dataType='INTERVAL DAY(4)'), #p23 interval day(4) + teradata.OutParam ("p24", dataType='INTERVAL DAY(4)'), #p24 interval day(4) + + teradata.InParam (None, dataType='INTERVAL DAY TO HOUR'), #p25 interval day to hour + teradata.InOutParam("-12 11", "p26", dataType='INTERVAL DAY TO HOUR'), #p26 interval day to hour + teradata.OutParam ("p27", dataType='INTERVAL DAY TO HOUR'), #p27 interval day to hour + + teradata.InParam (" 1234 11", dataType='INTERVAL DAY(4) TO HOUR'), #p28 interval day(4) to hour + teradata.InOutParam(None, "p29", dataType='INTERVAL DAY(4) TO HOUR'), #p29 interval day(4) to hour + teradata.OutParam ("p30", dataType='INTERVAL DAY(4) TO HOUR'), #p30 interval day(4) to hour + + teradata.InParam (" 12 11:22", dataType='INTERVAL DAY TO MINUTE'), #p31 interval day to minute + teradata.InOutParam("-12 11:22", "p32", dataType='INTERVAL DAY TO MINUTE'), #p32 interval day to minute + teradata.OutParam ("p33", dataType='INTERVAL DAY TO MINUTE'), #p33 interval day to minute + + teradata.InParam (None, dataType='INTERVAL DAY(4) TO MINUTE'), #p34 interval day(4) to minute + teradata.InOutParam("-1234 11:22", "p35", dataType='INTERVAL DAY(4) TO MINUTE'), #p35 interval day(4) to minute + teradata.OutParam ("p36", dataType='INTERVAL DAY(4) TO MINUTE'), #p36 interval day (4) to minute + + teradata.InParam (" 12 11:22:33", dataType='INTERVAL DAY TO SECOND(0)'), #p37 interval day to second(0) + teradata.InOutParam(None, "p38", dataType='INTERVAL DAY TO SECOND(0)'), #p38 interval day to second(0) + teradata.OutParam ("p39", dataType='INTERVAL DAY TO SECOND(0)'), #p39 interval year + + teradata.InParam 
(" 12 11:22:33.120001", dataType='INTERVAL DAY TO SECOND'), #p40 interval day to second + teradata.InOutParam("-12 11:22:33.987654", "p41", dataType='INTERVAL DAY TO SECOND'), #p41 interval day to second + teradata.OutParam ("p42", dataType='INTERVAL DAY TO SECOND'), #p42 interval year + + teradata.InParam (None, dataType='INTERVAL DAY(4) TO SECOND(0)'), #p43 interval day(4) to second(0) + teradata.InOutParam("-1234 11:22:33", "p44", dataType='INTERVAL DAY(4) TO SECOND(0)'), #p44 interval day(4) to second(0) + teradata.OutParam ("p45", dataType='INTERVAL DAY(4) TO SECOND(0)'), #p45 interval day(4) to second(0) + + teradata.InParam (" 1234 11:22:33.124321", dataType='INTERVAL DAY(4) TO SECOND'), #p46 interval day(4) to second + teradata.InOutParam(None, "p47", dataType='INTERVAL DAY(4) TO SECOND'), #p47 interval day(4) to second + teradata.OutParam ("p48", dataType='INTERVAL DAY(4) TO SECOND'), #p48 interval year + + teradata.InParam (" 12", dataType='INTERVAL HOUR'), #p49 interval hour + teradata.InOutParam("-12", "p50", dataType='INTERVAL HOUR'), #p50 interval hour + teradata.OutParam ("p51", dataType='INTERVAL HOUR'), #p51 interval year + + teradata.InParam (None, dataType='INTERVAL HOUR(4)'), #p52 interval hour(4) + teradata.InOutParam("-1234", "p53", dataType='INTERVAL HOUR(4)'), #p53 interval hour(4) + teradata.OutParam ("p54", dataType='INTERVAL HOUR(4)'), #p54 interval hour(4) + + teradata.InParam (" 12:22", dataType='INTERVAL HOUR TO MINUTE'), #p55 interval hour to minute + teradata.InOutParam(None, "p56", dataType='INTERVAL HOUR TO MINUTE'), #p56 interval hour to minute + teradata.OutParam ("p57", dataType='INTERVAL HOUR TO MINUTE'), #p57 interval hour to minute + + teradata.InParam (" 1234:22", dataType='INTERVAL HOUR(4) TO MINUTE'), #p58 interval hour(4) to minute + teradata.InOutParam("-1234:22", "p59", dataType='INTERVAL HOUR(4) TO MINUTE'), #p59 interval hour(4) to minute + teradata.OutParam ("p60", dataType='INTERVAL HOUR(4) TO MINUTE'), #p60 
interval hour(4) to minute + + teradata.InParam (None, dataType='INTERVAL HOUR TO SECOND (0)'), #p61 interval hour to second(0) + teradata.InOutParam("-12:22:33", "p62", dataType='INTERVAL HOUR TO SECOND (0)'), #p62 interval hour to second(0) + teradata.OutParam ("p63", dataType='INTERVAL HOUR TO SECOND (0)'), #p63 interval hour to second(0) + + teradata.InParam ("-12:22:33.145655", dataType='INTERVAL HOUR TO SECOND'), #p64 interval hour to second + teradata.InOutParam(None, "p65", dataType='INTERVAL HOUR TO SECOND'), #p65 interval hour to second + teradata.OutParam ("p66", dataType='INTERVAL HOUR TO SECOND'), #p66 interval year + + teradata.InParam (" 1234:22:33", dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p67 interval hour(4) to second(0) + teradata.InOutParam("-1234:22:33", "p68", dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p68 interval hour(4) to second(0) + teradata.OutParam ("p69", dataType='INTERVAL HOUR(4) TO SECOND(0)'), #p69 interval hour(4) to second(0) + + teradata.InParam (None, dataType='INTERVAL HOUR(4) TO SECOND'), #p70 interval hour(4) to second + teradata.InOutParam("-1234:22:33.145666", "p71", dataType='INTERVAL HOUR(4) TO SECOND'), #p71 interval hour(4) to second + teradata.OutParam ("p72", dataType='INTERVAL HOUR(4) TO SECOND'), #p72 interval hour(4) to second + + teradata.InParam (" 12", dataType='INTERVAL MINUTE'), #p73 interval minute + teradata.InOutParam(None, "p74", dataType='INTERVAL MINUTE'), #p74 interval minute + teradata.OutParam ("p75", dataType='INTERVAL MINUTE'), #p75 interval minute + + teradata.InParam (" 1234", dataType='INTERVAL MINUTE(4)'), #p76 interval minute(4) + teradata.InOutParam("-1234", "p77", dataType='INTERVAL MINUTE(4)'), #p77 interval minute(4) + teradata.OutParam ("p78", dataType='INTERVAL MINUTE(4)'), #p78 interval minute(4) + + teradata.InParam (None, dataType='INTERVAL MINUTE TO SECOND(0)'), #p79 interval minute to second(0) + teradata.InOutParam("-12:33", "p80", dataType='INTERVAL MINUTE TO SECOND(0)'), #p80 
interval minute to second(0) + teradata.OutParam ("p81", dataType='INTERVAL MINUTE TO SECOND(0)'), #p81 interval second(0) + + teradata.InParam (" 12:33.400004", dataType='INTERVAL MINUTE TO SECOND'), #p82 interval minute to second + teradata.InOutParam(None, "p83", dataType='INTERVAL MINUTE TO SECOND'), #p83 interval minute to second + teradata.OutParam ("p84", dataType='INTERVAL MINUTE TO SECOND'), #p84 interval minute to second + + teradata.InParam (" 1234:33", dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p85 interval minute(4) to second(0) + teradata.InOutParam("-1234:33", "p86", dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p86 interval minute(4) to second(0) + teradata.OutParam ("p87", dataType='INTERVAL MINUTE(4) TO SECOND(0)'), #p87 interval minute(4) to second(0) + + teradata.InParam (None, dataType='INTERVAL MINUTE(4) TO SECOND'), #p88 interval minute(4) to second + teradata.InOutParam("-1234:33.002001", "p89", dataType='INTERVAL MINUTE(4) TO SECOND'), #p89 interval minute(4) to second + teradata.OutParam ("p90", dataType='INTERVAL MINUTE(4) TO SECOND'), #p90 interval minute(4) to second + + teradata.InParam (" 12", dataType='INTERVAL SECOND(2,0)'), #p91 interval second(2,0) + teradata.InOutParam(None, "p92", dataType='INTERVAL SECOND(2,0)'), #p92 interval second(2,0) + teradata.OutParam ("p93", dataType='INTERVAL SECOND(2,0)'), #p93 interval second(2,0) + + teradata.InParam (" 12.123456", dataType='INTERVAL SECOND'), #p94 interval second + teradata.InOutParam("-12.123456", "p95", dataType='INTERVAL SECOND'), #p95 interval second + teradata.OutParam ("p96", dataType='INTERVAL SECOND'), #p96 interval second + + teradata.InParam (None, dataType='INTERVAL SECOND(4,0)'), #p97 interval second(4,0) + teradata.InOutParam("-1234", "p98", dataType='INTERVAL SECOND(4,0)'), #p98 interval second(4,0) + teradata.OutParam ("p99", dataType='INTERVAL SECOND(4,0)'), #p99 interval second(4,0) + + teradata.InParam (" 1234.123456", dataType='INTERVAL SECOND(4)'), #p100 
interval second(4) + teradata.InOutParam(None, "p101", dataType='INTERVAL SECOND(4)'), #p101 interval second(4) + teradata.OutParam ("p102", dataType='INTERVAL SECOND(4)') #p102 interval second(4) + + ]] + + results1 = conn.callproc("testProcedureIntervalNulls", aaoInputParameters [0]) + results2 = conn.callproc("testProcedureIntervalNulls", aaoInputParameters [1]) + self.assertEqual (len (results1), len (aaoInputParameters [0])) + self.assertEqual (len (results2), len (aaoInputParameters [1])) + nParam = 2 + for p in range (0, int (len (results1) * 2/3)): + # Use aaoInputParameters [0] for both compares since the results are always returned as interval + # values and not strings + self.assertEqual (results1 ["p{}".format(nParam)], aaoInputParameters [0][nParam - 2].inValue) + self.assertEqual (results2 ["p{}".format(nParam)], aaoInputParameters [0][nParam - 2].inValue) + nParam += (p + 2) % 2 + 1 + finally: + conn.execute ("DROP PROCEDURE testProcedureIntervalNulls") + # end testProcedureIntervalNulls + + def testProcedureDateTimeNulls(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + """REPLACE PROCEDURE testProcedureDateTimeNulls + ( + in p1 date , inout p2 date , out p3 date , + in p4 time(0) , inout p5 time(0) , out p6 time(0) , + in p7 time(3) , inout p8 time(3) , out p9 time(3) , + in p10 time(0) with time zone , inout p11 time(0) with time zone , out p12 time(0) with time zone , + in p13 time (4) with time zone , inout p14 time (4) with time zone , out p15 time (4) with time zone , + in p16 timestamp (0) , inout p17 timestamp (0) , out p18 timestamp (0) , + in p19 timestamp , inout p20 timestamp , out p21 timestamp , + in p22 timestamp (0) with time zone, inout p23 timestamp (0) with time zone, out p24 timestamp (0) with time zone, + in p25 timestamp (3) with time zone, inout p26 timestamp (3) with time zone, out p27 timestamp (3) with time zone + ) begin + set 
p3 = p2 ; set p2 = p1 ; + set p6 = p5 ; set p5 = p4 ; + set p9 = p8 ; set p8 = p7 ; + set p12 = p11 ; set p11 = p10 ; + set p15 = p14 ; set p14 = p13 ; + set p18 = p17 ; set p17 = p16 ; + set p21 = p20 ; set p20 = p19 ; + set p24 = p23 ; set p23 = p22 ; + set p27 = p26 ; set p26 = p25 ; + END;""") + + try: + d = datetime.date + t = datetime.time + dt = datetime.datetime + tz = datatypes.TimeZone + period = datatypes.Period + + aaoParameters = [ + [ # Use data, time & timestamp types in IN/INOUT parameters and cast + teradata.InParam (None, dataType='DATE ANSI'), # p1 date) + teradata.InOutParam(d(1899, 1, 11), "p2", dataType='DATE ANSI'), # p2 date) + teradata.OutParam ("p3", dataType="DATE ANSI"), # p3 date) + + teradata.InParam (t (9, 9, 9), dataType='TIME (0)'), # p4 time (0)) + teradata.InOutParam(None, "p5", dataType = 'TIME (0)'), # p5 time (0)) + teradata.OutParam ("p6", dataType = 'TIME (0)'), # p6 time (0)) + + teradata.InParam(t(11, 5, 16, 123000), dataType='TIME(3)'), # p7 time) + teradata.InOutParam(t(12, 10, 45, 100000), "p8", dataType = 'TIME(3)'), # p8 time) + teradata.OutParam("p9", dataType = 'TIME(3)'), # p9 time) + + teradata.InParam(t (2, 12, 12, 0, tz("+", 0, 30)), dataType='TIME (0) WITH TIME ZONE'), #p10 time) + teradata.InOutParam(None, "p11", dataType = 'TIME (0) WITH TIME ZONE'), #p11 time) + teradata.OutParam("p12", dataType = 'TIME (0) WITH TIME ZONE'), #p12 time) + + teradata.InParam(t (3, 4, 5, 60000, tz("+", 0, 30)), dataType='TIME (4) WITH TIME ZONE'), #p13 time (4) with time zone) + teradata.InOutParam(None, "p14", dataType = 'TIME (4) WITH TIME ZONE'), #p14 time (4) with time zone) + teradata.OutParam("p15", dataType = 'TIME (4) WITH TIME ZONE'), #p15 time (4) with time zone) + + teradata.InParam(None, dataType='TIMESTAMP (0)'), #p16 time (0) with time zone) + teradata.InOutParam(dt (1980, 5, 3, 3, 4, 5), "p17", dataType = 'TIMESTAMP (0)'), #p17 time (0) with time zone) + teradata.OutParam("p18", dataType = 'TIMESTAMP (0)'), #p18 
time (0) with time zone) + + teradata.InParam(dt(1981, 6, 4, 4, 5, 6, 456000), dataType = 'TIMESTAMP'), #p19 timestamp with time zone) + teradata.InOutParam(None, "p20", dataType = 'TIMESTAMP'), #p20 timestamp with time zone) + teradata.OutParam("p21", dataType = 'TIMESTAMP'), #p21 timestamp with time zone) + + teradata.InParam(dt(2000, 1, 1, 0, 1, 5, 0, tz("+", 5, 30)), dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p22 timestamp (0) with time zone) + teradata.InOutParam(None, "p23", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p23 timestamp (0) with time zone) + teradata.OutParam("p24", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p24 timestamp (0) with time zone) + + teradata.InParam(None, dataType='TIMESTAMP (3) WITH TIME ZONE'), #p25 timestamp (3) with time zone) + teradata.InOutParam(dt(2003, 10, 27, 8, 10, 30, 123000, tz("+", 5, 30)), "p26", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p26 timestamp (3) with time zone) + teradata.OutParam("p27", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p27 timestamp (3) with time zone) + + ],[ # Same values as above but use string for params but cast as date, time or timestamp types + teradata.InParam (None, dataType='DATE INTEGER'), # p1 date) + teradata.InOutParam("1899-01-11", "p2", dataType='DATE INTEGER'), # p2 date) + teradata.OutParam ("p3", dataType="DATE"), # p3 date) + + teradata.InParam ("09:09:09", dataType='TIME (0)'), # p4 time (0)) + teradata.InOutParam(None, "p5", dataType = 'TIME (0)'), # p5 time (0)) + teradata.OutParam ("p6", dataType = 'TIME (0)'), # p6 time (0)) + + teradata.InParam ("11:05:16.123", dataType = 'TIME(3)'), # p7 time) + teradata.InOutParam ("12:10:45.1", "p8", dataType = 'TIME(3)'), # p8 time) + teradata.OutParam ("p9", dataType = 'TIME(3)'), # p9 time) + + teradata.InParam ("02:12:12+00:30", dataType='TIME (0) WITH TIME ZONE'), #p10 time) + teradata.InOutParam(None, "p11", dataType = 'TIME (0) WITH TIME ZONE'), #p11 time) + teradata.OutParam ("p12", dataType = 'TIME (0) WITH TIME 
ZONE'), #p12 time) + + teradata.InParam ("03:04:05.06+00:30", dataType='TIME (4) WITH TIME ZONE'), #p13 time (4) with time zone) + teradata.InOutParam(None, "p14", dataType = 'TIME (4) WITH TIME ZONE'), #p14 time (4) with time zone) + teradata.OutParam ("p15", dataType = 'TIME (4) WITH TIME ZONE'), #p15 time (4) with time zone) + + teradata.InParam (None, dataType='TIMESTAMP (0)'), #p16 time (0) with time zone) + teradata.InOutParam("1980-05-03 03:04:05", "p17", dataType = 'TIMESTAMP (0)'), #p17 time (0) with time zone) + teradata.OutParam ("p18", dataType = 'TIMESTAMP (0)'), #p18 time (0) with time zone) + + teradata.InParam ("1981-06-04 04:05:06.456000", dataType = 'TIMESTAMP'), #p19 timestamp with time zone) + teradata.InOutParam(None, "p20", dataType = 'TIMESTAMP'), #p20 timestamp with time zone) + teradata.OutParam ("p21", dataType = 'TIMESTAMP'), #p21 timestamp with time zone) + + teradata.InParam ("2000-01-01 00:01:05+05:30", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p22 timestamp (0) with time zone) + teradata.InOutParam(None, "p23", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p23 timestamp (0) with time zone) + teradata.OutParam ("p24", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p24 timestamp (0) with time zone) + + teradata.InParam (None, dataType='TIMESTAMP (3) WITH TIME ZONE'), #p25 timestamp (3) with time zone) + teradata.InOutParam("2003-10-27 08:10:30.123+05:30", "p26", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p26 timestamp (3) with time zone) + teradata.OutParam ("p27", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p27 timestamp (3) with time zone) + ]] + + for i in range (len (aaoParameters)): + result = conn.callproc("testProcedureDateTimeNulls", aaoParameters [i]) + self.assertEqual (len (result), len (aaoParameters [i])) + nParam = 2 + for p in range (0, int (len (result) * 2/3)): + # Output is returned as date, time or timestamp so always compare against 1st parameter set + self.assertEqual (result ["p{}".format(nParam)], aaoParameters 
[0][nParam - 2].inValue) + nParam += (p + 2) % 2 + 1 + finally: + conn.execute ("DROP PROCEDURE testProcedureDateTimeNulls") + # end testProcedureDateTimeNulls + + def testProcedureInOutPeriod(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + """REPLACE PROCEDURE testProcedureInOutPeriod + ( + in p1 period (date) , inout p2 period (date) , out p3 period (date) , + in p4 period (time(2)) , inout p5 period (time(2)) , out p6 period (time(2)) , + in p7 period (time) , inout p8 period (time) , out p9 period (time) , + in p10 period (time(0) with time zone) , inout p11 period (time(0) with time zone) , out p12 period (time(0) with time zone) , + in p13 period (time (4) with time zone) , inout p14 period (time (4) with time zone) , out p15 period (time (4) with time zone) , + in p16 period (timestamp (1)) , inout p17 period (timestamp (1)) , out p18 period (timestamp (1)) , + in p19 period (timestamp) , inout p20 period (timestamp) , out p21 period (timestamp) , + in p22 period (timestamp (0) with time zone), inout p23 period (timestamp (0) with time zone), out p24 period (timestamp (0) with time zone), + in p25 period (timestamp (3) with time zone), inout p26 period (timestamp (3) with time zone), out p27 period (timestamp (3) with time zone) + ) begin + set p3 = p2 ; set p2 = p1 ; + set p6 = p5 ; set p5 = p4 ; + set p9 = p8 ; set p8 = p7 ; + set p12 = p11 ; set p11 = p10 ; + set p15 = p14 ; set p14 = p13 ; + set p18 = p17 ; set p17 = p16 ; + set p21 = p20 ; set p20 = p19 ; + set p24 = p23 ; set p23 = p22 ; + set p27 = p26 ; set p26 = p25 ; + END;""") + + try: + d = datetime.date + t = datetime.time + dt = datetime.datetime + tz = datatypes.TimeZone + period = datatypes.Period + + aaoParameters = [ + [ # Use period types in IN/INOUT parameters and cast as PERIOD types + teradata.InParam (period(d(1970, 1, 2),d(1973, 4, 5)), dataType='PERIOD (DATE)'), # p1 period(date) + 
teradata.InOutParam(period(d(1998, 3, 21),d(1999, 12, 10)), "p2", dataType='PERIOD (DATE)'), # p2 period(date) + teradata.OutParam ("p3", dataType="PERIOD (DATE)"), # p3 period(date) + + teradata.InParam (period(t(11, 22, 33, 10000), t(22, 33, 44, 210000)), dataType='PERIOD (TIME (2))'), # p4 period(time (0)) + teradata.InOutParam(period(t(9, 9, 9), t(10, 10, 10)), "p5", dataType = 'PERIOD (TIME (2))'), # p5 period(time (0)) + teradata.OutParam ("p6", dataType = 'PERIOD (TIME (2))'), # p6 period(time (0)) + + teradata.InParam(period(t(11, 22, 33, 234560), t(22, 33, 44, 345600)), dataType='PERIOD (TIME)'), # p7 period(time) + teradata.InOutParam(period(t(1, 2, 3, 456000), t(2, 3, 4, 560000)), "p8", dataType = 'PERIOD (TIME)'), # p8 period(time) + teradata.OutParam("p9", dataType = 'PERIOD (TIME)'), # p9 period(time) + + teradata.InParam(period(t (1, 22, 33, 0, tz("+", 0, 30)), t (22, 33, 44, 0, tz("+", 5, 30))), dataType='PERIOD (TIME (0) WITH TIME ZONE)'), #p10 period(time) + teradata.InOutParam(period(t(2, 12, 12, 0, tz("+", 0, 30)), t(22, 3, 44, 0,tz("+", 5, 30))), "p11", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p11 period(time) + teradata.OutParam("p12", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p12 period(time) + + teradata.InParam(period(t (1, 22, 33, 60000, tz("+", 0, 30)), t (22, 33, 44, 561000, tz ("+", 5, 30))), dataType='PERIOD (TIME (4) WITH TIME ZONE)'), #p13 period(time (4) with time zone) + teradata.InOutParam(period(t(3, 4, 5, 600000, tz("+", 0, 30)), t (12, 13, 14, 140000, tz ("+", 5, 30))), "p14", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p14 period(time (4) with time zone) + teradata.OutParam("p15", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p15 period(time (4) with time zone) + + teradata.InParam(period(dt(1970, 1, 2, 3, 4, 5, 100000), dt(1976, 7, 8, 9, 10, 11, 900000)), dataType='PERIOD (TIMESTAMP (1))'), #p16 period(time (0) with time zone) + teradata.InOutParam(period(dt(1980, 5, 3, 3, 4, 5), dt (1986, 8, 7, 1, 10, 
11)), "p17", dataType = 'PERIOD (TIMESTAMP (1))'), #p17 period(time (0) with time zone) + teradata.OutParam("p18", dataType = 'PERIOD (TIMESTAMP (1))'), #p18 period(time (0) with time zone) + + teradata.InParam(period(dt(1970, 1, 2, 3, 4, 5, 456000), dt(1976, 7, 8, 9, 10, 11, 125600)), dataType = 'PERIOD (TIMESTAMP)'), #p19 period(timestamp with time zone) + teradata.InOutParam(period(dt(1981, 6, 4, 4, 5, 6, 124560), dt(1986, 7, 8, 11, 10, 11, 135600)), "p20", dataType = 'PERIOD (TIMESTAMP)'), #p20 period(timestamp with time zone) + teradata.OutParam("p21", dataType = 'PERIOD (TIMESTAMP)'), #p21 period(timestamp with time zone) + + teradata.InParam(period(dt(1970, 1, 2, 3, 4, 5, 0, tz("+", 5, 30)), dt(1976, 7, 8, 9, 10, 11, 0, tz("+", 5, 30))), dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p22 period(timestamp (0) with time zone) + teradata.InOutParam(period(dt(2000, 1, 1, 0, 1, 5, 0,tz("+", 5, 30)), dt(2020, 12, 31, 11, 59, 0, 0, tz("+", 5, 30))), "p23", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p23 period(timestamp (0) with time zone) + teradata.OutParam("p24", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p24 period(timestamp (0) with time zone) + + teradata.InParam(period(dt(1970, 1, 2, 3, 4, 5, 123000, tz("+", 5, 30)), dt(1976, 7, 8, 9, 10, 11, 123000, tz("+", 5, 30))), dataType='PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p25 period(timestamp (3) with time zone) + teradata.InOutParam(period(dt(2003, 10, 27, 8, 10, 30, 0, tz("+", 5, 30)), dt(2019, 5, 6, 10, 21, 0, 0, tz ("+", 5, 30))), "p26", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p26 period(timestamp (3) with time zone) + teradata.OutParam("p27", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p27 period(timestamp (3) with time zone) + + ],[ # Same values as above but use string to represent periods and cast as period type + teradata.InParam ("1970-01-02,1973-04-05", dataType='PERIOD (DATE)'), # p1 period(date) + teradata.InOutParam("1998-03-21,1999-12-10", "p2", 
dataType='PERIOD (DATE)'), # p2 period(date) + teradata.OutParam ("p3", dataType="PERIOD (DATE)"), # p3 period(date) + + teradata.InParam ("11:22:33.01,22:33:44.21", dataType='PERIOD (TIME (2))'), # p4 period(time (0)) + teradata.InOutParam("09:09:09,10:10:10", "p5", dataType = 'PERIOD (TIME (2))'), # p5 period(time (0)) + teradata.OutParam ("p6", dataType = 'PERIOD (TIME (2))'), # p6 period(time (0)) + + teradata.InParam ("11:22:33.23456,22:33:44.3456", dataType='PERIOD (TIME)'), # p7 period(time) + teradata.InOutParam("01:02:03.456,02:03:04.56", "p8", dataType = 'PERIOD (TIME)'), # p8 period(time) + teradata.OutParam ("p9", dataType = 'PERIOD (TIME)'), # p9 period(time) + + teradata.InParam ("01:22:33+00:30,22:33:44+05:30", dataType='PERIOD (TIME (0) WITH TIME ZONE)'), #p10 period(time) + teradata.InOutParam("02:12:12+00:30,22:03:44+05:30", "p11", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p11 period(time) + teradata.OutParam ("p12", dataType = 'PERIOD (TIME (0) WITH TIME ZONE)'), #p12 period(time) + + teradata.InParam ("01:22:33.06+00:30,22:33:44.561+05:30", dataType='PERIOD (TIME (4) WITH TIME ZONE)'), #p13 period(time (4) with time zone) + teradata.InOutParam("03:04:05.6+00:30,12:13:14.14+05:30", "p14", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p14 period(time (4) with time zone) + teradata.OutParam ("p15", dataType = 'PERIOD (TIME (4) WITH TIME ZONE)'), #p15 period(time (4) with time zone) + + teradata.InParam ("1970-01-02 03:04:05.1,1976-07-08 09:10:11.9", dataType='PERIOD (TIMESTAMP (1))'), #p16 period(time (0) with time zone) + teradata.InOutParam("1980-05-03 03:04:05,1986-08-07 01:10:11", "p17", dataType = 'PERIOD (TIMESTAMP (1))'), #p17 period(time (0) with time zone) + teradata.OutParam ("p18", dataType = 'PERIOD (TIMESTAMP (1))'), #p18 period(time (0) with time zone) + + teradata.InParam ("1970-01-02 03:04:05.456,1976-07-08 09:10:11.1256", dataType = 'PERIOD (TIMESTAMP)'), #p19 period(timestamp with time zone) + 
teradata.InOutParam("1981-06-04 04:05:06.12456,1986-07-08 11:10:11.135600", "p20", dataType = 'PERIOD (TIMESTAMP)'), #p20 period(timestamp with time zone) + teradata.OutParam ("p21", dataType = 'PERIOD (TIMESTAMP)'), #p21 period(timestamp with time zone) + + teradata.InParam ("1970-01-02 03:04:05+05:30,1976-07-08 09:10:11+05:30", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p22 period(timestamp (0) with time zone) + teradata.InOutParam("2000-01-01 00:01:05+05:30,2020-12-31 11:59:00+05:30", "p23", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p23 period(timestamp (0) with time zone) + teradata.OutParam ("p24", dataType = 'PERIOD (TIMESTAMP (0) WITH TIME ZONE)'), #p24 period(timestamp (0) with time zone) + + teradata.InParam ("1970-01-02 03:04:05.123+05:30,1976-07-08 09:10:11.123+05:30", dataType='PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p25 period(timestamp (3) with time zone) + teradata.InOutParam("2003-10-27 08:10:30+05:30,2019-05-06 10:21:00+05:30", "p26", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p26 period(timestamp (3) with time zone) + teradata.OutParam ("p27", dataType = 'PERIOD (TIMESTAMP (3) WITH TIME ZONE)'), #p27 period(timestamp (3) with time zone) + ]] + + for i in range (len (aaoParameters)): + result = conn.callproc("testProcedureInOutPeriod", aaoParameters [i]) + self.assertEqual (len (result), len (aaoParameters [i])) + nParam = 2 + for p in range (0, int (len (result) * 2/3)): + # Output is returned as period types so always compare against 1st param set + self.assertEqual (result ["p{}".format(nParam)], aaoParameters [0][nParam - 2].inValue) + nParam += (p + 2) % 2 + 1 + finally: + conn.execute ("DROP PROCEDURE testProcedureInOutPeriod") + # end testProcedureInOutPeriod + + def testProcedureDateTime(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + conn.execute( + """REPLACE PROCEDURE testProcedureDateTime + ( + in p1 date , inout p2 date 
, out p3 date , + in p4 time(0) , inout p5 time(0) , out p6 time(0) , + in p7 time(3) , inout p8 time(3) , out p9 time(3) , + in p10 time(0) with time zone , inout p11 time(0) with time zone , out p12 time(0) with time zone , + in p13 time (4) with time zone , inout p14 time (4) with time zone , out p15 time (4) with time zone , + in p16 timestamp (0) , inout p17 timestamp (0) , out p18 timestamp (0) , + in p19 timestamp , inout p20 timestamp , out p21 timestamp , + in p22 timestamp (0) with time zone, inout p23 timestamp (0) with time zone, out p24 timestamp (0) with time zone, + in p25 timestamp (3) with time zone, inout p26 timestamp (3) with time zone, out p27 timestamp (3) with time zone + ) begin + set p3 = p2 ; set p2 = p1 ; + set p6 = p5 ; set p5 = p4 ; + set p9 = p8 ; set p8 = p7 ; + set p12 = p11 ; set p11 = p10 ; + set p15 = p14 ; set p14 = p13 ; + set p18 = p17 ; set p17 = p16 ; + set p21 = p20 ; set p20 = p19 ; + set p24 = p23 ; set p23 = p22 ; + set p27 = p26 ; set p26 = p25 ; + END;""") + + try: + d = datetime.date + t = datetime.time + dt = datetime.datetime + td = datetime.timedelta + tz = datetime.timezone + period = datatypes.Period + + aaoParameters = [ + [ # Use data, time & timestamp types in IN/INOUT parameters and cast + teradata.InParam (d(1899, 1, 11), dataType='DATE ANSI'), # p1 date + teradata.InOutParam(d(2001, 2, 5), "p2", dataType='DATE ANSI'), # p2 date + teradata.OutParam ("p3", dataType="DATE ANSI"), # p3 date + + teradata.InParam (t(17, 43, 53), dataType='TIME (0)'), # p4 time (0) + teradata.InOutParam(t( 5, 55, 5), "p5", dataType = 'TIME (0)'), # p5 time (0) + teradata.OutParam ("p6", dataType = 'TIME (0)'), # p6 time (0) + + teradata.InParam (t(20, 46, 56, 123000), dataType='TIME(3)'), # p7 time (3) + teradata.InOutParam(t(10, 23, 28, 300000), "p8", dataType = 'TIME(3)'), # p8 time (3) + teradata.OutParam ("p9", dataType = 'TIME(3)'), # p9 time (3) + + teradata.InParam (t(10, 43, 53, tzinfo=tz(td(hours= 11, minutes= 45))), 
dataType='TIME (0) WITH TIME ZONE'), #p10 time (0) with time zone + teradata.InOutParam(t( 4, 4, 4, tzinfo=tz(td(hours= 10, minutes= 45))), "p11", dataType = 'TIME (0) WITH TIME ZONE'), #p11 time (0) with time zone + teradata.OutParam ("p12", dataType = 'TIME (0) WITH TIME ZONE'), #p12 time (0) with time zone + + teradata.InParam (t(14, 47, 57, 123400, tz(td(hours= -1))), dataType='TIME (4) WITH TIME ZONE'), #p13 time (4) with time zone + teradata.InOutParam(t(12, 13, 13, 100000, tz(td(hours= -1))), "p14", dataType = 'TIME (4) WITH TIME ZONE'), #p14 time (4) with time zone + teradata.OutParam ("p15", dataType = 'TIME (4) WITH TIME ZONE'), #p15 time (4) with time zone + + teradata.InParam (dt(1899, 6, 19, 12, 33, 50), dataType='TIMESTAMP (0)'), #p16 timestamp (0) + teradata.InOutParam(dt(1999, 1, 22, 6, 15, 00), "p17", dataType = 'TIMESTAMP (0)'), #p17 timestamp (0) + teradata.OutParam ("p18", dataType = 'TIMESTAMP (0)'), #p18 timestamp (0) + + teradata.InParam (dt(1901, 8, 21, 14, 35, 52, 12), dataType = 'TIMESTAMP'), #p19 timestamp + teradata.InOutParam(dt(1952, 11, 9, 7, 42, 12, 10), "p20", dataType = 'TIMESTAMP'), #p20 timestamp + teradata.OutParam ("p21", dataType = 'TIMESTAMP'), #p21 timestamp + + teradata.InParam (dt(1899, 5, 24, 17, 28, 50, tzinfo=tz(td(hours= 11, minutes= 45))), dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p22 timestamp (0) with time zone + teradata.InOutParam(dt(2020, 10, 12, 8, 14, 25, tzinfo=tz(td(hours= 11, minutes= 45))), "p23", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p23 timestamp (0) with time zone + teradata.OutParam ("p24", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p24 timestamp (0) with time zone + + teradata.InParam (dt(1901, 7, 26, 19, 30, 52, 12000, tz(td(hours= 1))), dataType='TIMESTAMP (3) WITH TIME ZONE'), #p25 timestamp (3) with time zone + teradata.InOutParam(dt(1863, 12, 2, 9, 15, 26, 50000, tz(td(hours= 1))), "p26", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p26 timestamp (3) with time zone + teradata.OutParam 
("p27", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p27 timestamp (3) with time zone + + ],[ # Same values as above but use string for params but cast as date, time or timestamp types + teradata.InParam ("1899-01-11", dataType='DATE INTEGER'), # p1 date + teradata.InOutParam("2001-02-05", "p2", dataType='DATE INTEGER'), # p2 date + teradata.OutParam ("p3", dataType="DATE"), # p3 date + + teradata.InParam ("17:43:53", dataType='TIME (0)'), # p4 time (0) + teradata.InOutParam("05:55:05", "p5", dataType = 'TIME (0)'), # p5 time (0) + teradata.OutParam ("p6", dataType = 'TIME (0)'), # p6 time (0) + + teradata.InParam ("20:46:56.123", dataType = 'TIME(3)'), # p7 time(3) + teradata.InOutParam("10:23:28.3", "p8", dataType = 'TIME(3)'), # p8 time(3) + teradata.OutParam ("p9", dataType = 'TIME(3)'), # p9 time(3) + + teradata.InParam ("10:43:53+11:45", dataType='TIME (0) WITH TIME ZONE'), #p10 time(0) with time zone + teradata.InOutParam("04:04:04+10:45", "p11", dataType = 'TIME (0) WITH TIME ZONE'), #p11 time(0) with time zone + teradata.OutParam ("p12", dataType = 'TIME (0) WITH TIME ZONE'), #p12 time(0) with time zone + + teradata.InParam ("14:47:57.1234-01:00", dataType='TIME (4) WITH TIME ZONE'), #p13 time (4) with time zone + teradata.InOutParam("12:13:13.1-01:00", "p14", dataType = 'TIME (4) WITH TIME ZONE'), #p14 time (4) with time zone + teradata.OutParam ("p15", dataType = 'TIME (4) WITH TIME ZONE'), #p15 time (4) with time zone + + teradata.InParam ("1899-06-19 12:33:50", dataType='TIMESTAMP (0)'), #p16 timestamp (0) + teradata.InOutParam("1999-01-22 06:15:00", "p17", dataType = 'TIMESTAMP (0)'), #p17 timestamp (0) + teradata.OutParam ("p18", dataType = 'TIMESTAMP (0)'), #p18 timestamp (0) + + teradata.InParam ("1901-08-21 14:35:52.000012", dataType = 'TIMESTAMP'), #p19 timestamp + teradata.InOutParam("1952-11-09 07:42:12.00001", "p20", dataType = 'TIMESTAMP'), #p20 timestamp + teradata.OutParam ("p21", dataType = 'TIMESTAMP'), #p21 timestamp + + 
teradata.InParam ("1899-05-24 17:28:50+11:45", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p22 timestamp (0) with time zone + teradata.InOutParam("2020-10-12 08:14:25+11:45", "p23", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p23 timestamp (0) with time zone + teradata.OutParam ("p24", dataType = 'TIMESTAMP (0) WITH TIME ZONE'), #p24 timestamp (0) with time zone + + teradata.InParam ("1901-07-26 19:30:52.012+01:00", dataType='TIMESTAMP (3) WITH TIME ZONE'), #p25 timestamp (3) with time zone + teradata.InOutParam("1863-12-02 09:15:26.05+01:00", "p26", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p26 timestamp (3) with time zone + teradata.OutParam ("p27", dataType = 'TIMESTAMP (3) WITH TIME ZONE'), #p27 timestamp (3) with time zone + ]] + + for i in range (len (aaoParameters)): + result = conn.callproc("testProcedureDateTime", aaoParameters [i]) + self.assertEqual (len (result), len (aaoParameters [i])) + nParam = 2 + for p in range (0, int (len (result) * 2/3)): + # Output is returned as date, time or timestamp so always compare against 1st parameter set + self.assertEqual (result ["p{}".format(nParam)], aaoParameters [0][nParam - 2].inValue) + nParam += (p + 2) % 2 + 1 + finally: + conn.execute ("DROP PROCEDURE testProcedureDateTime") + # end testProcedureDateTime + util.createTestCasePerDSN( - UdaExecDataTypesTest, unittest.TestCase, ("HTTP", "HTTPS", "ODBC")) + UdaExecDataTypesTest, unittest.TestCase, ("TERADATASQL",)) if __name__ == '__main__': formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s") - sh = logging.StreamHandler(sys.stdout) + stream = codecs.StreamWriter(sys.stdout, errors="replace") + stream.encode = lambda msg, errors="strict": (msg.encode(locale.getpreferredencoding(False), errors).decode(), msg) + sh = logging.StreamHandler(stream) sh.setFormatter(formatter) sh.setLevel(logging.INFO) root = logging.getLogger() @@ -953,10 +1932,8 @@ def testLargeTestView(self): def runTest(testName): suite = 
unittest.TestSuite() - suite.addTest(UdaExecDataTypesTest_ODBC(testName)) # @UndefinedVariable # noqa - suite.addTest(UdaExecDataTypesTest_HTTP(testName)) # @UndefinedVariable # noqa + suite.addTest(UdaExecExecuteTest_TERADATASQL(testName)) # @UndefinedVariable # noqa unittest.TextTestRunner().run(suite) if __name__ == '__main__': - # runTest('testDateAndTimeDataTypes') unittest.main() diff --git a/test/test_udaexec_execute.py b/test/test_udaexec_execute.py index 8ca147e..0d045cc 100644 --- a/test/test_udaexec_execute.py +++ b/test/test_udaexec_execute.py @@ -42,6 +42,10 @@ def setUpClass(cls): cls.username = cls.password = util.setupTestUser(udaExec, cls.dsn) cls.failure = False + @classmethod + def tearDownClass(cls): + util.cleanupTestUser(udaExec, cls.dsn) + def testCursorBasics(self): with udaExec.connect(self.dsn, username=self.username, password=self.password) as conn: @@ -145,6 +149,40 @@ def testRollbackCommitAnsiMode(self): "SELECT COUNT(*) FROM testRollbackCommitAnsiMode").fetchone() self.assertEqual(row[0], 0) + def testRollbackCreateAnsiMode(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password, autoCommit="false", + transactionMode='ANSI') as conn: + self.assertIsNotNone(conn) + cursor = conn.cursor() + + cursor.execute("CREATE TABLE testRollbackCreateAnsiMode (x INT)") + # instead of using cursor.rollback, use escape syntax so warnings will + # be printed in log file. 
+ cursor.execute("{fn teradata_fake_result_sets}{fn teradata_rollback}") + + with self.assertRaises (teradata.DatabaseError) as cm: + cursor.execute("INSERT INTO testRollbackCreateAnsiMode VALUES (1)") + self.assertEqual (cm.exception.code, 3807) + # end testRollbackWarningAnsiMode + + def testRollbackCreateTeraMode(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password, autoCommit="false", + transactionMode='TERA') as conn: + self.assertIsNotNone(conn) + cursor = conn.cursor() + + cursor.execute("CREATE TABLE testRollbackCreateTeraMode (x INT)") + # instead of using cursor.rollback, use escape syntax so warnings will + # be printed in log file. + cursor.execute("{fn teradata_fake_result_sets}{fn teradata_rollback}") + + with self.assertRaises (teradata.DatabaseError) as cm: + cursor.execute("INSERT INTO testRollbackCreateTeraMode VALUES (1)") + self.assertEqual (cm.exception.code, 3807) + # end testRollbackWarningAnsiMode + def testSqlScriptExecution(self): with udaExec.connect(self.dsn, username=self.username, password=self.password) as conn: @@ -292,48 +330,49 @@ def testVolatileTable(self): self.assertIsNone(cursor.fetchone()) def testProcedureInOutParamNull(self): - if self.dsn == "ODBC": - with udaExec.connect("ODBC", username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - for r in conn.execute( - """REPLACE PROCEDURE testProcedure1 - (IN p1 INTEGER, INOUT p2 INTEGER, - INOUT p3 VARCHAR(200), INOUT p4 FLOAT, - INOUT p5 VARBYTE(128)) - BEGIN - IF p2 IS NULL THEN - SET p2 = p1; - END IF; - IF p3 IS NULL THEN - SET p3 = 'PASS'; - END IF; - IF p4 IS NULL THEN - SET p4 = p1; - END IF; - IF p5 IS NULL THEN - SET p5 = 'AABBCCDDEEFF'XBV; - END IF; - END;"""): - logger.info(r) - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - for i in range(0, 10): - result = conn.callproc( - "testProcedure1", - (i, teradata.InOutParam(None, "p2", - dataType='INTEGER'), - 
teradata.InOutParam(None, "p3", size=200), - teradata.InOutParam(None, "p4"), - teradata.InOutParam(None, "p5"))) - self.assertEqual(result["p2"], i) - self.assertEqual(result["p3"], "PASS") - self.assertEqual(result["p4"], i) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + for r in conn.execute( + """REPLACE PROCEDURE testProcedureIONull + (IN p1 INTEGER, INOUT p2 INTEGER, + INOUT p3 VARCHAR(200), INOUT p4 FLOAT, + INOUT p5 VARBYTE(128)) + BEGIN + IF p2 IS NULL THEN + SET p2 = p1; + END IF; + IF p3 IS NULL THEN + SET p3 = 'PASSING TEST'; + END IF; + IF p4 IS NULL THEN + SET p4 = p1; + END IF; + IF p5 IS NULL THEN + SET p5 = 'AABBCCDDEEFFAABBCCDDEEFF'XBV; + END IF; + END;"""): + logger.info(r) + + sP3Value = 'PASSING TEST' + byP5Value = bytearray ([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF]) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + for i in range(0, 10): + result = conn.callproc( + "testProcedureIONull", + (i, + teradata.InOutParam(None, "p2", dataType='INTEGER'), + teradata.InOutParam(None, "p3", dataType='VARCHAR(200)', size = i + 1), + teradata.InOutParam(None, "p4", dataType='FLOAT'), + teradata.InOutParam(None, "p5", dataType='VARBYTE(50)', size = 14 - i))) + self.assertEqual(result["p2"], i) + self.assertEqual(result["p3"], sP3Value [:i + 1]) + self.assertEqual(result["p4"], i) + self.assertEqual(result["p5"], byP5Value [:14 - i]) def testProcedure(self): - # REST-307 - Unable to create Stored Procedure using REST, always use - # ODBC. 
- with udaExec.connect("ODBC", username=self.username, + with udaExec.connect(self.dsn, username=self.username, password=self.password) as conn: self.assertIsNotNone(conn) for r in conn.execute( @@ -356,18 +395,14 @@ def testProcedure(self): "testProcedure1", (i, teradata.OutParam("p2", dataType="INTEGER"))) self.assertEqual(result["p2"], i) - # Does not work with REST due to REST-308 - if self.dsn == "ODBC": - for i in range(0, 10): - result = conn.callproc( - "testProcedure2", - (teradata.InOutParam(i, "p1", dataType="INTEGER"), )) - self.assertEqual(result["p1"], i * i) + for i in range(0, 10): + result = conn.callproc( + "testProcedure2", + (teradata.InOutParam(i, "p1", dataType="INTEGER"), )) + self.assertEqual(result["p1"], i * i) def testProcedureWithLargeLobInput(self): - # REST-307 - Unable to create Stored Procedure using REST, always use - # ODBC. - with udaExec.connect("ODBC", username=self.username, + with udaExec.connect(self.dsn, username=self.username, password=self.password) as conn: self.assertIsNotNone(conn) scriptFile = os.path.join( @@ -376,7 +411,6 @@ def testProcedureWithLargeLobInput(self): SQLText = "CDR_2011-07-25_090000.000000.txt\n" SQLText = SQLText * 5000 - print("LENGTH OF SQLTest: {}".format(len(SQLText))) conn.callproc('GCFR_BB_ExecutionLog_Set', ('TestProc', 127, 12, 96, 2, 2, 'MyText', @@ -389,69 +423,130 @@ def testProcedureWithLargeLobInput(self): self.assertEqual(count, 1) def testProcedureWithBinaryAndFloatParameters(self): - if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - for r in conn.execute( - """REPLACE PROCEDURE testProcedure1 - (INOUT p1 VARBYTE(128), OUT p2 VARBYTE(128), - INOUT p3 FLOAT, OUT p4 FLOAT, OUT p5 TIMESTAMP) - BEGIN - SET p2 = p1; - SET p4 = p3; - SET p5 = CURRENT_TIMESTAMP; - END;"""): - logger.info(r) - result = conn.callproc( - "testProcedure1", - (teradata.InOutParam(bytearray([0xFF]), "p1"), - 
teradata.OutParam("p2"), - teradata.InOutParam(float("inf"), "p3"), - teradata.OutParam("p4", dataType="FLOAT"), - teradata.OutParam("p5", dataType="TIMESTAMP"))) - self.assertEqual(result.p1, bytearray([0xFF])) - self.assertEqual(result.p2, result.p1) - self.assertEqual(result.p3, float('inf')) - self.assertEqual(result.p4, result.p3) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + for r in conn.execute( + """REPLACE PROCEDURE testProcedureBF + (INOUT p1 VARBYTE(128), OUT p2 VARBYTE(128), + INOUT p3 FLOAT, OUT p4 FLOAT, OUT p5 TIMESTAMP, + INOUT p6 VARCHAR(10), OUT p7 VARCHAR (10)) + BEGIN + SET p2 = p1; + SET p4 = p3; + SET p5 = CURRENT_TIMESTAMP; + SET p7 = p6; + END;"""): + logger.info(r) + result = conn.callproc( + "testProcedureBF", + (teradata.InOutParam(bytearray([0xFF, 0xFE, 0xFF]), "p1", size=2), + teradata.OutParam("p2", size=1), + teradata.InOutParam(float("inf"), "p3"), + teradata.OutParam("p4", dataType="FLOAT"), + teradata.OutParam("p5", dataType="TIMESTAMP"), + teradata.InOutParam("abcdefghij", "p6", size = 40), + teradata.OutParam ("p7", size = 4))) + self.assertEqual(result.p1, bytearray([0xFF, 0xFE])) + self.assertEqual(result.p2, result.p1[:1]) + self.assertEqual(result.p3, float('inf')) + self.assertEqual(result.p4, result.p3) + self.assertEqual(result.p6, "abcdefghij") + self.assertEqual(result.p7,result.p6[:4]) def testProcedureWithResultSet(self): - if self.dsn == "ODBC": - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - self.assertIsNotNone(conn) - for r in conn.execute( - """REPLACE PROCEDURE testProcedureWithResultSet() -DYNAMIC RESULT SETS 1 -BEGIN - DECLARE QUERY1 VARCHAR(22000); - DECLARE dyna_set1 CURSOR WITH RETURN TO CALLER FOR STMT1; - SET QUERY1 = 'select * from dbc.dbcinfo'; - PREPARE STMT1 FROM QUERY1; - OPEN dyna_set1; - DEALLOCATE PREPARE STMT1; -END;"""): - logger.info(r) - with conn.cursor() as cursor: - 
cursor.callproc("testProcedureWithResultSet", ()) - self.assertEqual(len(cursor.fetchall()), 3) - - def testQueryTimeout(self): - with self.assertRaises(teradata.DatabaseError) as cm: - with udaExec.connect(self.dsn, username=self.username, - password=self.password) as conn: - conn.execute( - "CREATE TABLE testQueryTimeout (id INT, " - "name VARCHAR(128), dob TIMESTAMP)") - conn.executemany( - "INSERT INTO testQueryTimeout VALUES (?, ?, " - "CURRENT_TIMESTAMP)", - [(x, str(x)) for x in range(0, 10000)], - batch=True) - conn.execute( - "SELECT * FROM testQueryTimeout t1, testQueryTimeout t2", - queryTimeout=1) - self.assertIn("timeout", cm.exception.msg) + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + for r in conn.execute( + """REPLACE PROCEDURE testProcedureWithResultSet() + DYNAMIC RESULT SETS 1 + BEGIN + DECLARE QUERY1 VARCHAR(22000); + DECLARE dyna_set1 CURSOR WITH RETURN TO CALLER FOR STMT1; + SET QUERY1 = 'select * from dbc.dbcinfo'; + PREPARE STMT1 FROM QUERY1; + OPEN dyna_set1; + DEALLOCATE PREPARE STMT1; + END;"""): + logger.info(r) + with conn.cursor() as cursor: + cursor.callproc("testProcedureWithResultSet", ()) + self.assertTrue (cursor.nextset (), "cursor.nextset failed to retrieve dynamic result set") + self.assertEqual(len(cursor.fetchall()), 3) + + def testProcedureWithParamsAndResultSet(self): + with udaExec.connect(self.dsn, username=self.username, + password=self.password) as conn: + self.assertIsNotNone(conn) + for r in conn.execute( + """REPLACE PROCEDURE testProcedureWithParamsAndResultSet + (IN p1 VARBYTE (128), INOUT p2 VARBYTE(128), OUT p3 VARBYTE(128), + IN p4 VARCHAR (100) , INOUT p5 VARCHAR(100) , OUT p6 VARCHAR (100)) + DYNAMIC RESULT SETS 2 + BEGIN + declare cur1 cursor with return for select :p1 as c1, bytes (:p1) as c2 , :p2 as c3, bytes (:p2) as c4 ; + declare cur2 cursor with return for select :p4 as c1, characters (:p4) as c2, :p5 as c3, characters (:p5) 
as c4 ; + open cur1 ; + open cur2 ; + + SET p3 = p2; + SET p2 = p1; + SET p6 = p5; + SET p5 = p4; + END;"""): + logger.info(r) + with conn.cursor() as cursor: + result = cursor.callproc("testProcedureWithParamsAndResultSet", + (teradata.InParam (bytearray ([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])), + teradata.InOutParam (bytearray ([0xFF, 0xFE, 0xFF, 0xEE]), "p2", dataType = 'VARBYTE(20)', size=9), + teradata.OutParam ("p3", size=3), + teradata.InParam ("abcdefghijklmnop"), + teradata.InOutParam ("123456789012345678901", "p5", dataType = 'VARCHAR(128)', size = 15), + teradata.OutParam ("p6", size = 4))) + self.assertEqual(result.p2, bytearray ([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0xAA, 0xBB, 0xCC])) + self.assertEqual(result.p3, bytearray ([0xFF, 0xFE, 0xFF])) + self.assertEqual(result.p5, "abcdefghijklmno") + self.assertEqual(result.p6, "1234") + + self.assertTrue (cursor.nextset (), "cursor.nextset failed to retrieve dynamic result set one") + compareLists (self, cursor.description, [ + ['c1', bytearray , None, 128, 0, None, False], + ['c2', decimal.Decimal, None, 4, 10, None, False], + ['c3', bytearray , None, 128, 0, None, False], + ['c4', decimal.Decimal, None, 4, 10, None, False] + ]) + compareLists (self, cursor.types, [ + ['VARBYTE', bytearray], + ['INTEGER', decimal.Decimal], + ['VARBYTE', bytearray], + ['INTEGER', decimal.Decimal] + ]) + for row in cursor.fetchall () : + self.assertEqual(row [0], bytearray ([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])) + self.assertEqual(row [1], len (row [0])) + self.assertEqual(row [2], bytearray ([0xFF, 0xFE, 0xFF, 0xEE])) + self.assertEqual(row [3], len (row [2])) + + self.assertTrue (cursor.nextset (), "cursor.nextset failed to retrieve dynamic result set two") + compareLists (self, cursor.description, [ + ['c1', str , None, 200, 0, None, False], + ['c2', decimal.Decimal, None, 4, 10, None, False], + ['c3', str , None, 200, 0, None, False], + ['c4', 
decimal.Decimal, None, 4, 10, None, False] + ]) + compareLists (self, cursor.types, [ + ['VARCHAR', str], + ['INTEGER', decimal.Decimal], + ['VARCHAR', str], + ['INTEGER', decimal.Decimal] + ]) + for row in cursor.fetchall () : + self.assertEqual(row [0], "abcdefghijklmnop") + self.assertEqual(row [1], len (row [0])) + self.assertEqual(row [2], "123456789012345678901") + self.assertEqual(row [3], len (row [2])) + # end testProcedureWithParamsAndResultSet def testNewlinesInQuery(self): with udaExec.connect(self.dsn, username=self.username, @@ -556,27 +651,6 @@ def testConnectionMultipleThreads(self): if self.failure: raise self.failure - def testAutoGeneratedKeys(self): - # Auto-generated keys are not supported by REST. - if self.dsn == "ODBC": - rowCount = 1 - with udaExec.connect(self.dsn, username=self.username, - password=self.password, - ReturnGeneratedKeys="C") as conn: - conn.execute( - "CREATE TABLE testAutoGeneratedKeys (id INTEGER " - "GENERATED BY DEFAULT AS IDENTITY, name VARCHAR(128))") - count = 0 - for row in conn.executemany( - "INSERT INTO testAutoGeneratedKeys VALUES (NULL, ?)", - [(str(x), ) for x in range(0, rowCount)]): - count += 1 - print(row) - self.assertEqual(row[0], count) - # Potential ODBC bug is preventing this test case from - # passing, e-mail sent to ODBC support team. 
- # self.assertEqual(count, rowCount) - def testEmptyResultSet(self): with udaExec.connect(self.dsn, username=self.username, password=self.password) as conn: @@ -587,7 +661,6 @@ def testEmptyResultSet(self): with conn.cursor() as cursor: for row in cursor.execute("SELECT * FROM testEmptyResultSet"): count += 1 - print(row) self.assertEqual(count, 0) def testFetchArraySize1000(self): @@ -637,7 +710,7 @@ def testFetchArraySize1000(self): def testDollarSignInPassword(self): with udaExec.connect(self.dsn) as session: session.execute("DROP USER testDollarSignInPassword", - ignoreErrors=[3802]) + ignoreErrors=[3802, 3524]) util.setupTestUser(udaExec, self.dsn, user='testDollarSignInPassword', passwd='pa$$$$word') with udaExec.connect(self.dsn, username='testDollarSignInPassword', @@ -645,16 +718,15 @@ def testDollarSignInPassword(self): session.execute("SELECT * FROM DBC.DBCINFO") def testOperationsOnClosedCursor(self): - if self.dsn == "ODBC": - with udaExec.connect(self.dsn) as session: - cursor = session.cursor() - cursor.close() - error = None - try: - cursor.execute("SELECT * FROM DBC.DBCINFO") - except teradata.InterfaceError as e: - error = e - self.assertIsNotNone(error) + with udaExec.connect(self.dsn) as session: + cursor = session.cursor() + cursor.close() + error = None + try: + cursor.execute("SELECT * FROM DBC.DBCINFO") + except teradata.ProgrammingError as e: + error = e + self.assertIsNotNone(error) def testIgnoreError(self): with udaExec.connect(self.dsn) as session: @@ -707,11 +779,15 @@ def cursorAndExecuteSelect(testCase, session, threadId): except Exception as e: testCase.failure = e +def compareLists (test, aaoList1, aaoList2): + nRowIndex = 0 + for aoList in aaoList1: + for nCol in range (len (aoList)): + test.assertEqual (aoList [nCol], aaoList2 [nRowIndex][nCol]) + nRowIndex += 1 -# The unit tests in the UdaExecExecuteTest are execute once for each named -# data source below. 
util.createTestCasePerDSN( - UdaExecExecuteTest, unittest.TestCase, ("HTTP", "HTTPS", "ODBC")) + UdaExecExecuteTest, unittest.TestCase, ("TERADATASQL",)) if __name__ == '__main__': formatter = logging.Formatter( @@ -727,13 +803,11 @@ def cursorAndExecuteSelect(testCase, session, threadId): udaExec = teradata.UdaExec(configFiles=configFiles, configureLogging=False) udaExec.checkpoint() - def runTest(testName): suite = unittest.TestSuite() - suite.addTest(UdaExecExecuteTest_ODBC(testName)) # @UndefinedVariable # noqa - suite.addTest(UdaExecExecuteTest_HTTP(testName)) # @UndefinedVariable # noqa + suite.addTest(UdaExecExecuteTest_TERADATASQL(testName)) # @UndefinedVariable # noqa unittest.TextTestRunner().run(suite) + unittest.addCleanup (util.cleanupTestUser(udaExec, 'TERADATASQL')) if __name__ == '__main__': - # runTest('testMultipleResultSets') unittest.main() diff --git a/test/udaexec.ini b/test/udaexec.ini index 68e8dd5..e7b8616 100644 --- a/test/udaexec.ini +++ b/test/udaexec.ini @@ -1,37 +1,29 @@ [CONFIG] appName=PyTdUnitTestsの version=1.00.00.01 -dsn=TEST3 +dsn=TERADATASQL key1=file1 key2=file1 escapeTest=this$$isatest -httpsPort=1443 dbcInfo=DBC.DBCInfo -port=${httpsPort} -testSystem=sdt00250 +testSystem=jdbc1710ek2 +configureLogging=False +logConsole=False +logLevel=NOTSET [DEFAULT] system=${testSystem} -host=sdlc4157.labs.teradata.com username=dbc password=dbc charset=UTF8 -[HTTP] -method=rest - -[HTTPS] -method=rest -protocol=https -verifyCerts=False - -[ODBC] -method=odbc -system=${testSystem}.labs.teradata.com - +[TERADATASQL] +system=${testSystem} +username=dbc +password=dbc +log=0 [ESCAPE_TEST] -method=odbc system=${testSystem}.labs.teradata.com password=pa$$$$word escapeTest2=${escapeTest} From 2362e8be5c62fceb9db31ec8d35680f3f01d32d9 Mon Sep 17 00:00:00 2001 From: hd121024 Date: Fri, 11 Dec 2020 14:44:20 -0800 Subject: [PATCH 2/5] pydbapi-81 Updated README --- README.md | 1117 ++++++++++++++++++++++++++++++++++++++++- setup.py | 16 +- 
teradata/datatypes.py | 2 + teradata/tdsql.py | 8 +- test/udaexec.ini | 2 +- 5 files changed, 1117 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 7751035..9d50592 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,1112 @@ -Teradata Python DevOps Module [![Build Status](https://travis-ci.org/Teradata/PyTd.svg?branch=master)](https://travis-ci.org/Teradata/PyTd) -============================= -The Teradata Python Module is a freely available, open source, library for the Python programming language, whose aim is to make it easy to script powerful interactions with Teradata Database. It adopts the philosophy of udaSQL, providing a DevOps focused SQL Execution Engine that allows developers to focus on their SQL and procedural logic without worrying about Operational requirements such as external configuration, query banding, and logging. +## Teradata Python Module -INSTALLATION ------------- +
- [sudo] pip install teradata +The Teradata Python Module is a freely available, open source, library +for the Python programming language, whose aim is to make it easy to +script powerful interactions with Teradata Database. It adopts the +philosophy of +[udaSQL](/tools/articles/udasql-a-devops-focused-sql-execution-engine), +providing a DevOps focused SQL Execution Engine that allows developers +to focus on their SQL and procedural logic without worrying about +Operational requirements such as external configuration, query banding, +and logging. -The module is hosted on PyPi: https://pypi.python.org/pypi/teradata +This package requires 64-bit Python 3.4 or later, and runs on Windows, macOS, and Linux. 32-bit Python is not supported. -DOCUMENTATION -------------- +The Teradata Python Module is released under an MIT license. The source +is available on [GitHub](https://github.com/Teradata/PyTd) and the +package is available for download and install from +[PyPI](https://pypi.python.org/pypi/teradata). This module is open +source and therefore uses the [Community +Support](https://support.teradata.com/community) model. -Documentation for the Teradata Python Module is available on the Teradata Developer Exchange. +For Teradata customer support, please visit [Teradata Access](https://access.teradata.com/). -UNIT TESTS ----------- +
-To execute the unit tests, you can run the following command at the root of the project checkout. +### Table of Contents +--- +* [1.0 Getting Started](#GettingStarted) + + [1.1 Documentation](#Documentation) + + [1.2 Installing the Teradata Python Module](#Installing) + + [1.3 Connectivity Options](#Connectivity) + + [1.4 Hello World Example](#HelloWorld) +* [2.0 DevOps Features](#DevOps) + + [2.1 External Configuration](#ExternalConfiguraton) + + [2.2 Logging](#Logging) + + [2.3 Checkpoints](#Checkpoints) + + [2.4 Query Banding](#QueryBands) +* [3.0 Database Interactions](#DatabaseInteractions) + + [3.1 Cursors](#Cursors) + + [3.2 Parameterized SQL](#ParameterizedSQL) + + [3.3 Stored Procedures](#StoredProcedures) + + [3.4 Transactions](#Transactions) + + [3.5 Data Types](#DataTypes) + + [3.6 Unicode](#Unicode) + + [3.7 Ignoring Errors](#IgnoringErrors) + + [3.8 Password Protection](#PasswordProtection) + + [3.9 External SQL Scripts](#ExternalConfiguraton) +* [4.0 Reference](#References) + + [4.1 UdaExec Parameters](#UdaExec%20Parameters) + + [4.2 Connect Parameters](#ConnectParametrs) + + [4.3 Execute Parameters](#ExecuteParameters) +* [5.0 Running Unit Tests](#RunningTests) +* [6.0 Migration Guide](#Migration) + + [6.1 Setup](#MGSetup) + + [6.2 Database Interactions](#MGDatabase) + + [6.3 Reference](#MGReferences) -python -m unittest discover -s test +Table of Contents links do not work on PyPI due to a [PyPI limitation](https://github.com/pypa/warehouse/issues/4064). -The unit tests use the connection information specified in test/udaexec.ini. The unit tests depend on Teradata ODBC being installed and also on access to Teradata REST Services. +--- + + +### **1.0 Getting Started** + +The following sections run through documentation, installation, connectivity options, +and a simple Hello World example. 
+
+
+
+#### **1.1 Documentation**
+---
+When the Teradata Python Module is installed, the `README.md` file is placed in the `teradata` directory under your Python installation directory. This permits you to view the documentation offline, when you are not connected to the internet.
+
+The `README.md` file is a plain text file containing the documentation for the Teradata Python Module. While the file can be viewed with any text file viewer or editor, your viewing experience will be best with an editor that understands Markdown format.
+
+
+
+#### **1.2 Installing the Teradata Python Module**
+---
+The Teradata Python Module depends on the `teradatasql` package which is available from PyPI.
+
+Use `pip install` to download and install the Teradata Python Module and its dependencies automatically.
+
+Platform | Command
+-------------- | ---
+macOS or Linux | `pip install teradata`
+Windows | `py -3 -m pip install teradata`
+
+When upgrading to a new version of the Teradata Python Module, you may need to use pip install's `--no-cache-dir` option to force the download of the new version.
+
+Platform | Command
+-------------- | ---
+macOS or Linux | `pip install --no-cache-dir -U teradata`
+Windows | `py -3 -m pip install --no-cache-dir -U teradata`
+
+If you don't have pip installed, you can download the package from
+[PyPI](https://pypi.python.org/pypi/teradata), unzip the folder, then
+double click the setup.py file or run `setup.py install`.
+
+
+
+#### **1.3 Connectivity Options**
+---
+The Teradata Python Module uses Teradata SQL Driver for Python to connect to the Teradata Database.
+
+
+
+#### **1.4 Hello World Example**
+---
+In this example, we will connect to a Teradata Database and run a simple
+query to fetch the Query Band information for the session that we
+create.
+ +**Example 1 - HelloWorld.py** + + + +``` {.brush:python;} +import teradata + +udaExec = teradata.UdaExec (appName="HelloWorld", version="1.0", + logConsole=False) + +session = udaExec.connect(system="tdprod", + username="xxx", password="xxx"); + +for row in session.execute("SELECT GetQueryBand()"): + print(row) +``` + + +Let's break the example down line by line. The first line, "import +teradata", imports the Teradata Python Module for use in the script. + +The second line initializes the "UdaExec" framework that provides DevOps +support features such as configuration and logging. We tell UdaExec the +name and version of our application during initialization so that we can +get feedback about our application in DBQL and Teradata Viewpoint as +this information is included in the QueryBand of all Database sessions +created by our script. We also tell UdaExec not to log to the console +(e.g. logConsole=False) so that our print statement is easier to read. + +The third line creates a connection to a Teradata system named "tdprod". +The last line executes the "SELECT +GetQueryBand()" SQL statement and iterates over the results, printing +each row returned. Since "SELECT GetQueryBand()" statement only returns +one row, only one row is printed. + +Let's go ahead and run the script by executing "python HelloWorld.py". +Below is the result: + + + + Row 1: [=S> ApplicationName=HelloWorld;Version=1.0;JobID=20201208153806-1;ClientUser=example;Production=False;udaAppLogFile=/home/hd121024/devel/pydbapi-81-pytd/logs/HelloWorld.20201208153806-1.log;UtilityName=PyTd;UtilityVersion=15.10.0.22;] + + + +From the output, we see that one row was returned with a single string +column. We also see quite a bit of information was added to the +QueryBand of the session we created. We can see the application name and +version we specified when initializing UdaExec. 
If we look at this location +on the file system we can see the log file that was generated: + + + +``` {.brush:bash;} +2020-12-08 15:38:06,204 - teradata.udaexec - INFO - Initializing UdaExec... +2020-12-08 15:38:06,204 - teradata.udaexec - INFO - Reading config files: ['/etc/udaexec.ini: Not Found', '/home/example/udaexec.ini: Not Found', '/home/example/pytd/udaexec.ini: Not Found'] +2020-12-08 15:38:06,204 - teradata.udaexec - INFO - No previous run number found as /home/example/pytd/.runNumber does not exist. Initializing run number to 1 +2020-12-08 15:38:06,204 - teradata.udaexec - INFO - Cleaning up log files older than 90 days. +2020-12-08 15:38:06,204 - teradata.udaexec - INFO - Removed 0 log files. +2020-12-08 15:38:06,237 - teradata.udaexec - INFO - Checkpoint file not found: /home/example/pytd/HelloWorld.checkpoint +2020-12-08 15:38:06,237 - teradata.udaexec - INFO - No previous checkpoint found, executing from beginning... +2020-12-08 15:38:06,237 - teradata.udaexec - INFO - Execution Details: +/******************************************************************************** + * Application Name: HelloWorld + * Version: 1.0 + * Run Number: 20201208153806-1 + * Host: sdl52261 + * Platform: Linux-4.4.73-5-default-x86_64-with-SuSE-12-x86_64 + * OS User: example + * Python Version: 3.4.6 + * Python Compiler: GCC + * Python Build: ('default', 'Mar 01 2017 16:52:22') + * UdaExec Version: 15.10.0.22 + * Program Name: + * Working Dir: /home/example/pytd + * Log Dir: /home/example/pytd/logs + * Log File: /home/example/pytd/logs/HelloWorld.20201208153806-1.log + * Config Files: ['/etc/udaexec.ini: Not Found', '/home/example/udaexec.ini: Not Found', '/home/example/pytd/udaexec.ini: Not Found'] + * Query Bands: ApplicationName=HelloWorld;Version=1.0;JobID=20201208153806-1;ClientUser=example;Production=False;udaAppLogFile=/home/example/pytd/logs/HelloWorld.20201208153806-1.log;UtilityName=PyTd;UtilityVersion=15.10.0.22 
+********************************************************************************/ +2020-12-08 15:38:35,290 - teradata.udaexec - INFO - Creating connection: {'password': 'XXXXXX', 'username': 'guest', 'system': 'tdprod'} +2020-12-08 15:38:35,498 - teradata.udaexec - INFO - Connection successful. Duration: 0.208 seconds. Details: {'password': 'XXXXXX', 'us +ername': 'guest', 'system': 'tdprod'} +2020-12-08 15:39:02,167 - teradata.udaexec - INFO - Query Successful. Duration: 0.007 seconds, Rows: 1, Query: SELECT GetQueryBand() +2020-12-08 15:40:43,349 - teradata.udaexec - INFO - UdaExec exiting. +``` + + + +In the logs, you can see connection information and all the SQL +statements submitted along with their durations. If any errors had +occurred, those would have been logged too. + +Explicitly closing resources when done is always a good idea. In the +next sections, we show how this can be done automatically using the "with" +statement. + +--- + + + +### **2.0 DevOps Features** + +The following sections discuss the DevOps oriented features provided by +the Teradata Python Module. These features help simplify development and +provide the feedback developers need once their applications are put +into QA and production. + + + +#### **2.1 External Configuration** +--- +In the first "Hello World" example, we depended on no external +configuration information for our script to run. What if we now wanted +to run our HelloWorld.py script against a different database system? We +would need to modify the source of our script, which is somewhat +inconvenient and error prone. Luckily the UdaExec framework makes it +easy to maintain configuration information outside of our source code. 
+ +**Example 2 -- PrintTableRows.py** + +``` {.brush:python;} +import teradata + +udaExec = teradata.UdaExec () + +with udaExec.connect("${dataSourceName}") as session: + for row in session.execute("SELECT * FROM ${table}"): + print(row) +``` + +In this example, we remove all the hard coded configuration data and +instead load our configuration parameters from external configuration +files. We also call connect using the "with" statement so that the +connection is closed after use even when exceptions are raised. + +You may be wondering what *\${dataSourceName}* means above. Well, a +dollar sign followed by optional curly braces means replace +*\${whatever}* with the value of the external configuration variable +named "whatever". In this example, we make a connection to a data source +whose name and configuration is defined outside of our script. We then +perform a SELECT on a table whose name is also configured outside of our +script. + +UdaExec allows any SQL statement to make reference to an external +configuration parameter using the dollar sign/curly brace syntax. When +actually wanting to include a "\$" literal in a SQL statement that isn't +a parameter substitution, you must escape the dollar sign with another +dollar sign (e.g. "\$\$"). + +Here is our external configuration file that we name "udaexec.ini" and +place in the same directory as our python script. 
+ +**Example 2 - udaexec.ini** + +``` {.brush:bash;} +# Application Configuration +[CONFIG] +appName=PrintTableRows +version=2 +logConsole=False +dataSourceName=TDPROD +table=DBC.DBCInfo + +# Default Data Source Configuration +[DEFAULT] +system=tdprod +username=xxx +password=xxx + +# Data Source Definition +[TDPROD] +username=xxx +password=xxx +``` + +An external configuration file should contain one section named "CONFIG" +that contains application configuration name/value pairs, a section +named "DEFAULT" that contains default data source name/value pairs, and +one or more user defined sections that contain data source name/value +pairs. + +In this example, we are connecting to *\${dataSourceName}*, which +resolves to "TDPROD" as dataSourceName and is a property in the CONFIG +section. The TDPROD data source is defined in our configuration file and +provides the name of the +username and password. It also inherits the properties in the DEFAULT +section, which in this case, defines system, username and password. The +username and password in the TDPROD section will override the username +and password defined in the DEFAULT section. + +You'll notice in this example we didn't specify the "appName" and +"version" when initializing UdaExec. If you look at the method signature +for UdaExec, you'll see that the default values for appName and version +are "\${appName}" and "\${version}". When not specified as method +arguments, these values are looked up in the external configuration. +This is true for almost all configuration parameters that can be passed +to the UdaExec constructor so that any setting can be set or changed +without changing your code. 
+ +If we run the example script above using "python PrintTableRows.py", we +get the following output: + + Row 1: [VERSION, 17.10c.00.35] + Row 2: [LANGUAGE SUPPORT MODE, Standard] + Row 3: [RELEASE, 17.10c.00.30] + +Looking at the generated log file, we see the following log entry: + + 2020-12-08 17:21:32,307 - teradata.udaexec - INFO - Reading config files: ['C:\\etc\\udaexec.ini: Not Found', 'C:\\Users\\example\\udaexec.ini: Found', 'C:\\Users\\example\\udaexec.ini: Found'] + +As you can see, UdaExec is attempting to load external configuration +from multiple files. By default, UdaExec looks for a system specific +configuration file, a user specific configuration file, and an +application specific configuration file. The location of these files can +be specified as arguments to the UdaExec constructor. Below are the +argument names along with their default values. + +**Table 1 -- Config File Locations** + + | **Name** | **Description** | **Default Value** | + |------------------------|-------------------------------------------------------------------|-------------------| + | **systemConfigFile** | The system wide configuration file(s). Can be a single value or a list. | *\"/etc/udaexec.ini\"*| + | **userConfigFile** | The user specific configuration file(s). Can be a single value or a list. | *\"\~/udaexec.ini\" or \"%HOMEPATH%/udaexec.ini\"*| + | **appConfigFile** | The application specific configuration file (s). Can be a single value or a list. | *\"udaexec.ini\"*| + ---------------------- ----------------------------------------------------------------------------------- ---------------------------------------------------- + +Configuration data is loaded in the order shown above, from least +specific to most specific, with later configuration files overriding the +values specified by earlier configuration files when conflicts occur. 
+ +If we had wanted to name our configuration file in this example +"PrintTableRows.ini" instead of "udaexec.ini", then we could've +specified that when creating the UdaExec object. E.g. + +``` {.brush:python;} +udaExec = teradata.UdaExec (appConfigFile="PrintTableRows.ini") +``` + +If we wanted to have multiple application configuration files, then we +could've specified a list of file names instead. E.g. + +``` {.brush:python;} +udaExec = teradata.UdaExec (appConfigFile=["PrintTableRows.ini", "PrintTableRows2.ini"]) +``` + +If you find that even that isn't flexible enough, you can always +override the external configuration file list used by UdaExec by passing +it in the "configFiles" argument. When the "configFiles" list is +specified, systemConfigFile, userConfigFile, and appConfigFile values +are ignored. + +In addition to using external configuration files, application +configuration options can also be specified via the command line. If we +wanted to change the table name we select from in the example above, we +can specify the table value on the command line e.g. "python +PrintTableRows.py \--table=ExampleTable" which would instead print the +rows of a table named "ExampleTable". Configuration options specified on +the command line override those in external configuration files. UdaExec +has a parameter named "parseCmdLineArgs" that is True by default. You +can set this value to False to prevent command line arguments from being +included as part of the UdaExec configuration. + +Sometimes it may be necessary to get or set UdaExec application +configuration parameters in the code directly. You can do this by using +the "config" dictionary-like object on the UdaExec instance. E.g. 
+ +``` {.brush:sql;} +udaExec = teradata.UdaExec () +print(udaExec.config["table"]) +udaExec.config["table"] = "ExampleTable" +``` + +As you can see, using external configuration makes it easy to write +scripts that are reasonably generic and that can execute in a variety of +environments. The same script can be executed against a Dev, Test, and +Prod environment with no changes, making it easier to adopt and automate +a DevOps workflow. + + + +#### **2.2 Logging** +--- +The UdaExec object automatically enables logging when it is initialized. +Logging is implemented using Python's standard logging module. If you +create a logger in your script, your custom log messages will also be +logged along with the UdaExec log messages. + +By default, each execution of a script that creates the UdaExec object +gets its own unique log file. This has the potential to generate quite a +few files. For this reason, UdaExec also automatically removes log files +that are older than a configurable number of days. + +Below is a list of the different logging options and their default +values. Logging options can be specified in the UdaExec constructor, in +the application section of external configuration files, or on the +command line. + +**Table 2 -- Logging Options** + + | **Name** | **Description** | **Default Value**| + |---------------------|------------------------|-------------------------------| + | **configureLogging**| Flags if UdaExec will configure logging. |True | + | **logDir** | The directory that contains log files. |*\"logs\"* | + | **logFile** | The log file name. | *\"\${appName}.\${runNumber}.log\"*| + | **logLevel** | The level that determines what log messages are logged (i.e. CRITICAL, ERROR, WARNING, INFO, TRACE, DEBUG) | *\"INFO\"*| + | **logConsole** | Flags if logs should be written to stdout in addition to the log file. | True| + | **logRetention** | The number of days to retain log files. 
Files in the log directory older than the specified number of days are deleted. | 90 |
+ ---
+If the logging features of UdaExec don't meet the requirements of your
+application, then you can configure UdaExec not to configure logging and
+instead configure it yourself.
+
+Log messages generated at INFO level contain all the status of all
+submitted SQL statements and their durations. If there are problems
+during script execution, the log files provide the insight needed to
+diagnose any issues. If more information is needed, the log level can be
+increased to *\"TRACE\"* or *\"DEBUG\"*.
+
+
+
+#### **2.3 Checkpoints**
+---
+When an error occurs during script execution, exceptions get raised that
+typically cause the script to exit. Let's suppose you have a script that
+performs 4 tasks but it is only able to complete 2 of them before an
+unrecoverable exception is raised. In some cases, it would be nice to be
+able to re-run the script when the error condition is resolved and have
+it automatically resume execution of the 2 remaining tasks. This is
+exactly the reason UdaExec includes support for checkpoints.
+
+A checkpoint is simply a string that denotes some point during script
+execution. When a checkpoint is reached, UdaExec saves the checkpoint
+string off to a file. UdaExec checks for this file during
+initialization. If it finds a previous checkpoint, it will ignore all
+execute statements until the checkpoint specified in the file is
+reached.
+ +**Example 3 - CheckpointExample.py** + +``` {.brush:python;} +import teradata + +udaExec = teradata.UdaExec() +with udaExec.connect("${dataSourceName}") as session: + session.execute("-- Task 1") + udaExec.checkpoint("Task 1 Complete") + + session.execute("-- Task 2") + udaExec.checkpoint("Task 2 Complete") + + session.execute("-- Task 3") + udaExec.checkpoint("Task 3 Complete") + + session.execute("-- Task 4") + udaExec.checkpoint("Task 4 Complete") + + +# Script completed successfully, clear checkpoint +# so it executes from the beginning next time +udaExec.checkpoint() +``` + +In the example above, we are calling execute 4 different times and +setting a checkpoint after each call. If we were to re-run the script +after the 3rd execute failed, the first two calls to execute would be +ignored. Below are the related log entries when re-running our +CheckpointExample.py script after the 3rd execute failed. + +``` {.brush:bash;} +2015-06-25 14:15:29,017 - teradata.udaexec - INFO - Initializing UdaExec... +2015-06-25 14:15:29,026 - teradata.udaexec - INFO - Found checkpoint file: "/home/example/PyTd/Example3/CheckpointExample.checkpoint" +2015-06-25 14:15:29,027 - teradata.udaexec - INFO - Resuming from checkpoint "Task 2 Complete". +2015-06-25 14:15:29,028 - teradata.udaexec - INFO - Creating connection: {'system': 'tdprod', 'username': 'xxx', 'password': 'XXXXXX', 'dsn': 'TDPROD'} +2015-06-25 14:15:29,250 - teradata.udaexec - INFO - Connection successful. Duration: 0.222 seconds. Details: {'system': 'tdprod', 'username': 'xxx', 'password': 'XXXXXX', 'dsn': 'TDPROD'} +2015-06-25 14:15:29,250 - teradata.udaexec - INFO - Skipping query, haven't reached resume checkpoint yet. Query: -- Task 1 +2015-06-25 14:15:29,250 - teradata.udaexec - INFO - Skipping query, haven't reached resume checkpoint yet. Query: -- Task 2 +2015-06-25 14:15:29,250 - teradata.udaexec - INFO - Reached resume checkpoint: "Task 2 Complete". Resuming execution... 
+2015-06-25 14:15:29,252 - teradata.udaexec - INFO - Query Successful. Duration: 0.001 seconds, Rows: 0, Query: -- Task 3 +2015-06-25 14:15:29,252 - teradata.udaexec - INFO - Reached checkpoint: "Task 3 Complete" +2015-06-25 14:15:29,252 - teradata.udaexec - INFO - Saving checkpoint "Task 3 Complete" to /home/example/PyTd/Example3/CheckpointExample.checkpoint. +2015-06-25 14:15:29,253 - teradata.udaexec - INFO - Query Successful. Duration: 0.001 seconds, Rows: 0, Query: -- Task 4 +2015-06-25 14:15:29,254 - teradata.udaexec - INFO - Reached checkpoint: "Task 4 Complete" +2015-06-25 14:15:29,254 - teradata.udaexec - INFO - Saving checkpoint "Task 4 Complete" to /home/example/PyTd/Example3/CheckpointExample.checkpoint. +2015-06-25 14:15:29,328 - teradata.udaexec - INFO - Clearing checkpoint.... +2015-06-25 14:15:29,329 - teradata.udaexec - INFO - Removing checkpoint file /home/example/PyTd/Example3/CheckpointExample.checkpoint. +2015-06-25 14:15:29,329 - teradata.udaexec - INFO - UdaExec exiting. +``` + +As you can see from the logs, all calls to execute are skipped until the +"Task 2 Complete" checkpoint is reached. At the end of our script we +call "udaExec.checkpoint()" without a checkpoint string. This call +clears the checkpoint file so that the next time we run our script, it +will execute from the beginning. + +While skipping calls to execute help to resume after an error, there are +situations where this alone will not always work. If the results of a +query are necessary for program execution, then the script may hit +additional errors when being resumed. For example, let's assume our +script now loads a configuration parameter from a table. + +``` {.brush:python;} +udaExec.config["mysetting"] = session.execute("SELECT mysetting FROM + MyConfigTable").fetchone()[0] +``` + +A call to execute returns a Cursor into a result set, so we call +fetchone()\[0\] to get the first column of the first row in the result +set. 
If the execute call is skipped, then fetchone() will return None +and the lookup of the first column will fail. There are several ways we +can workaround this problem. The first way is to force execute to run +regardless of checkpoints by specifying the parameter runAlways=True. +E.g. + +``` {.brush:python;} +udaExec.config["mysetting"] = session.execute("SELECT mysetting FROM + MyConfigTable", runAlways=True).fetchone()[0] +``` + +This is a good approach if we want to set "mysetting" even on resume. If +"mysetting" is not necessary for resume though, then another way to +prevent errors is to check the UdaExec "skip" attribute. E.g. + +``` {.brush:python;} +if not udaExec.skip: + udaExec.config["mysetting"] = session.execute("SELECT mysetting FROM + MyConfigTable").fetchone()[0] +``` + +With this approach, we only access the "mysetting" column if execute +will not be skipped. + +UdaExec saves checkpoints to a file named *\"\${appName}.checkpoint\"* +located in the same directory the script is executed by default. The +checkpoint file can be changed by specifying the "checkpointFile" +parameter in the UdaExec constructor, in an external configuration file, +or on the command line. To disable file-based checkpoints, +"checkpointFile" can be set to None in the UdaExec constructor or it can +be set to an empty string in an external configuration file. + +If it is desirable to load checkpoints from and save checkpoints to a +place other than a local file (e.g. a database table), then a custom +checkpoint manager implementation can be used to handle loading, saving, +and clearing checkpoint details. Below is an example of a custom +checkpoint manager that loads and saves checkpoints to a database table. 
+ +``` {.brush:python;} +class MyCheckpointManager (teradata.UdaExecCheckpointManager): + def __init__(self, session): + self.session = session + def loadCheckpoint(self): + for row in self.session.execute("""SELECT * FROM ${checkPointTable} + WHERE appName = '${appName}'"""): + return row.checkpointName + def saveCheckpoint(self, checkpointName): + self.session.execute("""UPDATE ${checkPointTable} SET checkpointName = ? + WHERE appName = '${appName}' ELSE + INSERT INTO ${checkPointTable} VALUES ('${appName}', ?)""", + (checkpointName, checkpointName)) + def clearCheckpoint(self): + self.session.execute("""DELETE FROM ${checkPointTable} + WHERE appName = '${appName}'""", + ignoreErrors=[3802]) +``` + +To use this custom checkpoint manager, you can disable the +checkpointFile and call the setCheckpointManager method on UdaExec. E.g. + +``` {.brush:python;} +udaexec = teradata.UdaExec(checkpointFile=None) +with udaexec.connect("${dsn}") as session: + udaexec.setCheckpointManager(MyCheckpointManager(session)) + # The rest of my program logic. +``` + + + +#### **2.4 Query Banding** +--- +UdaExec automatically sets session Query Bands for any connections you +create so that the runtime characteristics of your application can be +monitored in DBQL and Teradata Viewpoint. Reviewing application log +files along with the associated log entries in DBQL are great ways to +get feedback on the overall execution of your application. The table +below lists the name and descriptions of the Query Bands that are set. + +**Table 3 - Query Bands** + + ----------------- ----------------------------------------------------------- + | **Name** | **Description** | + |-------------------| ------------------------------------------------------| + | ApplicationName | The name of your application | + | Version | The version of your application | + | JobID | The run number of this particular execution | + | ClientUser | The OS user name. 
| + | Production | True if a production App, else False | + | udaAppLogFile | Path of the generated log file | + | gitRevision | The GIT revision of the application. | + | gitDirty | True if files have been modified since last commit to GIT| + | UtilityName | The nickname of the Teradata Python Module - PyTd | + | UtilityVersion | The version of the Teradata Python Module | + ----------------- ----------------------------------------------------------- + +Additional custom Query Bands can be set by passing a map (dict) as the +queryBand argument to UdaExec.connect(). + +--- + + +### **3.0 Database Interactions** + +UdaExec implements the Python Database API Specification v2.0 while +adding additional convenience on top. The only deviation from this +specification is that UdaExec enables auto commit by default. It is +recommended to review the Python Database API Specification v2.0 first +and then review the following sections for more details. + + + +#### **3.1 Cursors** +--- +Since only a single Cursor is needed most of the time, UdaExec creates +an internal cursor for each call to connect() and allows execute, +executemany, and callproc to be called directly on the connection +object. Calls to these methods on the Connection object simply invoke +those same methods on the internal cursor. The internal cursor is closed +when the connection is closed. + +Calls to execute, executemany, and callproc return the Cursor for +convenience. Cursors act as iterators, so the results of an execute call +can easily be iterated over in a "for" loop. Rows act like tuples or +dictionaries, and even allow columns to be accessed by name similar to +attributes on an object. Below is an example. All 3 print statements +print the same thing for each row. 
+ +``` {.brush:python;} +import teradata +udaExec = teradata.UdaExec() +with udaExec.connect("${dataSourceName}") as session: + for row in session.execute("""SELECT InfoKey AS name, InfoData as val + FROM DBC.DBCInfo"""): + print(row[0] + ": " + row[1]) + print(row["name"] + ": " + row["val"]) + print(row.name + ": " + row.val) +``` + +There are situations where it may be necessary to use a separate cursor +in addition to the one created by default. A good example of this is +when wanting to perform queries while iterating over the results of +another query. To accomplish this, two cursors must be used, one to +iterate and one to invoke the additional queries. Below is an example. + +``` {.brush:python;} +import teradata +udaExec = teradata.UdaExec() +with udaExec.connect("${dataSourceName}") as session: + with session.cursor() as cursor: + for row in cursor.execute("SELECT * from ${tableName}"): + session.execute("DELETE FROM ${tableName} WHERE id = ?", (row.id, )): +``` + +Like connections, cursors should be closed when you\'re finished using +them. This is best accomplished using the "with" statement. + + + +#### **3.2 Parameterized SQL** +--- +You can pass parameters to SQL statements using the question mark +notation. The following example inserts a row into an employee table. + +``` {.brush:python;} +session.execute("""INSERT INTO employee (id, firstName, lastName, dob) + VALUES (?, ?, ?, ?)""", (1,"James", "Kirk", "2233-03-22")) +``` + +To insert multiple rows, executemany can be used. To insert them using +batch mode, pass in the parameter batch=True (default is True). To insert +them one at a time, pass in the parameter batch=False. E.g. 
+
+``` {.brush:python;}
+session.executemany("""INSERT INTO employee (id, firstName, lastName, dob)
+ VALUES (?, ?, ?, ?)""",
+ ((1,"James", "Kirk", "2233-03-22"),
+ (2,"Jean-Luc", "Picard", "2305-07-13")),
+ batch=True)
+```
+
+Batch mode sends all the parameter sequences to the database in a single
+"batch" and is much faster than sending the parameter sequences
+individually.
+
+
+
+#### **3.3 Stored Procedures**
+---
+Stored procedures can be invoked using the "callproc" method. OUT
+parameters should be specified as teradata.OutParam instances. INOUT
+parameters should be specified as teradata.InOutParam instances. IN
+parameters can be specified as teradata.InParam instances. An
+optional name can be specified with output parameters that can be used
+to access the returned parameter by name. E.g.
+
+``` {.brush:python;}
+results = session.callproc("MyProcedure", (teradata.InOutParam("inputValue", "inoutVar1"), teradata.OutParam(), teradata.OutParam("outVar2", dataType="PERIOD")))
+print(results.inoutVar1)
+print(results.outVar2)
+```
+Additionally, a Teradata data type can be specified for the IN and INOUT
+parameters, so that the input parameter is converted to the proper Teradata
+data type. A size can be set for OUT and INOUT parameters so that
+the output parameter is truncated to the specified size. The size option will only
+work for string and byte types and will be ignored for all other types.
+
+
+
+#### **3.4 Transactions**
+---
+UdaExec enables auto commit by default. To disable auto commit and
+instead commit transactions manually, set autoCommit=False on the call
+to connect or in the data source's external configuration.
+
+Transactions can be manually committed or rolled back using the commit()
+and rollback() methods on the Connection object. E.g.
+ +``` {.brush:python;} +import teradata +udaExec = teradata.UdaExec() +with udaExec.connect("${dataSourceName}", autoCommit=False) as session: + session.execute("CREATE TABLE ${tableName} (${columns})") + session.commit() +``` + + + +#### **3.5 Data Types** +--- +The interface that UdaExec uses to perform conversion on the data type +values is called teradata.datatypes.DataTypeConverter with the default +implementation being teradata.datatypes.DefaultDataTypeConverter. If you +would like to customize how data gets converted to Python objects, +you can specify a custom DataTypeConverter during connect. E.g. + +``` {.brush:python;} +udaExec.connect("${dataSourceName}", dataTypeConverter=MyDataTypeConverter()) +``` + +It is recommended to derive your custom DataTypeConverter from +DefaultDataTypeConverter so that you can perform conversion for the data +types you're interested in while delegating to the default +implementation for any of the remaining ones. + +The table below specifies the data types that get converted by the +DefaultDataTypeConverter. Any data types not in the table below are +returned as a Python Unicode string (e.g. VARCHAR, CLOB, UDT, ARRAY, +etc.) 
+ + +**Table 4 - Data Type Conversions** + + --------------------------------- ----------------------------------- + | **Data Type** | **Python Object** | + | -------------------------------|-----------------------------------| + | BYTE | bytearray | + | VARBYTE | bytearray | + | BYTEINT | decimal.Decimal | + | SMALLINT | decimal.Decimal | + | INTEGER | decimal.Decimal | + | BIGINT | decimal.Decimal | + | REAL, FLOAT, DOUBLE PRECISION | decimal.Decimal | + | DECIMAL, NUMERIC | decimal.Decimal | + | NUMBER | decimal.Decimal | + | DATE | datetime.date | + | TIME | datetime.time | + | TIME WITH TIME ZONE | datetime.time | + | TIMESTAMP | datetime.datetime | + | TIMESTAMP WITH TIME ZONE | datetime.datetime | + | INTERVAL | teradata.datatypes.Interval | + | BLOB | bytearray | + | JSON | dict or list, result of json.loads() | + | PERIOD | teradata.datatypes.Period | + ------------------------------- -------------------------------------- + + + +#### **3.6 Unicode** +--- +The Teradata Python Module supports the unicode character data transfer +via the UTF8 session character set. + + + +#### **3.7 Ignoring Errors** +--- +Sometimes it is necessary to execute a SQL statement even though there +is a chance it may fail. For example, if your script depends on a table +that may or may not already exist, the simple thing to do is to try to +create the table and ignore the "table already exists" error. UdaExec +makes it easy to do this by allowing clients to specify error codes that +can safely be ignored. For example, the following execute statement will +not raise an error even if the checkpoints table already exists. 
+ +``` {.brush:python;} +session.execute("""CREATE TABLE ${dbname}.checkpoints ( + appName VARCHAR(1024) CHARACTER SET UNICODE, + checkpointName VARCHAR(1024) CHARACTER SET UNICODE) + UNIQUE PRIMARY INDEX(appName)""", + ignoreErrors=[3803]) +``` + +If you want to ignore all errors regardless of the error code, you can +include the "continueOnError=True" parameter to execute. This will cause +any errors to be caught and logged and not raised up to your +application. + + + +#### **3.8 Password Protection** +--- +Teradata SQL Driver for Python supports stored password protection. Please +refer to the [Stored Password Protection Section](https://github.com/Teradata/python-driver#StoredPasswordProtection) in the `README.md` for details. + + + +#### **3.9 External SQL Scripts** +--- +UdaExec can be used to execute SQL statements that are stored in files +external to your Python script. To execute the SQL statements in an +external file, simply pass the execute method the location of the file +to execute. E.g. + +``` {.brush:python;} +session.execute(file="myqueries.sql") +``` + +A semi-colon is used as the default delimiter when specifying multiple +SQL statements. Any occurrence of a semi-colon outside of a SQL string +literal or comments is treated as a delimiter. When SQL scripts contain +SQL stored procedures that contain semi-colons internal to the +procedure, the delimiter should be changed to something other than the +default. To use a different character sequence as the delimiter, the +delimiter parameter can be used. E.g. + +``` {.brush:python;} +session.execute(file="myqueries.sql", delimiter=";;") +``` + +UdaExec also has limited support for executing BTEQ scripts. Any BTEQ +commands starting with a "." are simply ignored, while everything else +is treated as a SQL statement and executed. To execute a BTEQ script, +pass in a fileType=*\"bteq\"* parameter. E.g.
+ +``` {.brush:python;} +session.execute(file="myqueries.bteq", fileType="bteq") +``` + +SQL statements in external files can reference external configuration +values using the *\${keyname}* syntax. Therefore, any use of "\$" in an +external SQL file must be escaped if it is not intended to reference an +external configuration value. + +Any parameters passed to execute will be passed as parameters to the SQL +statements in the external file. Execute will still return a cursor when +executing a SQL script, the cursor will point to the results of the last +SQL statement in the file. + +Comments can be included in SQL files. Multi-line comments start with +\"/\*\" and end with \"\*/\". Single line comments start with \"\--\". +Comments are submitted to the database along with the individual SQL +statements. + +--- + + +### **4.0 Reference** + +This section defines the full set of method parameters supported by the +API. + + + +#### **4.1 UdaExec Parameters** +--- +UdaExec accepts the following list of parameters during initialization. +The column labeled "E" flags if a parameter can be specified in an +external configuration file. + + --- + |**Name** | **Description** |**E**| **Default Value** | + |----------------------|-------------------|-----|-----------------------| + | **appName** | The name of our application | Y | None - Required field | + | **version** | The version of our application | Y | None - Required field | + | **checkpointFile** | The location of the checkpoint file. Can be None to disable file-based checkpoints. |Y | *\${appName}.checkpoint*| + | **runNumberFile** | The path of the file containing the previous runNumber. | Y | *.runNumber* | + | **runNumber** | A string that represents this particular execution of the python script. Used in the log file name as well as included in the Session QueryBand. | Y | *YYYYmmddHHMMSS-X* | + | **configureLogging** | Flags if UdaExec will configure logging. 
| Y | True | + | **logDir** | The directory that contains log files. | Y | *\"logs\"* | + | **logFile** | The log file name. | Y | *\"\${appName}.\${runNumber}.log\"* | + | **logLevel** | The level that determines what log messages are logged (i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE) | Y | *\"INFO\"* | + | **logConsole** | Flags if logs should be written to stdout in addition to the log file. | Y | True | + | **logRetention** | The number of days to retain log files. Files in the log directory older than the specified number of days are deleted. | Y | 90 | + | **systemConfigFile** | The system wide configuration file(s). Can be a single value or a list. | N | *\"/etc/udaexec.ini\"* | + | **userConfigFile** | The user specific configuration file(s). Can be a single value or a list. | N | *\"\~/udaexec.ini\"* or *\"%HOMEPATH%/udaexec.ini\"* | + | **appConfigFile** | The application specific configuration file (s). Can be a single value or a list. | N | *\"udaexec.ini\"* | + | **configFiles** | The full list of external configuration files. Overrides any values in systemConfigFile, userConfigFile, appConfigFile. | N | None | + | **configSection** | The name of the application config section in external configuration files.| N | *CONFIG* | + | **parseCmdLineArgs** | Flags whether or not to include command line arguments as part of the external configuration variables. | N | True | + | **gitPath** | The path to the GIT executable to use to include GIT information in the session QueryBand. | Y | Defaults to system path | + | **production** | Flags if this app is a production application, applies this value to session QueryBand. | Y | False | + | **dataTypeConverter**| The DataTypeConverter implementation to use to convert data types from their string representation to python objects. 
| N | datatypes.DefaultDataTypeConverter() | + ----------------------- + + + +#### **4.2 Connect Parameters** +--- +The following table lists the parameters that the UdaExec.connect() +method accepts. With the exception of the "externalDSN" parameter, all +the parameters below can be specified in the DEFAULT or named data +source sections of external configuration files. While the externalDSN +parameter cannot be specified directly in an external configuration +file, it can reference the name of an external configuration variable +using *\${keyname}* syntax. The "Type" column indicates if a parameter +is specific to a connectivity option, if it is blank it applies to all +types. + +Any parameters passed to the connect method or specified in an external +configuration that are not listed below will automatically be appended +to the connect string passed to the teradatasql driver. If the parameter +is not supported by the teradatasql driver, an error will be returned. + + --- + | **Name** | **Description** | **Default Value**| + |-----------------------|-------------------------------------------------------|------------------| + | **externalDSN** | The name of the data source defined in external configuration files. | None - Optional | + | **system** | The Database name of the system to connect. | None | + | **username** | The Database username to use to connect. | None | + | **password** | The Database password to use to connect. | None | + | **database** | The default database name to apply to the session | None | + | **autoCommit** | Enables or disables auto commit mode. When auto commit mode is disabled, transactions must be committed manually. | True | + | **transactionMode** | The transaction mode to use i.e. "Teradata" or "ANSI" | *Teradata* | + | **queryBands** | A map (dict) of query band key/value pairs to include in the session's QueryBand.
| None | + | **dataTypeConverter** | The DataTypeConverter implementation to use to convert data types from their string representation to python objects. | datatypes.DefaultDataTypeConverter() | + | **\*\*kwargs** | A variable number of name/value pairs to append to the ConnectString passed to the Teradata SQL Driver for Python. For a full list of connection parameters offered, refer to the [Connection Parameters Section](https://github.com/Teradata/python-driver#ConnectionParameters) in the Teradata SQL Driver for Python `README.md`. | None | + --- + + + +#### **4.3 Execute Parameters** +--- +The following table lists the parameters that the execute method +accepts. + + --- + | **Name** | **Description** | **Default Value** | + |-----------------------|--------------------------------------------------|------------------------| + | **query** | The query to execute. | None, required if file is None | + | **params** | The list or tuple containing the parameters to pass in to replace question mark placeholders. | None | + | **file** | The path of an external script to execute. | None | + | **fileType** | The type of file to execute if different than a standard delimited SQL script (i.e. bteq) | None | + | **delimiter** | The delimiter character to use for SQL scripts. | *;* | + | **runAlways** | When True, the query or script will be executed regardless if the previous checkpoint has been reached. | False | + | **continueOnError** | When True, all errors will be caught and logged but not raised up to the application. | False | + | **ignoreErrors** | The list or sequence of error codes to ignore. | None | + | **logParamCharLimit** | The maximum number of characters to log per query parameter. When a parameter exceeds the limit it is truncated in the logs and an ellipsis (\"\...\") is appended. | 80 characters per parameter | + | **logParamFrequency** | The amount of parameter sets to log when executemany is invoked. 
Setting this value to X means that every Xth parameter set will be logged in addition to the first and last parameter set. When this value is set to zero, no parameters are logged. | 1 - all parameters sets are logged. | + +--- + + +### **5.0 Running Unit Tests** + +To execute the unit tests, you can run the following command at the root of the project checkout. + + python -m unittest discover -s test + +The unit tests use the connection information specified in test/udaexec.ini. The unit tests depend on Teradata SQL Driver for Python being installed. + +--- + + +### **6.0 Migration Guide** + +The Teradata Python Module is now a fully supported Teradata product. This module has been updated to use the Teradata SQL Driver for Python to connect to the Teradata Database. The Teradata ODBC Driver and the Query Service REST API for Teradata have been dropped. + +This section highlights the modifications that may be useful to migrate your code to function with the updated module. + + + +#### **6.1 Setup** +--- +**Requirements and Limitations** + +* Python 2.7 support dropped. +* Requires 64-bit Python 3.4 or later. +* Runs on Windows, macOS and Linux. +* 32-bit Python is not supported. +* Supported for use with Teradata Database 14.10 and later releases. + +**Installation** + +The Teradata Python Module now depends on the teradatasql package which is available from PyPI. + +Continue to use pip install to download and install the Teradata Python module and its dependencies as described in the [Installation Section](#Installing). + +If a different version of the teradatasql package is required, refer to the [Installation Section](https://github.com/Teradata/python-driver#Installation) of the teradatasql `README.md`. + + + +### **6.2 Database Interactions** +--- +**Parameterized SQL** + +To insert multiple rows, executemany can be used. The default behavior for this method has changed to send all the parameter sequences to the database in a single "batch". 
This is much faster than sending the parameter sequences individually. To insert the statements individually, pass in the parameter batch=False. + +``` {.brush:python;} +session.executemany("""INSERT INTO employee (id, firstName, lastName, dob) + VALUES (?, ?, ?, ?)""", + ((1,"James", "Kirk", "2233-03-22"), + (2,"Jean-Luc", "Picard", "2305-07-13")), + batch=False) +``` + +**Stored Procedures** + +Stored procedures can be invoked using the "callproc" method. Following are some changes made to the parameters: + +* The teradata.InParam is new and can be used to specify the Teradata data type to bind the input parameter. E.g. + + teradata.InParam (None, dataType='PERIOD (DATE)') + +* The teradata.InOutParam can also be used to specify the Teradata data type to bind the input parameter. E.g. + + teradata.InOutParam("2000-12-22,2008-10-27", "p2", dataType='PERIOD (DATE)') + +The supported Teradata Database bind data types are limited to the ones supported by the teradatasql package. For these limitations, refer to the [Limitations Section](https://github.com/Teradata/python-driver#Limitations) in the teradatasql `README.md`. + +**Data Types** + +The returned data type values from the Teradata Database are no longer limited to their string representation. For a list of how the data types are returned, refer to the [Data Type Section](https://github.com/Teradata/python-driver#DataTypes) in the teradatasql `README.md`. + +The teradata.datatypes.DefaultDataTypeConverter will still perform the same conversions as displayed in the [Data Type Conversion](#DataTypeConversion) table. If a custom DataTypeConverter is being used, adjustments may be needed. + +**Unicode** + +The UTF8 session character set is always used. The charset connection parameter is no longer supported. + +**Password Protection** + +Stored password protection is still supported through the Teradata SQL Driver for Python. 
For details, see the [Stored Password Protection](https://github.com/Teradata/python-driver#StoredPasswordProtection) section in the teradatasql `README.md`. + +**Query Timeouts** + +Query timeouts are not currently supported. + + + + +### **6.3 Reference** +--- +**UdaExec Parameters** + +The odbcLibPath parameter has been removed. + +**Connect Parameters** + +The following table lists removed connect parameters. + +--- + | **Removed Parameter**| **Description** | **Reason**| + |---------------------------|-------------------|-------------| + | **charset** | The session character set. | The UTF8 session character set is always used so the charset connection parameter is not needed. | + | **dbType** | The type of system being connected to.| The only supported option is “Teradata” so this parameter is not needed. | + | **host** | The host name of the server hosting the REST service. | REST is no longer supported. | + | **method** | The type of connection to make. | The only supported option is teradatasql so this parameter is not needed | + | **port** | The port number of REST Service. | REST is no longer supported. | + | **protocol** | The protocol to use for REST connections | REST is no longer supported. | + | **sslContext** | The ssl.SSLContext to use to establish SSL connections.| REST is no longer supported. | + | **webContext** | The web context of the REST service | REST is no longer supported. | + | **verifyCerts** | Flags if REST SSL certificate should be verified, ignored if sslContext is not None. | REST is no longer supported. | + --- + +The following table lists modified connect parameters. + + | **Modified Parameter**| **Description** | **Reason**| + |---------------------------|-------------------|-------------| + | **\*\*kwargs** | A variable number of name/value pairs to append to the ConnectString passed to the Teradata SQL Driver for Python. 
| For a full list of connection parameters offered, refer to the [Connection Parameters Section](https://github.com/Teradata/python-driver#ConnectionParameters) in the Teradata SQL Driver for Python `README.md`. | + --- + +**Execute Parameters** + +The following table lists the execute parameters not currently supported. + + | **Name** | **Description** | **Reason** | + |---------------------|--------------------------------------------------|------------------------| + | **queryTimeout** | The number of seconds to wait for a response before aborting the query and returning. | This feature is not currently supported in the teradatasql package but will be offered at a future date. | + --- \ No newline at end of file diff --git a/setup.py b/setup.py index ed42e9c..39efff6 100755 --- a/setup.py +++ b/setup.py @@ -2,17 +2,17 @@ # The MIT License (MIT) # # Copyright (c) 2015 by Teradata -# +# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE @@ -27,16 +27,18 @@ if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 4): print("The teradata module does not support this version of Python, the version must be 3.4 or later.") sys.exit(1) - -with open('teradata/version.py') as f: + +with open('teradata/version.py') as f: exec(f.read()) setup(name='teradata', version=__version__, # @UndefinedVariable + author = 'Teradata Corporation', description='The Teradata python module for DevOps enabled SQL scripting for Teradata UDA.', url='http://github.com/teradata/PyTd', - author='Teradata Corporation', - author_email='eric.scheie@teradata.com', license='MIT', packages=['teradata'], + install_requires=['teradatasql'], + platforms = ['Windows', 'MacOS X', 'Linux'], + python_requires = '>=3.4', zip_safe=True) diff --git a/teradata/datatypes.py b/teradata/datatypes.py index 6a56aff..6d9c045 100644 --- a/teradata/datatypes.py +++ b/teradata/datatypes.py @@ -291,6 +291,8 @@ def convertValue(self, dataType, typeCode, value): elif typeCode == Date: if isinstance(value, str): return convertDate(value) + elif isinstance (value, datetime.date): + return (value) elif type (value) is int: return datetime.datetime.fromtimestamp( value // SECS_IN_MILLISECS).replace( diff --git a/teradata/tdsql.py b/teradata/tdsql.py index 78b6f9c..e484101 100644 --- a/teradata/tdsql.py +++ b/teradata/tdsql.py @@ -277,15 +277,15 @@ def close(self): def _setQueryTimeout(self, queryTimeout): pass - def execute(self, query, params=None, queryTimeout=0): + def execute(self, query, params=None, ignoreErrors = None, queryTimeout=0): logger.trace ('> enter execute {}'.format (self)) try: - return self.executemany (query, params, queryTimeout) + return self.executemany (query, params, ignoreErrors, queryTimeout) finally: logger.trace ('> leave execute {}'.format (self)) # end execute - def executemany (self, query, params, batch=False, ignoreErrors = None, queryTimeout=0): + def executemany (self, query, 
params, ignoreErrors = None, queryTimeout=0, batch=True): logger.trace ('> enter executemany {} : {}'.format (self, query)) try: self._setQueryTimeout(queryTimeout) @@ -473,8 +473,6 @@ def __next__(self): values[i] = self.converter.convertValue( self.types[i][0], self.types[i][1], values[i]) row = Row(self.columns, values, self.rownumber + 1) - if logger.isEnabledFor (logging.DEBUG): - [ logger.debug (" Column {} {:15} = {}".format (i + 1, self.cur.description [i][0], row [i])) for i in range (0, len (row)) ] return row raise StopIteration() diff --git a/test/udaexec.ini b/test/udaexec.ini index e7b8616..bfbd9ee 100644 --- a/test/udaexec.ini +++ b/test/udaexec.ini @@ -6,7 +6,7 @@ key1=file1 key2=file1 escapeTest=this$$isatest dbcInfo=DBC.DBCInfo -testSystem=jdbc1710ek2 +testSystem=jdbc1705ek2 configureLogging=False logConsole=False logLevel=NOTSET From 4319edbdd88cb0faf0d0d8436d30846b4f51b9b4 Mon Sep 17 00:00:00 2001 From: hd121024 Date: Fri, 11 Dec 2020 15:10:58 -0800 Subject: [PATCH 3/5] hd121024_pydbapi-81-PyTd change test system name in .ini file --- test/udaexec.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/udaexec.ini b/test/udaexec.ini index bfbd9ee..6f92c08 100644 --- a/test/udaexec.ini +++ b/test/udaexec.ini @@ -6,7 +6,7 @@ key1=file1 key2=file1 escapeTest=this$$isatest dbcInfo=DBC.DBCInfo -testSystem=jdbc1705ek2 +testSystem=tdprod configureLogging=False logConsole=False logLevel=NOTSET From 930e9a11f23099ca1252b0aa7b9e1a1aad09bd73 Mon Sep 17 00:00:00 2001 From: hd121024 Date: Fri, 11 Dec 2020 16:18:59 -0800 Subject: [PATCH 4/5] hd121024_pydbapi-81-PyTd Update links in readme --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9d50592..735583d 100644 --- a/README.md +++ b/README.md @@ -47,16 +47,16 @@ For Teradata customer support, please visit [Teradata Access](https://access.ter + [3.6 Unicode](#Unicode) + [3.7 Ignoring Errors](#IgnoringErrors) + [3.8 
Password Protection](#PasswordProtection) - + [3.9 External SQL Scripts](#ExternalConfiguraton) + + [3.9 External SQL Scripts](#ExternalScripts) * [4.0 Reference](#References) - + [4.1 UdaExec Parameters](#UdaExec%20Parameters) + + [4.1 UdaExec Parameters](#UdaExecParameters) + [4.2 Connect Parameters](#ConnectParametrs) + [4.3 Execute Parameters](#ExecuteParameters) * [5.0 Running Unit Tests](#RunningTests) * [6.0 Migration Guide](#Migration) + [6.1 Setup](#MGSetup) + [6.2 Database Interactions](#MGDatabase) - + [6.3 Reference](#MGReferences) + + [6.3 Reference](#MGReference) Table of Contents links do not work on PyPI due to a [PyPI limitation](https://github.com/pypa/warehouse/issues/4064). @@ -392,9 +392,10 @@ environments. The same script can be executed against a Dev, Test, and Prod environment with no changes, making it easier to adopt and automate a DevOps workflow. - + #### **2.2 Logging** + --- The UdaExec object automatically enables logging when it is initialized. Logging is implemented using Python's standard logging module. If you @@ -892,7 +893,7 @@ statements. This section defines the full set of method parameters supported by the API. - + #### **4.1 UdaExec Parameters** --- From b6a27194996411f5ab19d3c854276d08b2513283 Mon Sep 17 00:00:00 2001 From: hd121024 Date: Fri, 11 Dec 2020 17:00:48 -0800 Subject: [PATCH 5/5] hd121024_pydbapi-81-PyTd Update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 735583d..e92b4f8 100644 --- a/README.md +++ b/README.md @@ -954,7 +954,7 @@ is not supported by the teradatasql driver, an error will be returned. | **password** | The Database password to use to connect. | None | | **database** | The default database name to apply to the session | None | | **autoCommit** | Enables or disables auto commit mode. When auto commit mode is disabled, transactions must be committed manually. | True | - | **transactionMode** | The transaction mode to use i.e. 
"Teradata" or "ANSI" | *Teradata* | + | **transactionMode** | The transaction mode to use i.e. "Teradata" or "ANSI" | The default transaction mode configured for the Teradata Database | + | **queryBands** | A map (dict) of query band key/value pairs to include in the session's QueryBand. | None | + | **dataTypeConverter** | The DataTypeConverter implementation to use to convert data types from their string representation to python objects. | datatypes.DefaultDataTypeConverter() | + | **\*\*kwargs** | A variable number of name/value pairs to append to the ConnectString passed to the Teradata SQL Driver for Python. For a full list of connection parameters offered, refer to the [Connection Parameters Section](https://github.com/Teradata/python-driver#ConnectionParameters) in the Teradata SQL Driver for Python `README.md`. | None |