From 81ecd5608e94314b94f2e3e6c97c69d2d948aab0 Mon Sep 17 00:00:00 2001 From: John Gemignani Date: Wed, 20 Dec 2023 10:04:39 -0800 Subject: [PATCH] Master to PostgreSQL version 16 (#1451) * Initial PG16 version (#1237) Fixed empty string handling. Previously, the PG 16 outToken emitted NULL for an empty string, but now it emits an empty string "". Consequently, our cypher read function required modification to properly decode the empty string as a plain token, thereby addressing the comparison issue. Compared the branches and added the necessary changes so that the query's rteperminfos variable doesn't stay NULL. Fix missing include varatt.h (causing undefined symbol VARDATA_ANY) & fixed some failing test cases Added missing include varatt.h to fix the undefined symbol while loading age into postgresql because usage of VARDATA_ANY needs to import varatt.h in PG16 Modified initialisation of ResultRelInfo and removed unnecessary RTEs Compared the branches and added the necessary changes so that the query's rteperminfos variable doesn't stay NULL. Modified initialisation of ResultRelInfo and removed unnecessary RTEs One of the problems that we were facing was related to the ResultRelInfo pointing at the wrong RTE via its ri_RangeTableIndex. The create_entity_result_rel_info() function does not have the capability of setting the ri_RootResultRelInfo to the correct ResultRelInfo node because it does not have access to the ModifyTableState node. The solution for this was to set the third argument in InitResultRelInfo() to be zero instead of list_length(estate->es_range_table). In the update_entity_tuple() function, when we call table_tuple_update() and assign the returned value to the result variable, the buffer variable receives the value of 0. Made a workaround so that the original value isn't lost. This is a work in progress for the new field that was added to the struct Var called varnullingrels. 
According to the documentation, this field is responsible for marking the Vars as nullable, if they are coming from a JOIN, either LEFT JOIN, RIGHT JOIN, or FULL OUTER JOIN. The changes were made following an "optional match" clause which is being treated as a LEFT JOIN from our extension. A function markRelsAsNulledBy is added because it is internal in Postgres and doesn't belong in a header file, therefore it can't be exported. This function is added before the creation of the Vars from the make_vertex_expr and make_edge_expr, to correctly mark the specific PNSI as nullable, so later in the planner stage, the Vars will be correctly nulled. Fix incorrect typecasting in agtype_to_graphid function. Fix incorrect returns of the function _label_name, _ag_build_vertex and _ag_build_edge. Contributors Panagiotis Foliadis Matheus Farias Mohamed Mokhtar Hannan Aamir John Gemignani Muhammad Taha Naveed Wendel de Lana --------- * Fix Docker files to reflect PostgreSQL version 16 (#1448) Fixed the following Docker files to reflect running under PostgreSQL version 16. modified: docker/Dockerfile modified: docker/Dockerfile.dev This impacts the DockerHub builds. * Mark null-returning RTEs in outer joins as nullable RTEs that appear in the right side of a left join are marked as 'nullable'. The column references to a nullable RTE within the join's RTE are also marked as 'nullable'. This concept is introduced in Postgresql v16. The change in Postgresql v16's pullup_replace_vars_callback() function causes different plans to be generated depending on whether appropriate RTEs are marked nullable. Without marking nullable, in a left join, any function call expression containing right RTE's columns as arguments is not evaluated at the scan level; rather, it is evaluated after the join is performed. At that point, the function call may receive null input, which was unexpected in previous Postgresql versions. 
See: ---- - Postgresql v16's commit: Make Vars be outer-join-aware https://www.postgresql.org/message-id/830269.1656693747@sss.pgh.pa.us - The 'Vars and PlaceHolderVars' section in the Postgresql v16's optimizer/README.md * Fix minor code formatting. --------- Co-authored-by: Shoaib Co-authored-by: Rafsun Masud --- .github/workflows/go-driver.yml | 12 +-- .github/workflows/installcheck.yaml | 30 +++---- .github/workflows/jdbc-driver.yaml | 12 +-- .github/workflows/nodejs-driver.yaml | 12 +-- .github/workflows/python-driver.yaml | 12 +-- .gitignore | 1 + docker/Dockerfile | 4 +- docker/Dockerfile.dev | 4 +- src/backend/catalog/ag_catalog.c | 26 ++++-- src/backend/catalog/ag_graph.c | 4 +- src/backend/catalog/ag_label.c | 12 +-- src/backend/commands/label_commands.c | 5 +- src/backend/executor/cypher_create.c | 4 +- src/backend/executor/cypher_merge.c | 5 +- src/backend/executor/cypher_set.c | 10 ++- src/backend/executor/cypher_utils.c | 47 +++++++--- src/backend/nodes/cypher_readfuncs.c | 24 +++-- src/backend/parser/cypher_analyze.c | 1 + src/backend/parser/cypher_clause.c | 122 +++++++++++++++++++++----- src/backend/parser/cypher_expr.c | 3 +- src/backend/parser/cypher_item.c | 28 +++--- src/backend/parser/cypher_parse_agg.c | 18 +++- src/backend/utils/adt/agtype.c | 64 ++++++++------ src/backend/utils/adt/agtype_gin.c | 1 + src/backend/utils/adt/agtype_ops.c | 2 +- src/backend/utils/adt/agtype_parser.c | 4 +- src/backend/utils/ag_func.c | 12 +-- src/backend/utils/cache/ag_cache.c | 12 +-- src/backend/utils/graph_generation.c | 10 +-- 29 files changed, 334 insertions(+), 167 deletions(-) diff --git a/.github/workflows/go-driver.yml b/.github/workflows/go-driver.yml index adbacc93b..10b1abaa6 100644 --- a/.github/workflows/go-driver.yml +++ b/.github/workflows/go-driver.yml @@ -2,10 +2,10 @@ name: Go Driver Tests on: push: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] pull_request: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] 
jobs: build: @@ -26,14 +26,14 @@ jobs: if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then if [[ "$GITHUB_BASE_REF" == "master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi fi diff --git a/.github/workflows/installcheck.yaml b/.github/workflows/installcheck.yaml index 3709c9744..a975aeb49 100644 --- a/.github/workflows/installcheck.yaml +++ b/.github/workflows/installcheck.yaml @@ -2,32 +2,32 @@ name: Build / Regression on: push: - branches: [ 'master', 'PG15' ] + branches: [ 'master', 'PG16' ] pull_request: - branches: [ 'master', 'PG15' ] + branches: [ 'master', 'PG16' ] jobs: build: runs-on: ubuntu-latest steps: - - name: Get latest commit id of PostgreSQL 15 + - name: Get latest commit id of PostgreSQL 16 run: | - echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_15_STABLE | awk '{print $1}')" >> $GITHUB_ENV + echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_16_STABLE | awk '{print $1}')" >> $GITHUB_ENV - - name: Cache PostgreSQL 15 + - name: Cache PostgreSQL 16 uses: actions/cache@v3 - id: pg15cache + id: pg16cache with: - path: ~/pg15 - key: ${{ runner.os }}-v1-pg15-${{ env.PG_COMMIT_HASH }} + path: ~/pg16 + key: ${{ runner.os }}-v1-pg16-${{ env.PG_COMMIT_HASH }} - - name: Install PostgreSQL 15 - if: steps.pg15cache.outputs.cache-hit != 'true' + - name: Install PostgreSQL 16 + if: steps.pg16cache.outputs.cache-hit != 'true' run: | - git clone --depth 1 --branch REL_15_STABLE 
git://git.postgresql.org/git/postgresql.git ~/pg15source - cd ~/pg15source - ./configure --prefix=$HOME/pg15 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert + git clone --depth 1 --branch REL_16_STABLE git://git.postgresql.org/git/postgresql.git ~/pg16source + cd ~/pg16source + ./configure --prefix=$HOME/pg16 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert make install -j$(nproc) > /dev/null - uses: actions/checkout@v3 @@ -35,12 +35,12 @@ jobs: - name: Build id: build run: | - make PG_CONFIG=$HOME/pg15/bin/pg_config install -j$(nproc) + make PG_CONFIG=$HOME/pg16/bin/pg_config install -j$(nproc) - name: Regression tests id: regression_tests run: | - make PG_CONFIG=$HOME/pg15/bin/pg_config installcheck + make PG_CONFIG=$HOME/pg16/bin/pg_config installcheck continue-on-error: true - name: Dump regression test errors diff --git a/.github/workflows/jdbc-driver.yaml b/.github/workflows/jdbc-driver.yaml index 7dda51a4e..81d2558ae 100644 --- a/.github/workflows/jdbc-driver.yaml +++ b/.github/workflows/jdbc-driver.yaml @@ -2,10 +2,10 @@ name: JDBC Driver Tests on: push: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] pull_request: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] jobs: build: @@ -28,14 +28,14 @@ jobs: if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then if [[ "$GITHUB_BASE_REF" == "master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi fi diff --git a/.github/workflows/nodejs-driver.yaml b/.github/workflows/nodejs-driver.yaml index 
36356a7c3..bc926e6fd 100644 --- a/.github/workflows/nodejs-driver.yaml +++ b/.github/workflows/nodejs-driver.yaml @@ -2,10 +2,10 @@ name: Nodejs Driver Tests on: push: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] pull_request: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] jobs: build: @@ -23,14 +23,14 @@ jobs: if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then if [[ "$GITHUB_BASE_REF" == "master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi fi diff --git a/.github/workflows/python-driver.yaml b/.github/workflows/python-driver.yaml index 9a7f3559a..3e7f8ee54 100644 --- a/.github/workflows/python-driver.yaml +++ b/.github/workflows/python-driver.yaml @@ -2,10 +2,10 @@ name: Python Driver Tests on: push: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] pull_request: - branches: [ "master", "PG15" ] + branches: [ "master", "PG16" ] jobs: build: @@ -23,14 +23,14 @@ jobs: if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then if [[ "$GITHUB_BASE_REF" == "master" ]]; then echo "TAG=latest" >> $GITHUB_ENV - elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then - echo "TAG=PG15_latest" >> $GITHUB_ENV + elif [[ 
"$GITHUB_BASE_REF" == "PG16" ]]; then + echo "TAG=PG16_latest" >> $GITHUB_ENV fi fi diff --git a/.gitignore b/.gitignore index 7a75f8cbe..d159e2fa2 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ age--*.*.*.sql .DS_Store *.tokens *.interp +*.dylib diff --git a/docker/Dockerfile b/docker/Dockerfile index 45298538e..0df81e4d3 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,14 +16,14 @@ # limitations under the License. # -FROM postgres:15 +FROM postgres:16 RUN apt-get update \ && apt-get install -y --no-install-recommends --no-install-suggests \ bison \ build-essential \ flex \ - postgresql-server-dev-15 \ + postgresql-server-dev-16 \ locales ENV LANG=en_US.UTF-8 diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index 5bfc60d7a..bdf0c40d0 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -17,14 +17,14 @@ # -FROM postgres:15 +FROM postgres:16 RUN apt-get update RUN apt-get install --assume-yes --no-install-recommends --no-install-suggests \ bison \ build-essential \ flex \ - postgresql-server-dev-15 \ + postgresql-server-dev-16 \ locales ENV LANG=en_US.UTF-8 diff --git a/src/backend/catalog/ag_catalog.c b/src/backend/catalog/ag_catalog.c index 60a576a9e..7b8a96c13 100644 --- a/src/backend/catalog/ag_catalog.c +++ b/src/backend/catalog/ag_catalog.c @@ -86,19 +86,29 @@ void process_utility_hook_fini(void) * from being thrown, we need to disable the object_access_hook before dropping * the extension. 
*/ -void ag_ProcessUtility_hook(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, - ProcessUtilityContext context, ParamListInfo params, - QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletion *qc) +void ag_ProcessUtility_hook(PlannedStmt *pstmt, const char *queryString, + bool readOnlyTree, ProcessUtilityContext context, + ParamListInfo params, QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *qc) { if (is_age_drop(pstmt)) + { drop_age_extension((DropStmt *)pstmt->utilityStmt); + } else if (prev_process_utility_hook) - (*prev_process_utility_hook) (pstmt, queryString, readOnlyTree, context, params, - queryEnv, dest, qc); + { + (*prev_process_utility_hook) (pstmt, queryString, readOnlyTree, context, + params, queryEnv, dest, qc); + } else - standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, - dest, qc); + { + Assert(IsA(pstmt, PlannedStmt)); + Assert(pstmt->commandType == CMD_UTILITY); + Assert(queryString != NULL); /* required as of 8.4 */ + Assert(qc == NULL || qc->commandTag == CMDTAG_UNKNOWN); + standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, + params, queryEnv, dest, qc); + } } static void drop_age_extension(DropStmt *stmt) diff --git a/src/backend/catalog/ag_graph.c b/src/backend/catalog/ag_graph.c index f4a0d7213..7594eea59 100644 --- a/src/backend/catalog/ag_graph.c +++ b/src/backend/catalog/ag_graph.c @@ -49,8 +49,8 @@ void insert_graph(const Name graph_name, const Oid nsp_id) HeapTuple tuple; - AssertArg(graph_name); - AssertArg(OidIsValid(nsp_id)); + Assert(graph_name); + Assert(OidIsValid(nsp_id)); ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); values[Anum_ag_graph_oid - 1] = ObjectIdGetDatum(nsp_id); diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 9cd892356..bbaad5292 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -63,12 +63,12 @@ void insert_label(const char 
*label_name, Oid graph_oid, int32 label_id, * NOTE: Is it better to make use of label_id and label_kind domain types * than to use assert to check label_id and label_kind are valid? */ - AssertArg(label_name); - AssertArg(label_id_is_valid(label_id)); - AssertArg(label_kind == LABEL_KIND_VERTEX || + Assert(label_name); + Assert(label_id_is_valid(label_id)); + Assert(label_kind == LABEL_KIND_VERTEX || label_kind == LABEL_KIND_EDGE); - AssertArg(OidIsValid(label_relation)); - AssertArg(seq_name); + Assert(OidIsValid(label_relation)); + Assert(seq_name); ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); @@ -188,8 +188,10 @@ Datum _label_name(PG_FUNCTION_ARGS) uint32 label_id; if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) + { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("graph_oid and label_id must not be null"))); + } graph = PG_GETARG_OID(0); diff --git a/src/backend/commands/label_commands.c b/src/backend/commands/label_commands.c index 0f6396e30..a3a9768cc 100644 --- a/src/backend/commands/label_commands.c +++ b/src/backend/commands/label_commands.c @@ -808,7 +808,7 @@ static void remove_relation(List *qname) Oid rel_oid; ObjectAddress address; - AssertArg(list_length(qname) == 2); + Assert(list_length(qname) == 2); // concurrent is false so lockmode is AccessExclusiveLock @@ -868,8 +868,7 @@ static void range_var_callback_for_remove_relation(const RangeVar *rel, // relkind == expected_relkind - if (!pg_class_ownercheck(rel_oid, GetUserId()) && - !pg_namespace_ownercheck(get_rel_namespace(rel_oid), GetUserId())) + if (!object_ownercheck(rel_oid, get_rel_namespace(rel_oid), GetUserId())) { aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(rel_oid)), diff --git a/src/backend/executor/cypher_create.c b/src/backend/executor/cypher_create.c index be7825bfb..bd2b228e1 100644 --- a/src/backend/executor/cypher_create.c +++ b/src/backend/executor/cypher_create.c @@ -438,7 +438,7 @@ static void 
create_edge(cypher_create_custom_scan_state *css, result = make_edge( id, start_id, end_id, CStringGetDatum(node->label_name), - PointerGetDatum(scanTupleSlot->tts_values[node->prop_attr_num])); + scanTupleSlot->tts_values[node->prop_attr_num]); if (CYPHER_TARGET_NODE_IN_PATH(node->flags)) { @@ -528,7 +528,7 @@ static Datum create_vertex(cypher_create_custom_scan_state *css, // make the vertex agtype result = make_vertex(id, CStringGetDatum(node->label_name), - PointerGetDatum(scanTupleSlot->tts_values[node->prop_attr_num])); + scanTupleSlot->tts_values[node->prop_attr_num]); // append to the path list if (CYPHER_TARGET_NODE_IN_PATH(node->flags)) diff --git a/src/backend/executor/cypher_merge.c b/src/backend/executor/cypher_merge.c index dbe988510..cece1eb80 100644 --- a/src/backend/executor/cypher_merge.c +++ b/src/backend/executor/cypher_merge.c @@ -472,8 +472,9 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) * So we will need to create a TupleTableSlot and populate with the * information from the newly created path that the query needs. */ - ExprContext *econtext = node->ss.ps.ps_ExprContext; - SubqueryScanState *sss = (SubqueryScanState *)node->ss.ps.lefttree; + SubqueryScanState *sss = NULL; + econtext = node->ss.ps.ps_ExprContext; + sss = (SubqueryScanState *)node->ss.ps.lefttree; /* * Our child execution node is always a subquery. 
If not there diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index 4f941bf59..446131309 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -111,7 +111,7 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, TM_FailureData hufd; TM_Result lock_result; Buffer buffer; - bool update_indexes; + TU_UpdateIndexes update_indexes; TM_Result result; CommandId cid = GetCurrentCommandId(true); ResultRelInfo **saved_resultRels = estate->es_result_relations; @@ -167,9 +167,10 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, } // Insert index entries for the tuple - if (resultRelInfo->ri_NumIndices > 0 && update_indexes) + if (resultRelInfo->ri_NumIndices > 0 && update_indexes != TU_None) { - ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false, false, NULL, NIL); + ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false, false, NULL, NIL, + (update_indexes == TU_Summarizing)); } ExecCloseIndices(resultRelInfo); @@ -484,7 +485,8 @@ static void process_update_list(CustomScanState *node) } // Alter the properties Agtype value. - if (strcmp(update_item->prop_name, "")) + if (update_item->prop_name != NULL && + strcmp(update_item->prop_name, "") != 0) { altered_properties = alter_property_value(original_properties, update_item->prop_name, diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c index 0bf3430fe..e649c1136 100644 --- a/src/backend/executor/cypher_utils.c +++ b/src/backend/executor/cypher_utils.c @@ -51,16 +51,20 @@ /* * Given the graph name and the label name, create a ResultRelInfo for the table - * those to variables represent. Open the Indices too. + * those two variables represent. Open the Indices too. 
*/ ResultRelInfo *create_entity_result_rel_info(EState *estate, char *graph_name, char *label_name) { - RangeVar *rv; - Relation label_relation; - ResultRelInfo *resultRelInfo; + RangeVar *rv = NULL; + Relation label_relation = NULL; + ResultRelInfo *resultRelInfo = NULL; + ParseState *pstate = NULL; + RangeTblEntry *rte = NULL; + int pii = 0; - ParseState *pstate = make_parsestate(NULL); + /* create a new parse state for this operation */ + pstate = make_parsestate(NULL); resultRelInfo = palloc(sizeof(ResultRelInfo)); @@ -75,12 +79,33 @@ ResultRelInfo *create_entity_result_rel_info(EState *estate, char *graph_name, label_relation = parserOpenTable(pstate, rv, RowExclusiveLock); - // initialize the resultRelInfo - InitResultRelInfo(resultRelInfo, label_relation, - list_length(estate->es_range_table), NULL, + /* + * Get the rte to determine the correct perminfoindex value. Some rtes + * may have it set up, some created here (executor) may not. + * + * Note: The RTEPermissionInfo structure was added in PostgreSQL version 16. + * + * Note: We use the list_length because exec_rt_fetch starts at 1, not 0. + * Doing this gives us the last rte in the es_range_table list, which + * is the rte in question. + * + * If the rte is created here and doesn't have a perminfoindex, we + * need to pass on a 0. Otherwise, later on GetResultRTEPermissionInfo + * will attempt to get the rte's RTEPermissionInfo data, which doesn't + * exist. + * + * TODO: Ideally, we should consider creating the RTEPermissionInfo data, + * but as this is just a read of the label relation, it is likely + * unnecessary. + */ + rte = exec_rt_fetch(list_length(estate->es_range_table), estate); + pii = (rte->perminfoindex == 0) ? 
0 : list_length(estate->es_range_table); + + /* initialize the resultRelInfo */ + InitResultRelInfo(resultRelInfo, label_relation, pii, NULL, estate->es_instrument); - // open the parse state + /* open the indices */ ExecOpenIndices(resultRelInfo, false); free_parsestate(pstate); @@ -254,8 +279,8 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo, // Insert index entries for the tuple if (resultRelInfo->ri_NumIndices > 0) { - ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false, - false, NULL, NIL); + ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, + false, false, NULL, NIL, false); } return tuple; diff --git a/src/backend/nodes/cypher_readfuncs.c b/src/backend/nodes/cypher_readfuncs.c index 89cedd577..0587229f6 100644 --- a/src/backend/nodes/cypher_readfuncs.c +++ b/src/backend/nodes/cypher_readfuncs.c @@ -24,6 +24,7 @@ #include "nodes/cypher_readfuncs.h" #include "nodes/cypher_nodes.h" +static char *nullable_string(const char *token, int length); /* * Copied From Postgres * @@ -111,7 +112,7 @@ #define READ_STRING_FIELD(fldname) \ token = pg_strtok(&length); \ token = pg_strtok(&length); \ - local_node->fldname = non_nullable_string(token, length) + local_node->fldname = nullable_string(token, length) // Read a parse location field (and throw away the value, per notes above) #define READ_LOCATION_FIELD(fldname) \ @@ -162,11 +163,22 @@ #define strtobool(x) ((*(x) == 't') ? true : false) -#define nullable_string(token,length) \ - ((length) == 0 ? NULL : debackslash(token, length)) - -#define non_nullable_string(token,length) \ - ((length) == 0 ? 
"" : debackslash(token, length)) +/* copied from PG16 function of the same name for consistency */ +static char *nullable_string(const char *token, int length) +{ + /* outToken emits <> for NULL, and pg_strtok makes that an empty string */ + if (length == 0) + { + return NULL; + } + /* outToken emits "" for empty string */ + if (length == 2 && token[0] == '"' && token[1] == '"') + { + return pstrdup(""); + } + /* otherwise, we must remove protective backslashes added by outToken */ + return debackslash(token, length); +} /* * Default read function for cypher nodes. For most nodes, we don't expect diff --git a/src/backend/parser/cypher_analyze.c b/src/backend/parser/cypher_analyze.c index 9f4836cb1..5c0b445e3 100644 --- a/src/backend/parser/cypher_analyze.c +++ b/src/backend/parser/cypher_analyze.c @@ -868,6 +868,7 @@ static Query *analyze_cypher_and_coerce(List *stmt, RangeTblFunction *rtfunc, } query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); assign_query_collations(pstate, query); diff --git a/src/backend/parser/cypher_clause.c b/src/backend/parser/cypher_clause.c index cb8f5a174..e89f20b8a 100644 --- a/src/backend/parser/cypher_clause.c +++ b/src/backend/parser/cypher_clause.c @@ -45,6 +45,7 @@ #include "parser/parse_relation.h" #include "parser/parse_target.h" #include "parser/parsetree.h" +#include "parser/parse_relation.h" #include "rewrite/rewriteHandler.h" #include "utils/typcache.h" #include "utils/lsyscache.h" @@ -331,6 +332,8 @@ static List *make_target_list_from_join(ParseState *pstate, RangeTblEntry *rte); static FuncExpr *make_clause_func_expr(char *function_name, Node *clause_information); +static void markRelsAsNulledBy(ParseState *pstate, Node *n, int jindex); + /* for VLE support */ static ParseNamespaceItem *transform_RangeFunction(cypher_parsestate *cpstate, RangeFunction *r); @@ -639,6 +642,7 @@ static Query *transform_cypher_union(cypher_parsestate 
*cpstate, EXPR_KIND_LIMIT, "LIMIT"); qry->rtable = pstate->p_rtable; + qry->rteperminfos = pstate->p_rteperminfos; qry->jointree = makeFromExpr(pstate->p_joinlist, NULL); qry->hasAggs = pstate->p_hasAggs; @@ -1235,6 +1239,7 @@ static Query *transform_cypher_call_subquery(cypher_parsestate *cpstate, markTargetListOrigins(pstate, query->targetList); query->rtable = cpstate->pstate.p_rtable; + query->rteperminfos = cpstate->pstate.p_rteperminfos; query->jointree = makeFromExpr(cpstate->pstate.p_joinlist, (Node *)where_qual); query->hasAggs = pstate->p_hasAggs; @@ -1307,6 +1312,7 @@ static Query *transform_cypher_delete(cypher_parsestate *cpstate, query->targetList = lappend(query->targetList, tle); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); return query; @@ -1384,6 +1390,7 @@ static Query *transform_cypher_unwind(cypher_parsestate *cpstate, query->targetList = lappend(query->targetList, te); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); query->hasTargetSRFs = pstate->p_hasTargetSRFs; @@ -1526,6 +1533,7 @@ static Query *transform_cypher_set(cypher_parsestate *cpstate, query->targetList = lappend(query->targetList, tle); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); return query; @@ -2121,6 +2129,7 @@ static Query *transform_cypher_return(cypher_parsestate *cpstate, EXPR_KIND_LIMIT, "LIMIT"); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); query->hasAggs = pstate->p_hasAggs; @@ -2344,6 +2353,7 @@ static Query *transform_cypher_clause_with_where(cypher_parsestate *cpstate, markTargetListOrigins(pstate, query->targetList); query->rtable = pstate->p_rtable; + query->rteperminfos = 
pstate->p_rteperminfos; if (!is_ag_node(self, cypher_match)) { @@ -2465,8 +2475,17 @@ static void get_res_cols(ParseState *pstate, ParseNamespaceItem *l_pnsi, if (var == NULL) { + Var *v; + + /* + * Each join (left) RTE's Var, that references a column of the + * right RTE, needs to be marked 'nullable'. + */ + v = lfirst(r_lvar); + markNullableIfNeeded(pstate, v); + colnames = lappend(colnames, lfirst(r_lname)); - colvars = lappend(colvars, lfirst(r_lvar)); + colvars = lappend(colvars, v); } } @@ -2515,6 +2534,13 @@ static RangeTblEntry *transform_cypher_optional_match_clause(cypher_parsestate * j->rarg = transform_clause_for_join(cpstate, clause, &r_rte, &r_nsitem, r_alias); + /* + * Since this is a left join, we need to mark j->rarg as it may potentially + * emit NULL. The jindex argument holds rtindex of the join's RTE, which is + * created right after j->arg's RTE in this case. + */ + markRelsAsNulledBy(pstate, j->rarg, r_nsitem->p_rtindex + 1); + // we are done transform the lateral left join pstate->p_lateral_active = false; @@ -2593,6 +2619,7 @@ static Query *transform_cypher_match_pattern(cypher_parsestate *cpstate, query->targetList = make_target_list_from_join(pstate, rte); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); } else @@ -2647,7 +2674,7 @@ static List *make_target_list_from_join(ParseState *pstate, RangeTblEntry *rte) ListCell *lt; ListCell *ln; - AssertArg(rte->rtekind == RTE_JOIN); + Assert(rte->rtekind == RTE_JOIN); forboth(lt, rte->joinaliasvars, ln, rte->eref->colnames) { @@ -2680,7 +2707,7 @@ static List *makeTargetListFromPNSItem(ParseState *pstate, ParseNamespaceItem *p rte = pnsi->p_rte; /* right now this is only for subqueries */ - AssertArg(rte->rtekind == RTE_SUBQUERY); + Assert(rte->rtekind == RTE_SUBQUERY); rtindex = pnsi->p_rtindex; @@ -2759,6 +2786,7 @@ static Query *transform_cypher_sub_pattern(cypher_parsestate *cpstate, 
markTargetListOrigins(p_child_parse_state, qry->targetList); qry->rtable = p_child_parse_state->p_rtable; + qry->rteperminfos = p_child_parse_state->p_rteperminfos; qry->jointree = makeFromExpr(p_child_parse_state->p_joinlist, NULL); /* the state will be destroyed so copy the data we need */ @@ -3045,6 +3073,7 @@ static void transform_match_pattern(cypher_parsestate *cpstate, Query *query, } query->rtable = cpstate->pstate.p_rtable; + query->rteperminfos = cpstate->pstate.p_rteperminfos; query->jointree = makeFromExpr(cpstate->pstate.p_joinlist, (Node *)expr); } @@ -3213,7 +3242,7 @@ static List *make_join_condition_for_edge(cypher_parsestate *cpstate, prev_edge != NULL && prev_edge->type == ENT_VLE_EDGE) { - List *qualified_name, *args; + List *qualified_name; String *match_qual; FuncCall *fc; @@ -5293,6 +5322,7 @@ static Query *transform_cypher_create(cypher_parsestate *cpstate, query->targetList = lappend(query->targetList, tle); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); return query; @@ -5444,7 +5474,7 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list, Expr *props; Relation label_relation; RangeVar *rv; - RangeTblEntry *rte; + RTEPermissionInfo *rte_pi; TargetEntry *te; char *alias; AttrNumber resno; @@ -5519,7 +5549,6 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list, if (!label_exists(edge->label, cpstate->graph_oid)) { List *parent; - RangeVar *rv; rv = get_label_range_var(cpstate->graph_name, cpstate->graph_oid, AG_DEFAULT_LABEL_EDGE); @@ -5539,8 +5568,9 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list, pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, AccessShareLock, NULL, false, false); - rte = pnsi->p_rte; - rte->requiredPerms = ACL_INSERT; + + rte_pi = pnsi->p_perminfo; + rte_pi->requiredPerms = ACL_INSERT; // Build Id expression, always use 
the default logic rel->id_expr = (Expr *)build_column_default(label_relation, @@ -5759,7 +5789,7 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate, cypher_target_node *rel = make_ag_node(cypher_target_node); Relation label_relation; RangeVar *rv; - RangeTblEntry *rte; + RTEPermissionInfo *rte_pi; TargetEntry *te; Expr *props; char *alias; @@ -5789,7 +5819,6 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate, if (!label_exists(node->label, cpstate->graph_oid)) { List *parent; - RangeVar *rv; rv = get_label_range_var(cpstate->graph_name, cpstate->graph_oid, AG_DEFAULT_LABEL_VERTEX); @@ -5810,8 +5839,9 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate, pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, AccessShareLock, NULL, false, false); - rte = pnsi->p_rte; - rte->requiredPerms = ACL_INSERT; + + rte_pi = pnsi->p_perminfo; + rte_pi->requiredPerms = ACL_INSERT; // id rel->id_expr = (Expr *)build_column_default(label_relation, @@ -6292,6 +6322,7 @@ static Query *transform_cypher_merge(cypher_parsestate *cpstate, markTargetListOrigins(pstate, query->targetList); query->rtable = pstate->p_rtable; + query->rteperminfos = pstate->p_rteperminfos; query->jointree = makeFromExpr(pstate->p_joinlist, NULL); query->hasSubLinks = pstate->p_hasSubLinks; @@ -6365,6 +6396,13 @@ transform_merge_make_lateral_join(cypher_parsestate *cpstate, Query *query, j->rarg = transform_clause_for_join(cpstate, isolated_merge_clause, &r_rte, &r_nsitem, r_alias); + /* + * Since this is a left join, we need to mark j->rarg as it may potentially + * emit NULL. The jindex argument holds rtindex of the join's RTE, which is + * created right after j->arg's RTE in this case. 
+ */ + markRelsAsNulledBy(pstate, j->rarg, r_nsitem->p_rtindex + 1); + // deactivate the lateral flag pstate->p_lateral_active = false; @@ -6777,7 +6815,7 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list, cypher_target_node *rel = make_ag_node(cypher_target_node); Relation label_relation; RangeVar *rv; - RangeTblEntry *rte; + RTEPermissionInfo *rte_pi; ParseNamespaceItem *pnsi; if (edge->name != NULL) @@ -6825,8 +6863,6 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list, if (edge->label && !label_exists(edge->label, cpstate->graph_oid)) { List *parent; - RangeVar *rv; - /* * setup the default edge table as the parent table, that we * will inherit from. @@ -6865,8 +6901,8 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list, pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, AccessShareLock, NULL, false, false); - rte = pnsi->p_rte; - rte->requiredPerms = ACL_INSERT; + rte_pi = pnsi->p_perminfo; + rte_pi->requiredPerms = ACL_INSERT; // Build Id expression, always use the default logic rel->id_expr = (Expr *)build_column_default(label_relation, @@ -6892,7 +6928,7 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list, cypher_target_node *rel = make_ag_node(cypher_target_node); Relation label_relation; RangeVar *rv; - RangeTblEntry *rte; + RTEPermissionInfo *rte_pi; ParseNamespaceItem *pnsi; if (node->name != NULL) @@ -6944,7 +6980,6 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list, if (node->label && !label_exists(node->label, cpstate->graph_oid)) { List *parent; - RangeVar *rv; /* * setup the default vertex table as the parent table, that we @@ -6985,8 +7020,9 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list, pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, AccessShareLock, NULL, false, false); - rte = pnsi->p_rte; - rte->requiredPerms = ACL_INSERT; + + 
rte_pi = pnsi->p_perminfo; + rte_pi->requiredPerms = ACL_INSERT; // id rel->id_expr = (Expr *)build_column_default(label_relation, @@ -7085,6 +7121,50 @@ static FuncExpr *make_clause_func_expr(char *function_name, return func_expr; } +/* + * This function is borrowed from PG version 16.1. + * + * It is used in transformations involving left join in Optional Match and + * Merge in a similar way PG16's transformFromClauseItem() uses it. + */ +static void markRelsAsNulledBy(ParseState *pstate, Node *n, int jindex) +{ + int varno; + ListCell *lc; + + /* Note: we can't see FromExpr here */ + if (IsA(n, RangeTblRef)) + { + varno = ((RangeTblRef *) n)->rtindex; + } + else if (IsA(n, JoinExpr)) + { + JoinExpr *j = (JoinExpr *) n; + + /* recurse to children */ + markRelsAsNulledBy(pstate, j->larg, jindex); + markRelsAsNulledBy(pstate, j->rarg, jindex); + varno = j->rtindex; + } + else + { + elog(ERROR, "unrecognized node type: %d", (int) nodeTag(n)); + varno = 0; /* keep compiler quiet */ + } + + /* + * Now add jindex to the p_nullingrels set for relation varno. Since we + * maintain the p_nullingrels list lazily, we might need to extend it to + * make the varno'th entry exist. + */ + while (list_length(pstate->p_nullingrels) < varno) + { + pstate->p_nullingrels = lappend(pstate->p_nullingrels, NULL); + } + lc = list_nth_cell(pstate->p_nullingrels, varno - 1); + lfirst(lc) = bms_add_member((Bitmapset *) lfirst(lc), jindex); +} + /* * Utility function that helps a clause add the information needed to * the query from the previous clause. 
diff --git a/src/backend/parser/cypher_expr.c b/src/backend/parser/cypher_expr.c index 0efd44868..cd19a5548 100644 --- a/src/backend/parser/cypher_expr.c +++ b/src/backend/parser/cypher_expr.c @@ -249,7 +249,8 @@ static Node *transform_A_Const(cypher_parsestate *cpstate, A_Const *ac) } else { - float8 f = float8in_internal(n, NULL, "double precision", n); + float8 f = float8in_internal(n, NULL, "double precision", n, + NULL); d = float_to_agtype(f); } diff --git a/src/backend/parser/cypher_item.c b/src/backend/parser/cypher_item.c index 7d0ad88b0..d5d088761 100644 --- a/src/backend/parser/cypher_item.c +++ b/src/backend/parser/cypher_item.c @@ -39,8 +39,9 @@ #include "parser/cypher_parse_node.h" static List *ExpandAllTables(ParseState *pstate, int location); -static List *expand_rel_attrs(ParseState *pstate, RangeTblEntry *rte, - int rtindex, int sublevels_up, int location); +static List *expand_pnsi_attrs(ParseState *pstate, ParseNamespaceItem *pnsi, + int sublevels_up, bool require_col_privs, + int location); // see transformTargetEntry() TargetEntry *transform_cypher_item(cypher_parsestate *cpstate, Node *node, @@ -161,10 +162,8 @@ static List *ExpandAllTables(ParseState *pstate, int location) /* Remember we found a p_cols_visible item */ found_table = true; - target = list_concat(target, expand_rel_attrs(pstate, - nsitem->p_rte, - nsitem->p_rtindex, - 0, location)); + target = list_concat(target, expand_pnsi_attrs(pstate, nsitem, 0, true, + location)); } /* Check for "RETURN *;" */ @@ -177,26 +176,33 @@ static List *ExpandAllTables(ParseState *pstate, int location) } /* - * From PG's expandRelAttrs + * From PG's expandNSItemAttrs * Modified to exclude hidden variables and aliases in RETURN * */ -static List *expand_rel_attrs(ParseState *pstate, RangeTblEntry *rte, - int rtindex, int sublevels_up, int location) +static List *expand_pnsi_attrs(ParseState *pstate, ParseNamespaceItem *pnsi, + int sublevels_up, bool require_col_privs, + int location) { + 
RangeTblEntry *rte = pnsi->p_rte; + RTEPermissionInfo *perminfo = pnsi->p_perminfo; List *names, *vars; ListCell *name, *var; List *te_list = NIL; int var_prefix_len = strlen(AGE_DEFAULT_VARNAME_PREFIX); int alias_prefix_len = strlen(AGE_DEFAULT_ALIAS_PREFIX); - expandRTE(rte, rtindex, sublevels_up, location, false, &names, &vars); + vars = expandNSItemVars(pstate, pnsi, sublevels_up, location, &names); /* * Require read access to the table. This is normally redundant with the * markVarForSelectPriv calls below, but not if the table has zero * columns. */ - rte->requiredPerms |= ACL_SELECT; + if (rte->rtekind == RTE_RELATION) + { + Assert(perminfo != NULL); + perminfo->requiredPerms |= ACL_SELECT; + } /* iterate through the variables */ forboth(name, names, var, vars) diff --git a/src/backend/parser/cypher_parse_agg.c b/src/backend/parser/cypher_parse_agg.c index 47075ef7b..cd743fcc4 100644 --- a/src/backend/parser/cypher_parse_agg.c +++ b/src/backend/parser/cypher_parse_agg.c @@ -192,7 +192,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) root->planner_cxt = CurrentMemoryContext; root->hasJoinRTEs = true; - groupClauses = (List *) flatten_join_alias_vars((Query*)root, + groupClauses = (List *) flatten_join_alias_vars(root, qry, (Node *) groupClauses); } @@ -236,7 +236,9 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars((Query*)root, clause); + { + clause = flatten_join_alias_vars(root, qry, clause); + } check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -245,7 +247,9 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars((Query*)root, clause); + { + clause = 
flatten_join_alias_vars(root, qry, clause); + } check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -254,10 +258,12 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) * Per spec, aggregates can't appear in a recursive term. */ if (pstate->p_hasAggs && hasSelfRefRTEs) + { ereport(ERROR, (errcode(ERRCODE_INVALID_RECURSION), errmsg("aggregate functions are not allowed in a recursive query's recursive term"), parser_errposition(pstate, locate_agg_of_level((Node *) qry, 0)))); + } } /* @@ -562,7 +568,11 @@ static bool finalize_grouping_exprs_walker(Node *node, Index ref = 0; if (context->root) - expr = flatten_join_alias_vars((Query*)context->root, expr); + { + expr = flatten_join_alias_vars(context->root, + (Query *)context->root, + expr); + } /* * Each expression must match a grouping entry at the current diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c index e6539e87c..ce0d1bb0e 100644 --- a/src/backend/utils/adt/agtype.c +++ b/src/backend/utils/adt/agtype.c @@ -29,6 +29,8 @@ */ #include "postgres.h" +#include "varatt.h" +#include #include @@ -971,7 +973,7 @@ static void agtype_in_scalar(void *pstate, char *token, Assert(token != NULL); v.type = AGTV_FLOAT; v.val.float_value = float8in_internal(token, NULL, "double precision", - token); + token, NULL); break; case AGTYPE_TOKEN_NUMERIC: Assert(token != NULL); @@ -1974,7 +1976,7 @@ Datum _agtype_build_path(PG_FUNCTION_ARGS) */ if (nargs >= 1 && nargs <= 3) { - int i = 0; + i = 0; for (i = 0; i < nargs; i++) { @@ -2207,7 +2209,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) if (fcinfo->args[2].isnull) { - agtype_build_state *bstate = init_agtype_build_state(0, AGT_FOBJECT); + bstate = init_agtype_build_state(0, AGT_FOBJECT); properties = build_agtype(bstate); pfree_agtype_build_state(bstate); } @@ -2303,7 +2305,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) /* if the properties object is null, push an 
empty object */ if (fcinfo->args[4].isnull) { - agtype_build_state *bstate = init_agtype_build_state(0, AGT_FOBJECT); + bstate = init_agtype_build_state(0, AGT_FOBJECT); properties = build_agtype(bstate); pfree_agtype_build_state(bstate); } @@ -3669,13 +3671,13 @@ Datum agtype_object_field_agtype(PG_FUNCTION_ARGS) if (key_value->type == AGTV_INTEGER) { - PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt, + PG_RETURN_TEXT_P((const void*)agtype_array_element_impl(fcinfo, agt, key_value->val.int_value, false)); } else if (key_value->type == AGTV_STRING) { - AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt, + AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt, key_value->val.string.val, key_value->val.string.len, false)); @@ -3703,13 +3705,13 @@ Datum agtype_object_field_text_agtype(PG_FUNCTION_ARGS) if (key_value->type == AGTV_INTEGER) { - PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt, + PG_RETURN_TEXT_P((const void*)agtype_array_element_impl(fcinfo, agt, key_value->val.int_value, true)); } else if (key_value->type == AGTV_STRING) { - AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt, + AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt, key_value->val.string.val, key_value->val.string.len, true)); @@ -3727,7 +3729,7 @@ Datum agtype_object_field(PG_FUNCTION_ARGS) agtype *agt = AG_GET_ARG_AGTYPE_P(0); text *key = PG_GETARG_TEXT_PP(1); - AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key), + AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key), false)); } @@ -3739,7 +3741,7 @@ Datum agtype_object_field_text(PG_FUNCTION_ARGS) agtype *agt = AG_GET_ARG_AGTYPE_P(0); text *key = PG_GETARG_TEXT_PP(1); - PG_RETURN_TEXT_P(agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key), + PG_RETURN_TEXT_P((const void*)agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key), true)); } @@ -3750,7 +3752,8 @@ Datum 
agtype_array_element(PG_FUNCTION_ARGS) agtype *agt = AG_GET_ARG_AGTYPE_P(0); int elem = PG_GETARG_INT32(1); - AG_RETURN_AGTYPE_P(agtype_array_element_impl(fcinfo, agt, elem, false)); + AG_RETURN_AGTYPE_P((const void*) + agtype_array_element_impl(fcinfo, agt, elem, false)); } PG_FUNCTION_INFO_V1(agtype_array_element_text); @@ -3760,7 +3763,8 @@ Datum agtype_array_element_text(PG_FUNCTION_ARGS) agtype *agt = AG_GET_ARG_AGTYPE_P(0); int elem = PG_GETARG_INT32(1); - PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt, elem, true)); + PG_RETURN_TEXT_P((const void*) + agtype_array_element_impl(fcinfo, agt, elem, true)); } PG_FUNCTION_INFO_V1(agtype_access_operator); @@ -4267,8 +4271,11 @@ Datum agtype_hash_cmp(PG_FUNCTION_ARGS) agtype_value *r; uint64 seed = 0xF0F0F0F0; + /* this function returns INTEGER which is 32 bits */ if (PG_ARGISNULL(0)) - PG_RETURN_INT16(0); + { + PG_RETURN_INT32(0); + } agt = AG_GET_ARG_AGTYPE_P(0); @@ -4291,7 +4298,7 @@ Datum agtype_hash_cmp(PG_FUNCTION_ARGS) seed = LEFT_ROTATE(seed, 1); } - PG_RETURN_INT16(hash); + PG_RETURN_INT32(hash); } // Comparison function for btree Indexes @@ -4302,18 +4309,25 @@ Datum agtype_btree_cmp(PG_FUNCTION_ARGS) agtype *agtype_lhs; agtype *agtype_rhs; + /* this function returns INTEGER which is 32bits */ if (PG_ARGISNULL(0) && PG_ARGISNULL(1)) - PG_RETURN_INT16(0); + { + PG_RETURN_INT32(0); + } else if (PG_ARGISNULL(0)) - PG_RETURN_INT16(1); + { + PG_RETURN_INT32(1); + } else if (PG_ARGISNULL(1)) - PG_RETURN_INT16(-1); + { + PG_RETURN_INT32(-1); + } agtype_lhs = AG_GET_ARG_AGTYPE_P(0); agtype_rhs = AG_GET_ARG_AGTYPE_P(1); - PG_RETURN_INT16(compare_agtype_containers_orderability(&agtype_lhs->root, - &agtype_rhs->root)); + PG_RETURN_INT32(compare_agtype_containers_orderability(&agtype_lhs->root, + &agtype_rhs->root)); } PG_FUNCTION_INFO_V1(agtype_typecast_numeric); @@ -6340,7 +6354,7 @@ PG_FUNCTION_INFO_V1(graphid_to_agtype); Datum graphid_to_agtype(PG_FUNCTION_ARGS) { - 
PG_RETURN_POINTER(integer_to_agtype(AG_GETARG_GRAPHID(0))); + PG_RETURN_POINTER((const void *) integer_to_agtype(AG_GETARG_GRAPHID(0))); } PG_FUNCTION_INFO_V1(agtype_to_graphid); @@ -6356,7 +6370,7 @@ Datum agtype_to_graphid(PG_FUNCTION_ARGS) PG_FREE_IF_COPY(agtype_in, 0); - PG_RETURN_INT16(agtv.val.int_value); + PG_RETURN_INT64(agtv.val.int_value); } PG_FUNCTION_INFO_V1(age_type); @@ -10179,7 +10193,7 @@ Datum age_percentile_cont_aggfinalfn(PG_FUNCTION_ARGS) if (!tuplesort_skiptuples(pgastate->sortstate, first_row, true)) elog(ERROR, "missing row in percentile_cont"); - if (!tuplesort_getdatum(pgastate->sortstate, true, &first_val, &isnull, NULL)) + if (!tuplesort_getdatum(pgastate->sortstate, true, false, &first_val, &isnull, NULL)) elog(ERROR, "missing row in percentile_cont"); if (isnull) PG_RETURN_NULL(); @@ -10190,7 +10204,7 @@ Datum age_percentile_cont_aggfinalfn(PG_FUNCTION_ARGS) } else { - if (!tuplesort_getdatum(pgastate->sortstate, true, &second_val, &isnull, NULL)) + if (!tuplesort_getdatum(pgastate->sortstate, true, false, &second_val, &isnull, NULL)) elog(ERROR, "missing row in percentile_cont"); if (isnull) @@ -10256,7 +10270,7 @@ Datum age_percentile_disc_aggfinalfn(PG_FUNCTION_ARGS) elog(ERROR, "missing row in percentile_disc"); } - if (!tuplesort_getdatum(pgastate->sortstate, true, &val, &isnull, NULL)) + if (!tuplesort_getdatum(pgastate->sortstate, true, false, &val, &isnull, NULL)) elog(ERROR, "missing row in percentile_disc"); /* We shouldn't have stored any nulls, but do the right thing anyway */ @@ -11363,5 +11377,5 @@ Datum agtype_volatile_wrapper(PG_FUNCTION_ARGS) } /* otherwise, just pass it through */ - PG_RETURN_POINTER(PG_GETARG_DATUM(0)); + PG_RETURN_POINTER((const void*) PG_GETARG_DATUM(0)); } diff --git a/src/backend/utils/adt/agtype_gin.c b/src/backend/utils/adt/agtype_gin.c index e8964c924..d260fd986 100644 --- a/src/backend/utils/adt/agtype_gin.c +++ b/src/backend/utils/adt/agtype_gin.c @@ -27,6 +27,7 @@ */ #include "postgres.h" 
+#include "varatt.h" #include "access/gin.h" #include "access/hash.h" diff --git a/src/backend/utils/adt/agtype_ops.c b/src/backend/utils/adt/agtype_ops.c index 766b7d899..33a32deb3 100644 --- a/src/backend/utils/adt/agtype_ops.c +++ b/src/backend/utils/adt/agtype_ops.c @@ -22,7 +22,7 @@ */ #include "postgres.h" - +#include "varatt.h" #include #include diff --git a/src/backend/utils/adt/agtype_parser.c b/src/backend/utils/adt/agtype_parser.c index fe8203aae..c485cb925 100644 --- a/src/backend/utils/adt/agtype_parser.c +++ b/src/backend/utils/adt/agtype_parser.c @@ -30,13 +30,15 @@ */ #include "postgres.h" - +#include "varatt.h" #include "catalog/pg_type.h" #include "libpq/pqformat.h" #include "miscadmin.h" #include "utils/date.h" #include "utils/datetime.h" +#include "utils/varlena.h" +#include "utils/agtype.h" #include "utils/agtype_parser.h" /* diff --git a/src/backend/utils/ag_func.c b/src/backend/utils/ag_func.c index 9c1ba0204..3040e44b8 100644 --- a/src/backend/utils/ag_func.c +++ b/src/backend/utils/ag_func.c @@ -41,8 +41,8 @@ bool is_oid_ag_func(Oid func_oid, const char *func_name) Oid nspid; const char *nspname; - AssertArg(OidIsValid(func_oid)); - AssertArg(func_name); + Assert(OidIsValid(func_oid)); + Assert(func_name); proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_oid)); Assert(HeapTupleIsValid(proctup)); @@ -70,8 +70,8 @@ Oid get_ag_func_oid(const char *func_name, const int nargs, ...) oidvector *arg_types; Oid func_oid; - AssertArg(func_name); - AssertArg(nargs >= 0 && nargs <= FUNC_MAX_ARGS); + Assert(func_name); + Assert(nargs >= 0 && nargs <= FUNC_MAX_ARGS); va_start(ap, nargs); for (i = 0; i < nargs; i++) @@ -101,8 +101,8 @@ Oid get_pg_func_oid(const char *func_name, const int nargs, ...) 
oidvector *arg_types; Oid func_oid; - AssertArg(func_name); - AssertArg(nargs >= 0 && nargs <= FUNC_MAX_ARGS); + Assert(func_name); + Assert(nargs >= 0 && nargs <= FUNC_MAX_ARGS); va_start(ap, nargs); for (i = 0; i < nargs; i++) diff --git a/src/backend/utils/cache/ag_cache.c b/src/backend/utils/cache/ag_cache.c index 4469618ca..92f795848 100644 --- a/src/backend/utils/cache/ag_cache.c +++ b/src/backend/utils/cache/ag_cache.c @@ -202,7 +202,7 @@ static int name_hash_compare(const void *key1, const void *key2, Size keysize) Name name2 = (Name)key2; // keysize parameter is superfluous here - AssertArg(keysize == NAMEDATALEN); + Assert(keysize == NAMEDATALEN); return strncmp(NameStr(*name1), NameStr(*name2), NAMEDATALEN); } @@ -339,7 +339,7 @@ graph_cache_data *search_graph_name_cache(const char *name) NameData name_key; graph_name_cache_entry *entry; - AssertArg(name); + Assert(name); initialize_caches(); @@ -851,7 +851,7 @@ label_cache_data *search_label_name_graph_cache(const char *name, Oid graph) NameData name_key; label_name_graph_cache_entry *entry; - AssertArg(name); + Assert(name); initialize_caches(); @@ -932,7 +932,7 @@ label_cache_data *search_label_graph_oid_cache(uint32 graph_oid, int32 id) { label_graph_oid_cache_entry *entry; - AssertArg(label_id_is_valid(id)); + Assert(label_id_is_valid(id)); initialize_caches(); @@ -1071,8 +1071,8 @@ label_cache_data *search_label_seq_name_graph_cache(const char *name, Oid graph) NameData name_key; label_seq_name_graph_cache_entry *entry; - AssertArg(name); - AssertArg(OidIsValid(graph)); + Assert(name); + Assert(OidIsValid(graph)); initialize_caches(); diff --git a/src/backend/utils/graph_generation.c b/src/backend/utils/graph_generation.c index b27b49518..f84031c79 100644 --- a/src/backend/utils/graph_generation.c +++ b/src/backend/utils/graph_generation.c @@ -129,7 +129,7 @@ Datum create_complete_graph(PG_FUNCTION_ARGS) if (!graph_exists(graph_name_str)) { - DirectFunctionCall1(create_graph, 
CStringGetDatum(graph_name)); + DirectFunctionCall1(create_graph, CStringGetDatum(graph_name->data)); } graph_oid = get_graph_oid(graph_name_str); @@ -140,16 +140,16 @@ Datum create_complete_graph(PG_FUNCTION_ARGS) if (!label_exists(vtx_name_str, graph_oid)) { DirectFunctionCall2(create_vlabel, - CStringGetDatum(graph_name), - CStringGetDatum(vtx_label_name)); + CStringGetDatum(graph_name->data), + CStringGetDatum(vtx_label_name->data)); } } if (!label_exists(edge_name_str, graph_oid)) { DirectFunctionCall2(create_elabel, - CStringGetDatum(graph_name), - CStringGetDatum(edge_label_name)); + CStringGetDatum(graph_name->data), + CStringGetDatum(edge_label_name->data)); } vtx_label_id = get_label_id(vtx_name_str, graph_oid);