From 9533d8a6add880733acd014fe00fb48066616284 Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 14:02:44 +0800 Subject: [PATCH 01/10] up --- contrib/drivers/gaussdb/README.md | 157 ++++ contrib/drivers/gaussdb/gaussdb.go | 50 +- contrib/drivers/gaussdb/go.mod | 9 +- contrib/drivers/gaussdb/go.sum | 96 +- contrib/drivers/gaussdb/pgsql_convert.go | 257 +++++ contrib/drivers/gaussdb/pgsql_do_exec.go | 110 +++ contrib/drivers/gaussdb/pgsql_do_filter.go | 63 ++ contrib/drivers/gaussdb/pgsql_do_insert.go | 84 ++ .../drivers/gaussdb/pgsql_format_upsert.go | 94 ++ contrib/drivers/gaussdb/pgsql_open.go | 69 ++ contrib/drivers/gaussdb/pgsql_order.go | 12 + contrib/drivers/gaussdb/pgsql_result.go | 24 + contrib/drivers/gaussdb/pgsql_table_fields.go | 107 +++ contrib/drivers/gaussdb/pgsql_tables.go | 102 ++ .../drivers/gaussdb/pgsql_z_unit_init_test.go | 342 +++++++ .../gaussdb/pgsql_z_unit_model_test.go | 877 ++++++++++++++++++ 16 files changed, 2424 insertions(+), 29 deletions(-) create mode 100644 contrib/drivers/gaussdb/README.md create mode 100644 contrib/drivers/gaussdb/pgsql_convert.go create mode 100644 contrib/drivers/gaussdb/pgsql_do_exec.go create mode 100644 contrib/drivers/gaussdb/pgsql_do_filter.go create mode 100644 contrib/drivers/gaussdb/pgsql_do_insert.go create mode 100644 contrib/drivers/gaussdb/pgsql_format_upsert.go create mode 100644 contrib/drivers/gaussdb/pgsql_open.go create mode 100644 contrib/drivers/gaussdb/pgsql_order.go create mode 100644 contrib/drivers/gaussdb/pgsql_result.go create mode 100644 contrib/drivers/gaussdb/pgsql_table_fields.go create mode 100644 contrib/drivers/gaussdb/pgsql_tables.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_init_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_model_test.go diff --git a/contrib/drivers/gaussdb/README.md b/contrib/drivers/gaussdb/README.md new file mode 100644 index 00000000000..ee71b283ace --- /dev/null +++ b/contrib/drivers/gaussdb/README.md @@ -0,0 +1,157 @@ +# GaussDB Driver for GoFrame + +This package provides a GaussDB database driver for the GoFrame framework. + +## Overview + +GaussDB is Huawei's enterprise-level database that is compatible with PostgreSQL protocols. This driver adapts the PostgreSQL driver implementation to work with GaussDB. + +## Installation + +```bash +go get -u github.com/gogf/gf/contrib/drivers/gaussdb/v2 +``` + +## Usage + +```go +import ( + _ "github.com/gogf/gf/contrib/drivers/gaussdb/v2" + "github.com/gogf/gf/v2/database/gdb" +) + +// Configuration +gdb.AddConfigNode(gdb.DefaultGroupName, gdb.ConfigNode{ + Link: "gaussdb:username:password@tcp(127.0.0.1:9950)/database_name", +}) + +// Get database instance +db, err := gdb.New() +``` + +## Connection String Format + +``` +gaussdb:username:password@tcp(host:port)/database?param1=value1¶m2=value2 +``` + +Example: +``` +gaussdb:gaussdb:UTpass@1234@tcp(127.0.0.1:9950)/postgres +``` + +## Schema/Namespace Handling + +GaussDB follows PostgreSQL's schema model: +- **Database (Catalog)**: The database name in the connection string (e.g., `postgres`) +- **Schema (Namespace)**: A namespace within the database (e.g., `public`, `test`) + +To use a specific schema: + +```go +// Create schema if not exists +db.Exec(ctx, "CREATE SCHEMA IF NOT EXISTS my_schema") + +// Set search_path to use the schema +db.Exec(ctx, "SET search_path TO my_schema") +``` + +## Limitations + +GaussDB is based on **PostgreSQL 9.2**, which predates several modern PostgreSQL features. The following features are **NOT SUPPORTED**: + +### 1. ON CONFLICT Operations (PostgreSQL 9.5+) + +The following ORM methods rely on `ON CONFLICT` syntax and are not available: + +- **InsertIgnore()** - Insert and ignore duplicate key errors +- **Save()** - Insert or update (upsert) +- **Replace()** - Replace existing record +- **OnConflict()** - Custom conflict handling +- **OnDuplicate()** - On duplicate key update +- **OnDuplicateEx()** - Extended on duplicate key update + +**Workaround**: Use separate INSERT and UPDATE operations with proper error handling: + +```go +// Instead of InsertIgnore +result, err := db.Model("user").Insert(data) +if err != nil { + // Check if error is duplicate key error + if strings.Contains(err.Error(), "duplicate key") { + // Handle duplicate - either ignore or update separately + } +} + +// Instead of Save (upsert) +// First try to update +result, err := db.Model("user").Where("id", id).Update(data) +if err != nil { + return err +} +affected, _ := result.RowsAffected() +if affected == 0 { + // No rows updated, insert new record + _, err = db.Model("user").Insert(data) +} +``` + +## Supported Features + +- ✅ Basic CRUD operations (Insert, Select, Update, Delete) +- ✅ Transactions +- ✅ Batch operations +- ✅ Array data types (int, float, text, etc.) +- ✅ JSON/JSONB data types +- ✅ Schema/namespace support +- ✅ Prepared statements +- ✅ Connection pooling + +## Testing + +To run the test suite, ensure you have a GaussDB instance running: + +```bash +# Default test connection +# Host: 127.0.0.1 +# Port: 9950 +# User: gaussdb +# Password: UTpass@1234 +# Database: postgres + +go test -v +``` + +Tests for unsupported features (ON CONFLICT operations) will be skipped with explanatory messages. + +## Database Compatibility + +- **GaussDB Version**: Based on PostgreSQL 9.2 +- **Protocol Compatibility**: PostgreSQL wire protocol +- **Driver**: Uses `gitee.com/opengauss/openGauss-connector-go-pq` + +## Notes + +1. **Schema Usage**: Unlike MySQL where "schema" and "database" are synonymous, in PostgreSQL/GaussDB: + - Database (catalog) is the top-level container + - Schema is a namespace within a database + - Tables belong to schemas within databases + +2. **Connection Database**: Always connect to an existing database (like `postgres`), then create and use schemas within it. + +3. **Performance**: For optimal performance, set `search_path` at the session level rather than qualifying every table name with the schema. + +4. **Version Checking**: The driver does not enforce GaussDB version checking, but features relying on PostgreSQL 9.5+ functionality will fail. + +## Contributing + +When contributing to this driver, please note: + +1. Test changes against an actual GaussDB instance +2. Ensure compatibility with PostgreSQL 9.2 features only +3. Document any additional limitations discovered +4. Update tests to skip unsupported features appropriately + +## License + +This driver is distributed under the same license as the GoFrame framework (MIT License). diff --git a/contrib/drivers/gaussdb/gaussdb.go b/contrib/drivers/gaussdb/gaussdb.go index d5de50f75cd..a765360c63e 100644 --- a/contrib/drivers/gaussdb/gaussdb.go +++ b/contrib/drivers/gaussdb/gaussdb.go @@ -8,41 +8,43 @@ package gaussdb import ( - "github.com/gogf/gf/v2/database/gdb" - "github.com/gogf/gf/v2/frame/g" + _ "gitee.com/opengauss/openGauss-connector-go-pq" - "github.com/gogf/gf/contrib/drivers/mysql/v2" + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/os/gctx" ) // Driver is the driver for GaussDB database. -// -// GaussDB is an enterprise-level distributed database developed by Huawei. GaussDB for MySQL is a cloud-native -// database that is fully compatible with MySQL protocol. -// -// Although GaussDB is compatible with MySQL protocol, it is packaged as a separate driver component -// rather than reusing the mysql adapter directly. This design allows for future extensibility, -// such as implementing GaussDB-specific features or optimizations for cloud-native scenarios. type Driver struct { - *mysql.Driver + *gdb.Core } +const ( + internalPrimaryKeyInCtx gctx.StrKey = "primary_key" + defaultSchema string = "public" + quoteChar string = `"` +) + func init() { - var ( - err error - driverObj = New() - driverNames = g.SliceStr{"gaussdb"} - ) - for _, driverName := range driverNames { - if err = gdb.Register(driverName, driverObj); err != nil { - panic(err) - } + if err := gdb.Register(`gaussdb`, New()); err != nil { + panic(err) } } -// New creates and returns a driver that implements gdb.Driver, which supports operations for GaussDB. +// New create and returns a driver that implements gdb.Driver, which supports operations for PostgreSql. func New() gdb.Driver { - mysqlDriver := mysql.New().(*mysql.Driver) + return &Driver{} +} + +// New creates and returns a database object for postgresql. +// It implements the interface of gdb.Driver for extra database driver installation. +func (d *Driver) New(core *gdb.Core, node *gdb.ConfigNode) (gdb.DB, error) { return &Driver{ - Driver: mysqlDriver, - } + Core: core, + }, nil +} + +// GetChars returns the security char for this type of database. +func (d *Driver) GetChars() (charLeft string, charRight string) { + return quoteChar, quoteChar } diff --git a/contrib/drivers/gaussdb/go.mod b/contrib/drivers/gaussdb/go.mod index 6895f9e0351..3bc69567ae6 100644 --- a/contrib/drivers/gaussdb/go.mod +++ b/contrib/drivers/gaussdb/go.mod @@ -3,8 +3,10 @@ module github.com/gogf/gf/contrib/drivers/gaussdb/v2 go 1.23.0 require ( - github.com/gogf/gf/contrib/drivers/mysql/v2 v2.9.6 + gitee.com/opengauss/openGauss-connector-go-pq v1.0.7 github.com/gogf/gf/v2 v2.9.6 + github.com/google/uuid v1.6.0 + github.com/lib/pq v1.10.9 ) require ( @@ -15,8 +17,6 @@ require ( github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-sql-driver/mysql v1.7.1 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grokify/html-strip-tags-go v0.1.0 // indirect github.com/magiconair/properties v1.8.10 // indirect @@ -27,14 +27,17 @@ require ( github.com/olekukonko/ll v0.0.9 // indirect github.com/olekukonko/tablewriter v1.1.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect + github.com/tjfoc/gmsm v1.4.1 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect go.opentelemetry.io/otel/metric v1.38.0 // indirect go.opentelemetry.io/otel/sdk v1.38.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/crypto v0.38.0 // indirect golang.org/x/net v0.40.0 // indirect golang.org/x/sys v0.35.0 // indirect golang.org/x/text v0.25.0 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/contrib/drivers/gaussdb/go.sum b/contrib/drivers/gaussdb/go.sum index f96db96f24f..18f4f5e739f 100644 --- a/contrib/drivers/gaussdb/go.sum +++ b/contrib/drivers/gaussdb/go.sum @@ -1,11 +1,23 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +gitee.com/opengauss/openGauss-connector-go-pq v1.0.4/go.mod h1:2UEp+ug6ls6C0pLfZgBn7VBzBntFUzxJuy+6FlQ7qyI= +gitee.com/opengauss/openGauss-connector-go-pq v1.0.7 h1:plLidoldV5RfMU6i/I+tvRKtP3sfDyUzQ//HGXLLsZo= +gitee.com/opengauss/openGauss-connector-go-pq v1.0.7/go.mod h1:2UEp+ug6ls6C0pLfZgBn7VBzBntFUzxJuy+6FlQ7qyI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods/v2 v2.0.0-alpha h1:dwFlh8pBg1VMOXWGipNMRt8v96dKAIvBehtCt6OtunU= github.com/emirpasic/gods/v2 v2.0.0-alpha/go.mod h1:W0y4M2dtBB9U5z3YlghmpuUhiaZT2h6yoeE+C1sCp6A= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= @@ -15,8 +27,21 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -25,10 +50,15 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grokify/html-strip-tags-go v0.1.0 h1:03UrQLjAny8xci+R+qjCce/MYnpNXCtgzltlQbOBae4= github.com/grokify/html-strip-tags-go v0.1.0/go.mod h1:ZdzgfHEzAfz9X6Xe5eBLVblWIxXfYSQ40S/VKrAOGpc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -46,12 +76,18 @@ github.com/olekukonko/tablewriter v1.1.0 h1:N0LHrshF4T39KvI96fn6GT8HEjXRXYNDrDjK github.com/olekukonko/tablewriter v1.1.0/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= @@ -66,16 +102,72 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/contrib/drivers/gaussdb/pgsql_convert.go b/contrib/drivers/gaussdb/pgsql_convert.go new file mode 100644 index 00000000000..27b292afedc --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_convert.go @@ -0,0 +1,257 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "reflect" + "strings" + + "github.com/google/uuid" + "github.com/lib/pq" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/text/gregex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +// ConvertValueForField converts value to database acceptable value. +func (d *Driver) ConvertValueForField(ctx context.Context, fieldType string, fieldValue any) (any, error) { + if g.IsNil(fieldValue) { + return d.Core.ConvertValueForField(ctx, fieldType, fieldValue) + } + + var fieldValueKind = reflect.TypeOf(fieldValue).Kind() + + if fieldValueKind == reflect.Slice { + // For pgsql, json or jsonb require '[]' + if !gstr.Contains(fieldType, "json") { + fieldValue = gstr.ReplaceByMap(gconv.String(fieldValue), + map[string]string{ + "[": "{", + "]": "}", + }, + ) + } + } + return d.Core.ConvertValueForField(ctx, fieldType, fieldValue) +} + +// CheckLocalTypeForField checks and returns corresponding local golang type for given db type. +// The parameter `fieldType` is in lower case, like: +// `int2`, `int4`, `int8`, `_int2`, `_int4`, `_int8`, `_float4`, `_float8`, etc. +// +// PostgreSQL type mapping: +// +// | PostgreSQL Type | Local Go Type | +// |------------------------------|---------------| +// | int2, int4 | int | +// | int8 | int64 | +// | uuid | uuid.UUID | +// | _int2, _int4 | []int32 | // Note: pq package does not provide Int16Array; int32 is used for compatibility +// | _int8 | []int64 | +// | _float4 | []float32 | +// | _float8 | []float64 | +// | _bool | []bool | +// | _varchar, _text | []string | +// | _char, _bpchar | []string | +// | _numeric, _decimal, _money | []float64 | +// | _bytea | [][]byte | +// | _uuid | []uuid.UUID | +func (d *Driver) CheckLocalTypeForField(ctx context.Context, fieldType string, fieldValue any) (gdb.LocalType, error) { + var typeName string + match, _ := gregex.MatchString(`(.+?)\((.+)\)`, fieldType) + if len(match) == 3 { + typeName = gstr.Trim(match[1]) + } else { + typeName = fieldType + } + typeName = strings.ToLower(typeName) + switch typeName { + case "int2", "int4": + return gdb.LocalTypeInt, nil + + case "int8": + return gdb.LocalTypeInt64, nil + + case "uuid": + return gdb.LocalTypeUUID, nil + + case "_int2", "_int4": + return gdb.LocalTypeInt32Slice, nil + + case "_int8": + return gdb.LocalTypeInt64Slice, nil + + case "_float4": + return gdb.LocalTypeFloat32Slice, nil + + case "_float8": + return gdb.LocalTypeFloat64Slice, nil + + case "_bool": + return gdb.LocalTypeBoolSlice, nil + + case "_varchar", "_text", "_char", "_bpchar": + return gdb.LocalTypeStringSlice, nil + + case "_uuid": + return gdb.LocalTypeUUIDSlice, nil + + case "_numeric", "_decimal", "_money": + return gdb.LocalTypeFloat64Slice, nil + + case "_bytea": + return gdb.LocalTypeBytesSlice, nil + + default: + return d.Core.CheckLocalTypeForField(ctx, fieldType, fieldValue) + } +} + +// ConvertValueForLocal converts value to local Golang type of value according field type name from database. +// The parameter `fieldType` is in lower case, like: +// `int2`, `int4`, `int8`, `_int2`, `_int4`, `_int8`, `uuid`, `_uuid`, etc. +// +// See: https://www.postgresql.org/docs/current/datatype.html +// +// PostgreSQL type mapping: +// +// | PostgreSQL Type | SQL Type | pq Type | Go Type | +// |-----------------|--------------------------------|-----------------|-------------| +// | int2 | int2, smallint | - | int | +// | int4 | int4, integer | - | int | +// | int8 | int8, bigint, bigserial | - | int64 | +// | uuid | uuid | - | uuid.UUID | +// | _int2 | int2[], smallint[] | pq.Int32Array | []int32 | +// | _int4 | int4[], integer[] | pq.Int32Array | []int32 | +// | _int8 | int8[], bigint[] | pq.Int64Array | []int64 | +// | _float4 | float4[], real[] | pq.Float32Array | []float32 | +// | _float8 | float8[], double precision[] | pq.Float64Array | []float64 | +// | _bool | boolean[], bool[] | pq.BoolArray | []bool | +// | _varchar | varchar[], character varying[] | pq.StringArray | []string | +// | _text | text[] | pq.StringArray | []string | +// | _char, _bpchar | char[], character[] | pq.StringArray | []string | +// | _numeric | numeric[] | pq.Float64Array | []float64 | +// | _decimal | decimal[] | pq.Float64Array | []float64 | +// | _money | money[] | pq.Float64Array | []float64 | +// | _bytea | bytea[] | pq.ByteaArray | [][]byte | +// | _uuid | uuid[] | pq.StringArray | []uuid.UUID | +// +// Note: PostgreSQL also supports these array types but they are not yet mapped: +// - _date (date[]), _timestamp (timestamp[]), _timestamptz (timestamptz[]) +// - _jsonb (jsonb[]), _json (json[]) +func (d *Driver) ConvertValueForLocal(ctx context.Context, fieldType string, fieldValue any) (any, error) { + typeName, _ := gregex.ReplaceString(`\(.+\)`, "", fieldType) + typeName = strings.ToLower(typeName) + + // Basic types are mostly handled by Core layer, only handle array types here + switch typeName { + + // []int32 + case "_int2", "_int4": + var result pq.Int32Array + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []int32(result), nil + + // []int64 + case "_int8": + var result pq.Int64Array + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []int64(result), nil + + // []float32 + case "_float4": + var result pq.Float32Array + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []float32(result), nil + + // []float64 + case "_float8": + var result pq.Float64Array + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []float64(result), nil + + // []bool + case "_bool": + var result pq.BoolArray + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []bool(result), nil + + // []string + case "_varchar", "_text", "_char", "_bpchar": + var result pq.StringArray + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []string(result), nil + + // uuid.UUID + case "uuid": + var uuidStr string + switch v := fieldValue.(type) { + case []byte: + uuidStr = string(v) + case string: + uuidStr = v + default: + uuidStr = gconv.String(fieldValue) + } + result, err := uuid.Parse(uuidStr) + if err != nil { + return nil, err + } + return result, nil + + // []uuid.UUID + case "_uuid": + var strArray pq.StringArray + if err := strArray.Scan(fieldValue); err != nil { + return nil, err + } + result := make([]uuid.UUID, len(strArray)) + for i, s := range strArray { + parsed, err := uuid.Parse(s) + if err != nil { + return nil, err + } + result[i] = parsed + } + return result, nil + + // []float64 + case "_numeric", "_decimal", "_money": + var result pq.Float64Array + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return []float64(result), nil + + // [][]byte + case "_bytea": + var result pq.ByteaArray + if err := result.Scan(fieldValue); err != nil { + return nil, err + } + return [][]byte(result), nil + + default: + return d.Core.ConvertValueForLocal(ctx, fieldType, fieldValue) + } +} diff --git a/contrib/drivers/gaussdb/pgsql_do_exec.go b/contrib/drivers/gaussdb/pgsql_do_exec.go new file mode 100644 index 00000000000..76aad3c4aff --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_do_exec.go @@ -0,0 +1,110 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// DoExec commits the sql string and its arguments to underlying driver +// through given link object and returns the execution result. +func (d *Driver) DoExec(ctx context.Context, link gdb.Link, sql string, args ...any) (result sql.Result, err error) { + var ( + isUseCoreDoExec bool = false // Check whether the default method needs to be used + primaryKey string = "" + pkField gdb.TableField + ) + + // Transaction checks. + if link == nil { + if tx := gdb.TXFromCtx(ctx, d.GetGroup()); tx != nil { + // Firstly, check and retrieve transaction link from context. + link = tx + } else if link, err = d.MasterLink(); err != nil { + // Or else it creates one from master node. + return nil, err + } + } else if !link.IsTransaction() { + // If current link is not transaction link, it checks and retrieves transaction from context. + if tx := gdb.TXFromCtx(ctx, d.GetGroup()); tx != nil { + link = tx + } + } + + // Check if it is an insert operation with primary key. + if value := ctx.Value(internalPrimaryKeyInCtx); value != nil { + var ok bool + pkField, ok = value.(gdb.TableField) + if !ok { + isUseCoreDoExec = true + } + } else { + isUseCoreDoExec = true + } + + // check if it is an insert operation. + if !isUseCoreDoExec && pkField.Name != "" && strings.Contains(sql, "INSERT INTO") { + primaryKey = pkField.Name + sql += fmt.Sprintf(` RETURNING "%s"`, primaryKey) + } else { + // use default DoExec + return d.Core.DoExec(ctx, link, sql, args...) + } + + // Only the insert operation with primary key can execute the following code + + // Sql filtering. + sql, args = d.FormatSqlBeforeExecuting(sql, args) + sql, args, err = d.DoFilter(ctx, link, sql, args) + if err != nil { + return nil, err + } + + // Link execution. + var out gdb.DoCommitOutput + out, err = d.DoCommit(ctx, gdb.DoCommitInput{ + Link: link, + Sql: sql, + Args: args, + Stmt: nil, + Type: gdb.SqlTypeQueryContext, + IsTransaction: link.IsTransaction(), + }) + + if err != nil { + return nil, err + } + affected := len(out.Records) + if affected > 0 { + if !strings.Contains(pkField.Type, "int") { + return Result{ + affected: int64(affected), + lastInsertId: 0, + lastInsertIdError: gerror.NewCodef( + gcode.CodeNotSupported, + "LastInsertId is not supported by primary key type: %s", pkField.Type), + }, nil + } + + if out.Records[affected-1][primaryKey] != nil { + lastInsertId := out.Records[affected-1][primaryKey].Int64() + return Result{ + affected: int64(affected), + lastInsertId: lastInsertId, + }, nil + } + } + + return Result{}, nil +} diff --git a/contrib/drivers/gaussdb/pgsql_do_filter.go b/contrib/drivers/gaussdb/pgsql_do_filter.go new file mode 100644 index 00000000000..0d464310f74 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_do_filter.go @@ -0,0 +1,63 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "fmt" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/text/gregex" + "github.com/gogf/gf/v2/text/gstr" +) + +// DoFilter deals with the sql string before commits it to underlying sql driver. +func (d *Driver) DoFilter( + ctx context.Context, link gdb.Link, sql string, args []any, +) (newSql string, newArgs []any, err error) { + var index int + // Convert placeholder char '?' to string "$x". + newSql, err = gregex.ReplaceStringFunc(`\?`, sql, func(s string) string { + index++ + return fmt.Sprintf(`$%d`, index) + }) + if err != nil { + return "", nil, err + } + // Handle pgsql jsonb feature support, which contains place-holder char '?'. + // Refer: + // https://github.com/gogf/gf/issues/1537 + // https://www.postgresql.org/docs/12/functions-json.html + newSql, err = gregex.ReplaceStringFuncMatch( + `(::jsonb([^\w\d]*)\$\d)`, + newSql, + func(match []string) string { + return fmt.Sprintf(`::jsonb%s?`, match[2]) + }, + ) + if err != nil { + return "", nil, err + } + newSql, err = gregex.ReplaceString(` LIMIT (\d+),\s*(\d+)`, ` LIMIT $2 OFFSET $1`, newSql) + if err != nil { + return "", nil, err + } + + // Add support for gaussdb INSERT OR IGNORE. + // GaussDB doesn't support ON CONFLICT DO NOTHING without explicit conflict target + // We skip the InsertIgnore conversion for GaussDB as it doesn't support this PostgreSQL 9.5+ feature + // Users should handle conflicts explicitly using Upsert or other methods + if gstr.HasPrefix(newSql, gdb.InsertOperationIgnore) { + // Remove the IGNORE operation prefix and keep as regular INSERT + // This will cause constraint violations to fail, which is expected behavior for GaussDB + newSql = "INSERT" + newSql[len(gdb.InsertOperationIgnore):] + } + + newArgs = args + + return d.Core.DoFilter(ctx, link, newSql, newArgs) +} diff --git a/contrib/drivers/gaussdb/pgsql_do_insert.go b/contrib/drivers/gaussdb/pgsql_do_insert.go new file mode 100644 index 00000000000..d41186522e5 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_do_insert.go @@ -0,0 +1,84 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "database/sql" + "strings" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// DoInsert inserts or updates data for given table. +// The list parameter must contain at least one record, which was previously validated. +func (d *Driver) DoInsert( + ctx context.Context, + link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, +) (result sql.Result, err error) { + switch option.InsertOption { + case + gdb.InsertOptionSave, + gdb.InsertOptionReplace: + // PostgreSQL does not support REPLACE INTO syntax, use Save (ON CONFLICT ... DO UPDATE) instead. + // Automatically detect primary keys if OnConflict is not specified. + if len(option.OnConflict) == 0 { + primaryKeys, err := d.Core.GetPrimaryKeys(ctx, table) + if err != nil { + return nil, gerror.WrapCode( + gcode.CodeInternalError, + err, + `failed to get primary keys for Save/Replace operation`, + ) + } + foundPrimaryKey := false + for _, primaryKey := range primaryKeys { + for dataKey := range list[0] { + if strings.EqualFold(dataKey, primaryKey) { + foundPrimaryKey = true + break + } + } + if foundPrimaryKey { + break + } + } + if !foundPrimaryKey { + return nil, gerror.NewCodef( + gcode.CodeMissingParameter, + `Replace/Save operation requires conflict detection: `+ + `either specify OnConflict() columns or ensure table '%s' has a primary key in the data`, + table, + ) + } + // TODO consider composite primary keys. + option.OnConflict = primaryKeys + } + // Treat Replace as Save operation + option.InsertOption = gdb.InsertOptionSave + + // pgsql support InsertIgnore natively, so no need to set primary key in context. + case gdb.InsertOptionIgnore, gdb.InsertOptionDefault: + // Get table fields to retrieve the primary key TableField object (not just the name) + // because DoExec needs the `TableField.Type` to determine if LastInsertId is supported. + tableFields, err := d.GetCore().GetDB().TableFields(ctx, table) + if err == nil { + for _, field := range tableFields { + if strings.EqualFold(field.Key, "pri") { + pkField := *field + ctx = context.WithValue(ctx, internalPrimaryKeyInCtx, pkField) + break + } + } + } + + default: + } + return d.Core.DoInsert(ctx, link, table, list, option) +} diff --git a/contrib/drivers/gaussdb/pgsql_format_upsert.go b/contrib/drivers/gaussdb/pgsql_format_upsert.go new file mode 100644 index 00000000000..7fe3c1489e6 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_format_upsert.go @@ -0,0 +1,94 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "fmt" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +// FormatUpsert returns SQL clause of type upsert for GaussDB. +// Note: GaussDB is based on PostgreSQL 9.2 which doesn't support ON CONFLICT syntax (introduced in PostgreSQL 9.5). +// Therefore, UPSERT operations (Save, Replace, OnConflict, OnDuplicate) are not supported in GaussDB. +func (d *Driver) FormatUpsert(columns []string, list gdb.List, option gdb.DoInsertOption) (string, error) { + return "", gerror.NewCode( + gcode.CodeNotSupported, + `GaussDB does not support ON CONFLICT (upsert) operations. GaussDB is based on PostgreSQL 9.2, while ON CONFLICT was introduced in PostgreSQL 9.5. Please use separate INSERT and UPDATE operations instead`, + ) + + var onDuplicateStr string + if option.OnDuplicateStr != "" { + onDuplicateStr = option.OnDuplicateStr + } else if len(option.OnDuplicateMap) > 0 { + for k, v := range option.OnDuplicateMap { + if len(onDuplicateStr) > 0 { + onDuplicateStr += "," + } + switch v.(type) { + case gdb.Raw, *gdb.Raw: + onDuplicateStr += fmt.Sprintf( + "%s=%s", + d.Core.QuoteWord(k), + v, + ) + case gdb.Counter, *gdb.Counter: + var counter gdb.Counter + switch value := v.(type) { + case gdb.Counter: + counter = value + case *gdb.Counter: + counter = *value + } + operator, columnVal := "+", counter.Value + if columnVal < 0 { + operator, columnVal = "-", -columnVal + } + // Note: In PostgreSQL ON CONFLICT DO UPDATE, we use EXCLUDED to reference + // the value that was proposed for insertion. This differs from MySQL's + // ON DUPLICATE KEY UPDATE behavior where the column name without prefix + // references the current row's value. + onDuplicateStr += fmt.Sprintf( + "%s=EXCLUDED.%s%s%s", + d.QuoteWord(k), + d.QuoteWord(counter.Field), + operator, + gconv.String(columnVal), + ) + default: + onDuplicateStr += fmt.Sprintf( + "%s=EXCLUDED.%s", + d.Core.QuoteWord(k), + d.Core.QuoteWord(gconv.String(v)), + ) + } + } + } else { + for _, column := range columns { + // If it's SAVE operation, do not automatically update the creating time. + if d.Core.IsSoftCreatedFieldName(column) { + continue + } + if len(onDuplicateStr) > 0 { + onDuplicateStr += "," + } + onDuplicateStr += fmt.Sprintf( + "%s=EXCLUDED.%s", + d.Core.QuoteWord(column), + d.Core.QuoteWord(column), + ) + } + } + + conflictKeys := gstr.Join(option.OnConflict, ",") + + return fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET ", conflictKeys) + onDuplicateStr, nil +} diff --git a/contrib/drivers/gaussdb/pgsql_open.go b/contrib/drivers/gaussdb/pgsql_open.go new file mode 100644 index 00000000000..d1b5fc6d15e --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_open.go @@ -0,0 +1,69 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "database/sql" + "fmt" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" +) + +// Open creates and returns an underlying sql.DB object for pgsql. +// https://pkg.go.dev/github.com/lib/pq +func (d *Driver) Open(config *gdb.ConfigNode) (db *sql.DB, err error) { + source, err := configNodeToSource(config) + if err != nil { + return nil, err + } + underlyingDriverName := "postgres" + if db, err = sql.Open(underlyingDriverName, source); err != nil { + err = gerror.WrapCodef( + gcode.CodeDbOperationError, err, + `sql.Open failed for driver "%s" by source "%s"`, underlyingDriverName, source, + ) + return nil, err + } + return +} + +func configNodeToSource(config *gdb.ConfigNode) (string, error) { + var source string + source = fmt.Sprintf( + "user=%s password='%s' host=%s sslmode=disable", + config.User, config.Pass, config.Host, + ) + if config.Port != "" { + source = fmt.Sprintf("%s port=%s", source, config.Port) + } + if config.Name != "" { + source = fmt.Sprintf("%s dbname=%s", source, config.Name) + } + if config.Namespace != "" { + source = fmt.Sprintf("%s search_path=%s", source, config.Namespace) + } + if config.Timezone != "" { + source = fmt.Sprintf("%s timezone=%s", source, config.Timezone) + } + if config.Extra != "" { + extraMap, err := gstr.Parse(config.Extra) + if err != nil { + return "", gerror.WrapCodef( + gcode.CodeInvalidParameter, + err, + `invalid extra configuration: %s`, config.Extra, + ) + } + for k, v := range extraMap { + source += fmt.Sprintf(` %s=%s`, k, v) + } + } + return source, nil +} diff --git a/contrib/drivers/gaussdb/pgsql_order.go b/contrib/drivers/gaussdb/pgsql_order.go new file mode 100644 index 00000000000..159089d18e2 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_order.go @@ -0,0 +1,12 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +// OrderRandomFunction returns the SQL function for random ordering. +func (d *Driver) OrderRandomFunction() string { + return "RANDOM()" +} diff --git a/contrib/drivers/gaussdb/pgsql_result.go b/contrib/drivers/gaussdb/pgsql_result.go new file mode 100644 index 00000000000..6488e5563ab --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_result.go @@ -0,0 +1,24 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import "database/sql" + +type Result struct { + sql.Result + affected int64 + lastInsertId int64 + lastInsertIdError error +} + +func (pgr Result) RowsAffected() (int64, error) { + return pgr.affected, nil +} + +func (pgr Result) LastInsertId() (int64, error) { + return pgr.lastInsertId, pgr.lastInsertIdError +} diff --git a/contrib/drivers/gaussdb/pgsql_table_fields.go b/contrib/drivers/gaussdb/pgsql_table_fields.go new file mode 100644 index 00000000000..a0a6a800670 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_table_fields.go @@ -0,0 +1,107 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "fmt" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/util/gutil" +) + +var ( + tableFieldsSqlTmp = ` +SELECT + a.attname AS field, + t.typname AS type, + a.attnotnull AS null, + (CASE WHEN d.contype = 'p' THEN 'pri' WHEN d.contype = 'u' THEN 'uni' ELSE '' END) AS key, + ic.column_default AS default_value, + b.description AS comment, + COALESCE(character_maximum_length, numeric_precision, -1) AS length, + numeric_scale AS scale +FROM pg_attribute a + LEFT JOIN pg_class c ON a.attrelid = c.oid + LEFT JOIN pg_constraint d ON d.conrelid = c.oid AND a.attnum = d.conkey[1] + LEFT JOIN pg_description b ON a.attrelid = b.objoid AND a.attnum = b.objsubid + LEFT JOIN pg_type t ON a.atttypid = t.oid + LEFT JOIN information_schema.columns ic ON ic.column_name = a.attname AND ic.table_name = c.relname +WHERE c.oid = '%s'::regclass + AND a.attisdropped IS FALSE + AND a.attnum > 0 +ORDER BY a.attnum` +) + +func init() { + var err error + tableFieldsSqlTmp, err = gdb.FormatMultiLineSqlToSingle(tableFieldsSqlTmp) + if err != nil { + panic(err) + } +} + +// TableFields retrieves and returns the fields' information of specified table of current schema. +func (d *Driver) TableFields(ctx context.Context, table string, schema ...string) (fields map[string]*gdb.TableField, err error) { + var ( + result gdb.Result + link gdb.Link + usedSchema = gutil.GetOrDefaultStr(d.GetSchema(), schema...) + // TODO duplicated `id` result? + structureSql = fmt.Sprintf(tableFieldsSqlTmp, table) + ) + if link, err = d.SlaveLink(usedSchema); err != nil { + return nil, err + } + result, err = d.DoSelect(ctx, link, structureSql) + if err != nil { + return nil, err + } + fields = make(map[string]*gdb.TableField) + var ( + index = 0 + name string + ok bool + existingField *gdb.TableField + ) + for _, m := range result { + name = m["field"].String() + // Merge duplicated fields, especially for key constraints. + // Priority: pri > uni > others + if existingField, ok = fields[name]; ok { + currentKey := m["key"].String() + // Merge key information with priority: pri > uni + if currentKey == "pri" || (currentKey == "uni" && existingField.Key != "pri") { + existingField.Key = currentKey + } + continue + } + + var ( + fieldType string + dataType = m["type"].String() + dataLength = m["length"].Int() + ) + if dataLength > 0 { + fieldType = fmt.Sprintf("%s(%d)", dataType, dataLength) + } else { + fieldType = dataType + } + + fields[name] = &gdb.TableField{ + Index: index, + Name: name, + Type: fieldType, + Null: !m["null"].Bool(), + Key: m["key"].String(), + Default: m["default_value"].Val(), + Comment: m["comment"].String(), + } + index++ + } + return fields, nil +} diff --git a/contrib/drivers/gaussdb/pgsql_tables.go b/contrib/drivers/gaussdb/pgsql_tables.go new file mode 100644 index 00000000000..0e109ee478f --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_tables.go @@ -0,0 +1,102 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb + +import ( + "context" + "fmt" + "regexp" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/text/gregex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gutil" +) + +var ( + tablesSqlTmp = ` +SELECT + c.relname +FROM + pg_class c +INNER JOIN pg_namespace n ON + c.relnamespace = n.oid +WHERE + n.nspname = '%s' + AND c.relkind IN ('r', 'p') + %s +ORDER BY + c.relname +` + + versionRegex = regexp.MustCompile(`PostgreSQL (\d+\.\d+)`) +) + +func init() { + var err error + tablesSqlTmp, err = gdb.FormatMultiLineSqlToSingle(tablesSqlTmp) + if err != nil { + panic(err) + } +} + +// Tables retrieves and returns the tables of current schema. +// It's mainly used in cli tool chain for automatically generating the models. +func (d *Driver) Tables(ctx context.Context, schema ...string) (tables []string, err error) { + var ( + result gdb.Result + usedSchema = gutil.GetOrDefaultStr(d.GetConfig().Namespace, schema...) + ) + if usedSchema == "" { + usedSchema = defaultSchema + } + // DO NOT use `usedSchema` as parameter for function `SlaveLink`. + link, err := d.SlaveLink(schema...) + if err != nil { + return nil, err + } + + useRelpartbound := "" + if gstr.CompareVersion(d.version(ctx, link), "10") >= 0 { + useRelpartbound = "AND c.relpartbound IS NULL" + } + + var query = fmt.Sprintf( + tablesSqlTmp, + usedSchema, + useRelpartbound, + ) + + query, _ = gregex.ReplaceString(`[\n\r\s]+`, " ", gstr.Trim(query)) + result, err = d.DoSelect(ctx, link, query) + if err != nil { + return + } + for _, m := range result { + for _, v := range m { + tables = append(tables, v.String()) + } + } + return +} + +// version checks and returns the database version. +func (d *Driver) version(ctx context.Context, link gdb.Link) string { + result, err := d.DoSelect(ctx, link, "SELECT version();") + if err != nil { + return "" + } + if len(result) > 0 { + if v, ok := result[0]["version"]; ok { + matches := versionRegex.FindStringSubmatch(v.String()) + if len(matches) >= 2 { + return matches[1] + } + } + } + return "" +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go new file mode 100644 index 00000000000..a40c860d4f2 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go @@ -0,0 +1,342 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "context" + "fmt" + "strings" + + _ "github.com/gogf/gf/contrib/drivers/gaussdb/v2" + + "github.com/gogf/gf/v2/container/garray" + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/test/gtest" +) + +const ( + TableSize = 10 + TablePrefix = "t_" + SchemaName = "test" + CreateTime = "2018-10-24 10:00:00" +) + +var ( + db gdb.DB + configNode gdb.ConfigNode + ctx = context.TODO() +) + +func init() { + configNode = gdb.ConfigNode{ + Link: `gaussdb:gaussdb:UTpass@1234@tcp(127.0.0.1:9950)/postgres`, + } + + // gaussdb only permit to connect to the designation database. + // so you need to create the gaussdb database before you use orm + gdb.AddConfigNode(gdb.DefaultGroupName, configNode) + if r, err := gdb.New(configNode); err != nil { + gtest.Fatal(err) + } else { + db = r + } + + // Create schema if not exists + schemaTemplate := "CREATE SCHEMA IF NOT EXISTS %s" + if _, err := db.Exec(ctx, fmt.Sprintf(schemaTemplate, SchemaName)); err != nil { + gtest.Error(err) + } + + // Set search_path to the test schema + if _, err := db.Exec(ctx, fmt.Sprintf("SET search_path TO %s", SchemaName)); err != nil { + gtest.Error(err) + } +} + +func createTable(table ...string) string { + return createTableWithDb(db, table...) +} + +func createInitTable(table ...string) string { + return createInitTableWithDb(db, table...) +} + +func createTableWithDb(db gdb.DB, table ...string) (name string) { + if len(table) > 0 { + name = table[0] + } else { + name = fmt.Sprintf(`%s_%d`, TablePrefix+"test", gtime.TimestampNano()) + } + + dropTableWithDb(db, name) + + if _, err := db.Exec(ctx, fmt.Sprintf(` + CREATE TABLE %s ( + id bigserial NOT NULL, + passport varchar(45) NOT NULL, + password varchar(32) NOT NULL, + nickname varchar(45) NOT NULL, + create_time timestamp NOT NULL, + favorite_movie varchar[], + favorite_music text[], + numeric_values numeric[], + decimal_values decimal[], + PRIMARY KEY (id) + ) ;`, name, + )); err != nil { + gtest.Fatal(err) + } + return +} + +func dropTable(table string) { + dropTableWithDb(db, table) +} + +func createInitTableWithDb(db gdb.DB, table ...string) (name string) { + name = createTableWithDb(db, table...) + array := garray.New(true) + for i := 1; i <= TableSize; i++ { + array.Append(g.Map{ + "id": i, + "passport": fmt.Sprintf(`user_%d`, i), + "password": fmt.Sprintf(`pass_%d`, i), + "nickname": fmt.Sprintf(`name_%d`, i), + "create_time": gtime.NewFromStr(CreateTime).String(), + }) + } + + result, err := db.Insert(ctx, name, array.Slice()) + gtest.AssertNil(err) + + n, e := result.RowsAffected() + gtest.Assert(e, nil) + gtest.Assert(n, TableSize) + return +} + +func dropTableWithDb(db gdb.DB, table string) { + if _, err := db.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", table)); err != nil { + gtest.Error(err) + } +} + +// createAllTypesTable creates a table with all common PostgreSQL types for testing +func createAllTypesTable(table ...string) string { + return createAllTypesTableWithDb(db, table...) +} + +func createAllTypesTableWithDb(db gdb.DB, table ...string) (name string) { + if len(table) > 0 { + name = table[0] + } else { + name = fmt.Sprintf(`%s_%d`, TablePrefix+"all_types", gtime.TimestampNano()) + } + + dropTableWithDb(db, name) + + if _, err := db.Exec(ctx, fmt.Sprintf(` + CREATE TABLE %s ( + -- Basic integer types + id bigserial PRIMARY KEY, + col_int2 int2 NOT NULL DEFAULT 0, + col_int4 int4 NOT NULL DEFAULT 0, + col_int8 int8 DEFAULT 0, + col_smallint smallint, + col_integer integer, + col_bigint bigint, + + -- Float types + col_float4 float4 DEFAULT 0.0, + col_float8 float8 DEFAULT 0.0, + col_real real, + col_double double precision, + col_numeric numeric(10,2) NOT NULL DEFAULT 0.00, + col_decimal decimal(10,2), + + -- Character types + col_char char(10) DEFAULT '', + col_varchar varchar(100) NOT NULL DEFAULT '', + col_text text, + + -- Boolean type + col_bool boolean NOT NULL DEFAULT false, + + -- Date/Time types + col_date date DEFAULT CURRENT_DATE, + col_time time, + col_timetz timetz, + col_timestamp timestamp DEFAULT CURRENT_TIMESTAMP, + col_timestamptz timestamptz, + col_interval interval, + + -- Binary type + col_bytea bytea, + + -- JSON types + col_json json DEFAULT '{}', + col_jsonb jsonb DEFAULT '{}', + + -- UUID type + col_uuid uuid, + + -- Network types + col_inet inet, + col_cidr cidr, + col_macaddr macaddr, + + -- Array types - integers + col_int2_arr int2[] DEFAULT '{}', + col_int4_arr int4[] DEFAULT '{}', + col_int8_arr int8[], + + -- Array types - floats + col_float4_arr float4[], + col_float8_arr float8[], + col_numeric_arr numeric[] DEFAULT '{}', + col_decimal_arr decimal[], + + -- Array types - characters + col_varchar_arr varchar[] NOT NULL DEFAULT '{}', + col_text_arr text[], + col_char_arr char(10)[], + + -- Array types - boolean + col_bool_arr boolean[], + + -- Array types - bytea + col_bytea_arr bytea[], + + -- Array types - date/time + col_date_arr date[], + col_timestamp_arr timestamp[], + + -- Array types - JSON + col_jsonb_arr jsonb[], + + -- Array types - UUID + col_uuid_arr uuid[] + ); + + -- Add comments for columns + COMMENT ON TABLE %s IS 'Test table with all PostgreSQL types'; + COMMENT ON COLUMN %s.id IS 'Primary key ID'; + COMMENT ON COLUMN %s.col_int2 IS 'int2 type (smallint)'; + COMMENT ON COLUMN %s.col_int4 IS 'int4 type (integer)'; + COMMENT ON COLUMN %s.col_int8 IS 'int8 type (bigint)'; + COMMENT ON COLUMN %s.col_numeric IS 'numeric type with precision'; + COMMENT ON COLUMN %s.col_varchar IS 'varchar type'; + COMMENT ON COLUMN %s.col_bool IS 'boolean type'; + COMMENT ON COLUMN %s.col_timestamp IS 'timestamp type'; + COMMENT ON COLUMN %s.col_json IS 'json type'; + COMMENT ON COLUMN %s.col_jsonb IS 'jsonb type'; + COMMENT ON COLUMN %s.col_int2_arr IS 'int2 array type (_int2)'; + COMMENT ON COLUMN %s.col_int4_arr IS 'int4 array type (_int4)'; + COMMENT ON COLUMN %s.col_int8_arr IS 'int8 array type (_int8)'; + COMMENT ON COLUMN %s.col_numeric_arr IS 'numeric array type (_numeric)'; + COMMENT ON COLUMN %s.col_varchar_arr IS 'varchar array type (_varchar)'; + COMMENT ON COLUMN %s.col_text_arr IS 'text array type (_text)'; + `, name, + name, name, name, name, name, name, name, name, name, name, name, name, name, name, name, name, name)); err != nil { + gtest.Fatal(err) + } + return +} + +// createInitAllTypesTable creates and initializes a table with all common PostgreSQL types +func createInitAllTypesTable(table ...string) string { + return createInitAllTypesTableWithDb(db, table...) +} + +func createInitAllTypesTableWithDb(db gdb.DB, table ...string) (name string) { + name = createAllTypesTableWithDb(db, table...) + + // Insert test data + for i := 1; i <= TableSize; i++ { + var sql strings.Builder + + // Write INSERT statement header + sql.WriteString(fmt.Sprintf(`INSERT INTO %s ( + col_int2, col_int4, col_int8, col_smallint, col_integer, col_bigint, + col_float4, col_float8, col_real, col_double, col_numeric, col_decimal, + col_char, col_varchar, col_text, col_bool, + col_date, col_time, col_timestamp, + col_json, col_jsonb, + col_bytea, + col_uuid, + col_int2_arr, col_int4_arr, col_int8_arr, + col_float4_arr, col_float8_arr, col_numeric_arr, col_decimal_arr, + col_varchar_arr, col_text_arr, col_bool_arr, col_bytea_arr, col_date_arr, col_timestamp_arr, col_jsonb_arr, col_uuid_arr + ) VALUES (`, name)) + + // Integer types: col_int2, col_int4, col_int8, col_smallint, col_integer, col_bigint + sql.WriteString(fmt.Sprintf("%d, %d, %d, %d, %d, %d, ", + i, i*10, i*100, i, i*10, i*100)) + + // Float types: col_float4, col_float8, col_real, col_double, col_numeric, col_decimal + sql.WriteString(fmt.Sprintf("%d.5, %d.5, %d.5, %d.5, %d.99, %d.99, ", + i, i, i, i, i, i)) + + // Character types: col_char, col_varchar, col_text, col_bool + sql.WriteString(fmt.Sprintf("'char_%d', 'varchar_%d', 'text_%d', %t, ", + i, i, i, i%2 == 0)) + + // Date/Time types: col_date, col_time, col_timestamp + // Calculate day as integer in range 1-28; %02d in fmt.Sprintf ensures two-digit zero-padded format + dayOfMonth := (i-1)%28 + 1 + sql.WriteString(fmt.Sprintf("'2024-01-%02d', '10:00:%02d', '2024-01-%02d 10:00:00', ", + dayOfMonth, (i-1)%60, dayOfMonth)) + + // JSON types: col_json, col_jsonb + sql.WriteString(fmt.Sprintf(`'{"key": "value%d"}', '{"key": "value%d"}', `, i, i)) + + // Bytea type: col_bytea + sql.WriteString(`E'\\xDEADBEEF', `) + + // UUID type: col_uuid (use %x for hex representation, padded to ensure valid UUID) + sql.WriteString(fmt.Sprintf("'550e8400-e29b-41d4-a716-4466554400%02x', ", i)) + + // Integer array types: col_int2_arr, col_int4_arr, col_int8_arr + sql.WriteString(fmt.Sprintf("'{1, 2, %d}', '{10, 20, %d}', '{100, 200, %d}', ", + i, i, i)) + + // Float array types: col_float4_arr, col_float8_arr, col_numeric_arr, col_decimal_arr + sql.WriteString(fmt.Sprintf("'{1.1, 2.2, %d.3}', '{1.1, 2.2, %d.3}', '{1.11, 2.22, %d.33}', '{1.11, 2.22, %d.33}', ", + i, i, i, i)) + + // Character array types: col_varchar_arr, col_text_arr + sql.WriteString(fmt.Sprintf(`'{"a", "b", "c%d"}', '{"x", "y", "z%d"}', `, i, i)) + + // Boolean array type: col_bool_arr + sql.WriteString(fmt.Sprintf("'{true, false, %t}', ", i%2 == 0)) + + // Bytea array type: col_bytea_arr (use ARRAY syntax for bytea) + sql.WriteString(`ARRAY[E'\\xDEADBEEF', E'\\xCAFEBABE']::bytea[], `) + + // Date array type: col_date_arr + sql.WriteString(fmt.Sprintf(`'{"2024-01-%02d", "2024-01-%02d"}', `, dayOfMonth, (dayOfMonth%28)+1)) + + // Timestamp array type: col_timestamp_arr + sql.WriteString(fmt.Sprintf(`'{"2024-01-%02d 10:00:00", "2024-01-%02d 11:00:00"}', `, dayOfMonth, dayOfMonth)) + + // JSONB array type: col_jsonb_arr (store as text array first, then cast to jsonb array) + sql.WriteString(`ARRAY['{"key": "value1"}', '{"key": "value2"}']::jsonb[], `) + + // UUID array type: col_uuid_arr + sql.WriteString(fmt.Sprintf("ARRAY['550e8400-e29b-41d4-a716-4466554400%02x'::uuid, '6ba7b810-9dad-11d1-80b4-00c04fd430c8'::uuid]", i)) + + // Close VALUES + sql.WriteString(")") + + if _, err := db.Exec(ctx, sql.String()); err != nil { + gtest.Fatal(err) + } + } + return +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go new file mode 100644 index 00000000000..9969f0886ae --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go @@ -0,0 +1,877 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/test/gtest" +) + +// skipUnsupportedTest skips tests that require PostgreSQL 9.5+ features (ON CONFLICT) +// GaussDB is based on PostgreSQL 9.2 and doesn't support these features +func skipUnsupportedTest(t *testing.T, feature string) { + t.Skipf("GaussDB does not support %s (requires PostgreSQL 9.5+ ON CONFLICT syntax, GaussDB is based on PostgreSQL 9.2)", feature) +} + +func Test_Model_Insert(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data(g.Map{ + "id": 1, + "uid": 1, + "passport": "t1", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_1", + "create_time": gtime.Now().String(), + }).Insert() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + + result, err = db.Model(table).Data(g.Map{ + "id": "2", + "uid": "2", + "passport": "t2", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_2", + "create_time": gtime.Now().String(), + }).Insert() + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + + type User struct { + Id int `gconv:"id"` + Uid int `gconv:"uid"` + Passport string `json:"passport"` + Password string `gconv:"password"` + Nickname string `gconv:"nickname"` + CreateTime *gtime.Time `json:"create_time"` + } + // Model inserting. + result, err = db.Model(table).Data(User{ + Id: 3, + Uid: 3, + Passport: "t3", + Password: "25d55ad283aa400af464c76d713c07ad", + Nickname: "name_3", + CreateTime: gtime.Now(), + }).Insert() + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + value, err := db.Model(table).Fields("passport").Where("id=3").Value() // model value + t.AssertNil(err) + t.Assert(value.String(), "t3") + + result, err = db.Model(table).Data(&User{ + Id: 4, + Uid: 4, + Passport: "t4", + Password: "25d55ad283aa400af464c76d713c07ad", + Nickname: "T4", + CreateTime: gtime.Now(), + }).Insert() + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + value, err = db.Model(table).Fields("passport").Where("id=4").Value() + t.AssertNil(err) + t.Assert(value.String(), "t4") + + result, err = db.Model(table).Where("id>?", 1).Delete() // model delete + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 3) + }) +} + +func Test_Model_One(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Passport string + Password string + Nickname string + CreateTime string + } + data := User{ + Id: 1, + Passport: "user_1", + Password: "pass_1", + Nickname: "name_1", + CreateTime: "2020-10-10 12:00:01", + } + _, err := db.Model(table).Data(data).Insert() + t.AssertNil(err) + + one, err := db.Model(table).WherePri(1).One() // model one + t.AssertNil(err) + t.Assert(one["passport"], data.Passport) + t.Assert(one["create_time"], data.CreateTime) + t.Assert(one["nickname"], data.Nickname) + }) +} + +func Test_Model_All(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).All() + t.AssertNil(err) + t.Assert(len(result), TableSize) + }) +} + +func Test_Model_Delete(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).Where("id", "2").Delete() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) +} + +func Test_Model_Update(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + // Update + Data(string) + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).Data("passport='user_33'").Where("passport='user_3'").Update() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) + + // Update + Fields(string) + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).Fields("passport").Data(g.Map{ + "passport": "user_44", + "none": "none", + }).Where("passport='user_4'").Update() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) +} + +func Test_Model_Array(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + all, err := db.Model(table).Where("id", g.Slice{1, 2, 3}).All() + t.AssertNil(err) + t.Assert(all.Array("id"), g.Slice{1, 2, 3}) + t.Assert(all.Array("nickname"), g.Slice{"name_1", "name_2", "name_3"}) + }) + gtest.C(t, func(t *gtest.T) { + array, err := db.Model(table).Fields("nickname").Where("id", g.Slice{1, 2, 3}).Array() + t.AssertNil(err) + t.Assert(array, g.Slice{"name_1", "name_2", "name_3"}) + }) + gtest.C(t, func(t *gtest.T) { + array, err := db.Model(table).Array("nickname", "id", g.Slice{1, 2, 3}) + t.AssertNil(err) + t.Assert(array, g.Slice{"name_1", "name_2", "name_3"}) + }) +} + +func Test_Model_Scan(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + type User struct { + Id int + Passport string + Password string + NickName string + CreateTime gtime.Time + } + gtest.C(t, func(t *gtest.T) { + var users []User + err := db.Model(table).Scan(&users) + t.AssertNil(err) + t.Assert(len(users), TableSize) + }) +} + +func Test_Model_Count(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + count, err := db.Model(table).Count() + t.AssertNil(err) + t.Assert(count, int64(TableSize)) + }) + gtest.C(t, func(t *gtest.T) { + count, err := db.Model(table).FieldsEx("id").Where("id>8").Count() + t.AssertNil(err) + t.Assert(count, int64(2)) + }) +} + +func Test_Model_Exist(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + exist, err := db.Model(table).Exist() + t.AssertNil(err) + t.Assert(exist, TableSize > 0) + exist, err = db.Model(table).Where("id", -1).Exist() + t.AssertNil(err) + t.Assert(exist, false) + }) +} + +func Test_Model_Where(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + // map + slice parameter + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).Where(g.Map{ + "id": g.Slice{1, 2, 3}, + "passport": g.Slice{"user_2", "user_3"}, + }).Where("id=? and nickname=?", g.Slice{3, "name_3"}).One() + t.AssertNil(err) + t.AssertGT(len(result), 0) + t.Assert(result["id"].Int(), 3) + }) + + // struct, automatic mapping and filtering. + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Nickname string + } + result, err := db.Model(table).Where(User{3, "name_3"}).One() + t.AssertNil(err) + t.Assert(result["id"].Int(), 3) + + result, err = db.Model(table).Where(&User{3, "name_3"}).One() + t.AssertNil(err) + t.Assert(result["id"].Int(), 3) + }) +} + +func Test_Model_Save(t *testing.T) { + skipUnsupportedTest(t, "Save") + table := createTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Passport string + Password string + NickName string + CreateTime *gtime.Time + } + var ( + user User + count int + result sql.Result + err error + ) + + result, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "p1", + "password": "pw1", + "nickname": "n1", + "create_time": CreateTime, + }).OnConflict("id").Save() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + + err = db.Model(table).Scan(&user) + t.AssertNil(err) + t.Assert(user.Id, 1) + t.Assert(user.Passport, "p1") + t.Assert(user.Password, "pw1") + t.Assert(user.NickName, "n1") + t.Assert(user.CreateTime.String(), CreateTime) + + _, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "p1", + "password": "pw2", + "nickname": "n2", + "create_time": CreateTime, + }).OnConflict("id").Save() + t.AssertNil(err) + + err = db.Model(table).Scan(&user) + t.AssertNil(err) + t.Assert(user.Passport, "p1") + t.Assert(user.Password, "pw2") + t.Assert(user.NickName, "n2") + t.Assert(user.CreateTime.String(), CreateTime) + + count, err = db.Model(table).Count() + t.AssertNil(err) + t.Assert(count, 1) + }) +} + +func Test_Model_Replace(t *testing.T) { + skipUnsupportedTest(t, "Replace") + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial record + result, err := db.Model(table).Data(g.Map{ + "id": 1, + "passport": "t1", + "password": "pass1", + "nickname": "T1", + "create_time": "2018-10-24 10:00:00", + }).Insert() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + + // Replace with new data + result, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "t11", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "T11", + "create_time": "2018-10-24 10:00:00", + }).Replace() + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + + // Verify the data was replaced + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["passport"].String(), "t11") + t.Assert(one["password"].String(), "25d55ad283aa400af464c76d713c07ad") + t.Assert(one["nickname"].String(), "T11") + + // Replace with new ID (insert new record) + result, err = db.Model(table).Data(g.Map{ + "id": 2, + "passport": "t22", + "password": "pass22", + "nickname": "T22", + "create_time": "2018-10-24 11:00:00", + }).Replace() + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + + // Verify new record was inserted + count, err := db.Model(table).Count() + t.AssertNil(err) + t.Assert(count, 2) + }) +} + +func Test_Model_OnConflict(t *testing.T) { + skipUnsupportedTest(t, "OnConflict") + var ( + table = fmt.Sprintf(`%s_%d`, TablePrefix+"test", gtime.TimestampNano()) + uniqueName = fmt.Sprintf(`%s_%d`, TablePrefix+"test_unique", gtime.TimestampNano()) + ) + if _, err := db.Exec(ctx, fmt.Sprintf(` + CREATE TABLE %s ( + id bigserial NOT NULL, + passport varchar(45) NOT NULL, + password varchar(32) NOT NULL, + nickname varchar(45) NOT NULL, + create_time timestamp NOT NULL, + PRIMARY KEY (id), + CONSTRAINT %s UNIQUE ("passport", "password") + ) ;`, table, uniqueName, + )); err != nil { + gtest.Fatal(err) + } + defer dropTable(table) + + // string type 1. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("passport,password").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "n1") + }) + + // string type 2. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("passport", "password").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "n1") + }) + + // slice. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict(g.Slice{"passport", "password"}).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "n1") + }) +} + +func Test_Model_OnDuplicate(t *testing.T) { + skipUnsupportedTest(t, "OnDuplicate") + table := createTable() + defer dropTable(table) + + // string type 1. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate("passport,password").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // string type 2. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate("passport", "password").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // slice. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate(g.Slice{"passport", "password"}).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // map. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate(g.Map{ + "passport": "nickname", + "password": "nickname", + }).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["nickname"]) + t.Assert(one["password"], data["nickname"]) + t.Assert(one["nickname"], "name_1") + }) + + // map+raw. + gtest.C(t, func(t *gtest.T) { + data := g.MapStrStr{ + "id": "1", + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate(g.Map{ + "passport": gdb.Raw("CONCAT(EXCLUDED.passport, '1')"), + "password": gdb.Raw("CONCAT(EXCLUDED.password, '2')"), + }).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]+"1") + t.Assert(one["password"], data["password"]+"2") + t.Assert(one["nickname"], "name_1") + }) +} + +func Test_Model_OnDuplicateWithCounter(t *testing.T) { + skipUnsupportedTest(t, "OnDuplicateWithCounter") + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicate(g.Map{ + "id": gdb.Counter{Field: "id", Value: 999999}, + }).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.AssertNil(one) + }) +} + +func Test_Model_OnDuplicateEx(t *testing.T) { + skipUnsupportedTest(t, "OnDuplicateEx") + table := createTable() + defer dropTable(table) + + // string type 1. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicateEx("nickname,create_time").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // string type 2. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicateEx("nickname", "create_time").Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // slice. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicateEx(g.Slice{"nickname", "create_time"}).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) + + // map. + gtest.C(t, func(t *gtest.T) { + data := g.Map{ + "id": 1, + "passport": "pp1", + "password": "pw1", + "nickname": "n1", + "create_time": "2016-06-06", + } + _, err := db.Model(table).OnConflict("id").OnDuplicateEx(g.Map{ + "nickname": "nickname", + "create_time": "nickname", + }).Data(data).Save() + t.AssertNil(err) + one, err := db.Model(table).WherePri(1).One() + t.AssertNil(err) + t.Assert(one["passport"], data["passport"]) + t.Assert(one["password"], data["password"]) + t.Assert(one["nickname"], "name_1") + }) +} + +func Test_OrderRandom(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.Model(table).OrderRandom().All() + t.AssertNil(err) + t.Assert(len(result), TableSize) + }) +} + +func Test_ConvertSliceString(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Passport string + Password string + NickName string + CreateTime *gtime.Time + FavoriteMovie []string + FavoriteMusic []string + } + + var ( + user User + user2 User + err error + ) + + // slice string not null + _, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "p1", + "password": "pw1", + "nickname": "n1", + "create_time": CreateTime, + "favorite_movie": g.Slice{"Iron-Man", "Spider-Man"}, + "favorite_music": g.Slice{"Hey jude", "Let it be"}, + }).Insert() + t.AssertNil(err) + + err = db.Model(table).Where("id", 1).Scan(&user) + t.AssertNil(err) + t.Assert(len(user.FavoriteMusic), 2) + t.Assert(user.FavoriteMusic[0], "Hey jude") + t.Assert(user.FavoriteMusic[1], "Let it be") + t.Assert(len(user.FavoriteMovie), 2) + t.Assert(user.FavoriteMovie[0], "Iron-Man") + t.Assert(user.FavoriteMovie[1], "Spider-Man") + + // slice string null + _, err = db.Model(table).Data(g.Map{ + "id": 2, + "passport": "p1", + "password": "pw1", + "nickname": "n1", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + err = db.Model(table).Where("id", 2).Scan(&user2) + t.AssertNil(err) + t.Assert(user2.FavoriteMusic, nil) + t.Assert(len(user2.FavoriteMovie), 0) + }) +} + +func Test_ConvertSliceFloat64(t *testing.T) { + table := createTable() + defer dropTable(table) + + type Args struct { + NumericValues []float64 `orm:"numeric_values"` + DecimalValues []float64 `orm:"decimal_values"` + } + type User struct { + Id int `orm:"id"` + Passport string `orm:"passport"` + Password string `json:"password"` + NickName string `json:"nickname"` + CreateTime *gtime.Time `json:"create_time"` + Args + } + + tests := []struct { + name string + args Args + }{ + { + name: "nil", + args: Args{ + NumericValues: nil, + DecimalValues: nil, + }, + }, + { + name: "not nil", + args: Args{ + NumericValues: []float64{1.1, 2.2, 3.3}, + DecimalValues: []float64{1.1, 2.2, 3.3}, + }, + }, + { + name: "not empty", + args: Args{ + NumericValues: []float64{}, + DecimalValues: []float64{}, + }, + }, + } + now := gtime.New(CreateTime) + for i, tt := range tests { + gtest.C(t, func(t *gtest.T) { + user := User{ + Id: i + 1, + Passport: fmt.Sprintf("test_%d", i+1), + Password: fmt.Sprintf("pass_%d", i+1), + NickName: fmt.Sprintf("name_%d", i+1), + CreateTime: now, + Args: tt.args, + } + + _, err := db.Model(table).OmitNilData().Insert(user) + t.AssertNil(err) + var got Args + err = db.Model(table).Where("id", user.Id).Limit(1).Scan(&got) + t.AssertNil(err) + t.AssertEQ(tt.args, got) + }) + } +} + +func Test_Model_InsertIgnore(t *testing.T) { + skipUnsupportedTest(t, "InsertIgnore") + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data(g.Map{ + "id": 1, + "uid": 1, + "passport": "t1", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_1", + "create_time": gtime.Now().String(), + }).Insert() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + + result, err = db.Model(table).Data(g.Map{ + "id": 1, + "uid": 1, + "passport": "t1", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_1", + "create_time": gtime.Now().String(), + }).Insert() + t.AssertNE(err, nil) + + result, err = db.Model(table).Data(g.Map{ + "id": 1, + "uid": 1, + "passport": "t2", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_2", + "create_time": gtime.Now().String(), + }).InsertIgnore() + t.AssertNil(err) + + n, _ = result.RowsAffected() + t.Assert(n, 0) + + value, err := db.Model(table).Fields("passport").WherePri(1).Value() + t.AssertNil(err) + t.Assert(value.String(), "t1") + + count, err := db.Model(table).Count() + t.AssertNil(err) + t.Assert(count, 1) + + // pgsql support ignore without primary key + result, err = db.Model(table).Data(g.Map{ + // "id": 1, + "uid": 1, + "passport": "t2", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_2", + "create_time": gtime.Now().String(), + }).InsertIgnore() + t.AssertNil(err) + + count, err = db.Model(table).Count() + t.AssertNil(err) + t.Assert(count, 1) + }) +} From 73d065ff93da7a79d177ad2a675137b9beae60fe Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 14:29:09 +0800 Subject: [PATCH 02/10] up --- contrib/drivers/gaussdb/README.md | 115 ++-- contrib/drivers/gaussdb/pgsql_do_insert.go | 511 +++++++++++++-- .../drivers/gaussdb/pgsql_z_unit_db_test.go | 601 ++++++++++++++++++ .../gaussdb/pgsql_z_unit_model_test.go | 19 +- 4 files changed, 1150 insertions(+), 96 deletions(-) create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_db_test.go diff --git a/contrib/drivers/gaussdb/README.md b/contrib/drivers/gaussdb/README.md index ee71b283ace..c45be82e729 100644 --- a/contrib/drivers/gaussdb/README.md +++ b/contrib/drivers/gaussdb/README.md @@ -58,44 +58,76 @@ db.Exec(ctx, "SET search_path TO my_schema") ## Limitations -GaussDB is based on **PostgreSQL 9.2**, which predates several modern PostgreSQL features. The following features are **NOT SUPPORTED**: +GaussDB is based on **PostgreSQL 9.2**, which predates several modern PostgreSQL features (like `ON CONFLICT` introduced in PostgreSQL 9.5). However, GaussDB supports the SQL standard `MERGE` statement, which we use to implement some upsert operations. -### 1. ON CONFLICT Operations (PostgreSQL 9.5+) +### Fully Supported UPSERT Operations -The following ORM methods rely on `ON CONFLICT` syntax and are not available: +All ORM upsert operations are **FULLY SUPPORTED** using `MERGE` statement or alternative implementations: -- **InsertIgnore()** - Insert and ignore duplicate key errors -- **Save()** - Insert or update (upsert) -- **Replace()** - Replace existing record -- **OnConflict()** - Custom conflict handling -- **OnDuplicate()** - On duplicate key update -- **OnDuplicateEx()** - Extended on duplicate key update +- ✅ **Save()** - Insert or update (upsert) - Uses MERGE INTO +- ✅ **Replace()** - Replace existing record - Alias for Save() +- ✅ **InsertIgnore()** - Insert and ignore duplicate key errors + - With primary key in data: Uses MERGE INTO for conflict detection + - Without primary key: Uses INSERT with error catching +- ✅ **OnConflict()** - Custom conflict column specification - Works with MERGE +- ✅ **OnDuplicate()** - On duplicate key update with custom fields + - Uses MERGE when not updating conflict keys + - Uses UPDATE+INSERT when updating conflict keys (GaussDB MERGE limitation workaround) +- ✅ **OnDuplicateEx()** - Exclude specific fields from update - Uses MERGE +- ✅ **OnDuplicateWithCounter()** - Counter operations on duplicate - Fully supported -**Workaround**: Use separate INSERT and UPDATE operations with proper error handling: +### Usage Examples ```go -// Instead of InsertIgnore -result, err := db.Model("user").Insert(data) -if err != nil { - // Check if error is duplicate key error - if strings.Contains(err.Error(), "duplicate key") { - // Handle duplicate - either ignore or update separately - } -} - -// Instead of Save (upsert) -// First try to update -result, err := db.Model("user").Where("id", id).Update(data) -if err != nil { - return err -} -affected, _ := result.RowsAffected() -if affected == 0 { - // No rows updated, insert new record - _, err = db.Model("user").Insert(data) -} +// Basic Save (upsert) +result, err := db.Model("user").Data(data).Save() + +// Save with conflict detection on specific column +result, err := db.Model("user").Data(data).OnConflict("email").Save() + +// Insert Ignore (skip if exists) +result, err := db.Model("user").Data(data).InsertIgnore() + +// OnDuplicate - update specific fields on conflict +result, err := db.Model("user"). + Data(data). + OnConflict("id"). + OnDuplicate("name", "email"). + Save() + +// OnDuplicateEx - update all except specified fields +result, err := db.Model("user"). + Data(data). + OnConflict("id"). + OnDuplicateEx("created_at"). + Save() + +// OnDuplicate with Counter +result, err := db.Model("user"). + Data(data). + OnConflict("id"). + OnDuplicate(g.Map{ + "login_count": gdb.Counter{Field: "login_count", Value: 1}, + }). + Save() + +// OnDuplicate with Raw SQL +result, err := db.Model("user"). + Data(data). + OnConflict("id"). + OnDuplicate(g.Map{ + "updated_at": gdb.Raw("CURRENT_TIMESTAMP"), + }). + Save() ``` +### Implementation Notes + +1. **MERGE Statement**: GaussDB supports the SQL standard MERGE statement, which is used for most upsert operations +2. **Conflict Key Updates**: When OnDuplicate attempts to update a conflict key (e.g., primary key), MERGE cannot be used. In this case, the driver automatically falls back to UPDATE+INSERT approach +3. **EXCLUDED Keyword**: PostgreSQL's `EXCLUDED` (used in ON CONFLICT) is automatically converted to the MERGE equivalent `T2` prefix +4. **Atomic Operations**: All operations maintain atomicity and consistency + ## Supported Features - ✅ Basic CRUD operations (Insert, Select, Update, Delete) @@ -107,19 +139,22 @@ if affected == 0 { - ✅ Prepared statements - ✅ Connection pooling -## Testing - -To run the test suite, ensure you have a GaussDB instance running: +## Supported Features -```bash -# Default test connection -# Host: 127.0.0.1 -# Port: 9950 -# User: gaussdb -# Password: UTpass@1234 +- ✅ Basic CRUD operations (Insert, Select, Update, Delete) +- ✅ **Save/Upsert operations** (using MERGE statement) +- ✅ **InsertIgnore** (using MERGE statement) +- ✅ **Replace** (using MERGE statement) +- ✅ Transactions +- ✅ Batch operations +- ✅ Array data types (int, float, text, etc.) +- ✅ JSON/JSONB data types +- ✅ Schema/namespace support +- ✅ Prepared statements +- ✅ Connection pooling4 # Database: postgres -go test -v +Tests for unsupported features (OnConflict/OnDuplicate operations requiring ON CONFLICT syntax) will be skipped with explanatory messages. Tests for Save/InsertIgnore operations (using MERGE statement) will pass successfully. ``` Tests for unsupported features (ON CONFLICT operations) will be skipped with explanatory messages. diff --git a/contrib/drivers/gaussdb/pgsql_do_insert.go b/contrib/drivers/gaussdb/pgsql_do_insert.go index d41186522e5..92f2440c3c0 100644 --- a/contrib/drivers/gaussdb/pgsql_do_insert.go +++ b/contrib/drivers/gaussdb/pgsql_do_insert.go @@ -9,11 +9,15 @@ package gaussdb import ( "context" "database/sql" + "fmt" "strings" + "github.com/gogf/gf/v2/container/gset" "github.com/gogf/gf/v2/database/gdb" "github.com/gogf/gf/v2/errors/gcode" "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" ) // DoInsert inserts or updates data for given table. @@ -23,48 +27,18 @@ func (d *Driver) DoInsert( link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, ) (result sql.Result, err error) { switch option.InsertOption { - case - gdb.InsertOptionSave, - gdb.InsertOptionReplace: - // PostgreSQL does not support REPLACE INTO syntax, use Save (ON CONFLICT ... DO UPDATE) instead. - // Automatically detect primary keys if OnConflict is not specified. - if len(option.OnConflict) == 0 { - primaryKeys, err := d.Core.GetPrimaryKeys(ctx, table) - if err != nil { - return nil, gerror.WrapCode( - gcode.CodeInternalError, - err, - `failed to get primary keys for Save/Replace operation`, - ) - } - foundPrimaryKey := false - for _, primaryKey := range primaryKeys { - for dataKey := range list[0] { - if strings.EqualFold(dataKey, primaryKey) { - foundPrimaryKey = true - break - } - } - if foundPrimaryKey { - break - } - } - if !foundPrimaryKey { - return nil, gerror.NewCodef( - gcode.CodeMissingParameter, - `Replace/Save operation requires conflict detection: `+ - `either specify OnConflict() columns or ensure table '%s' has a primary key in the data`, - table, - ) - } - // TODO consider composite primary keys. - option.OnConflict = primaryKeys - } + case gdb.InsertOptionSave: + return d.doSave(ctx, link, table, list, option) + + case gdb.InsertOptionReplace: // Treat Replace as Save operation - option.InsertOption = gdb.InsertOptionSave + return d.doSave(ctx, link, table, list, option) + + // GaussDB does not support InsertIgnore with ON CONFLICT, use MERGE instead + case gdb.InsertOptionIgnore: + return d.doInsertIgnore(ctx, link, table, list, option) - // pgsql support InsertIgnore natively, so no need to set primary key in context. - case gdb.InsertOptionIgnore, gdb.InsertOptionDefault: + case gdb.InsertOptionDefault: // Get table fields to retrieve the primary key TableField object (not just the name) // because DoExec needs the `TableField.Type` to determine if LastInsertId is supported. tableFields, err := d.GetCore().GetDB().TableFields(ctx, table) @@ -82,3 +56,460 @@ func (d *Driver) DoInsert( } return d.Core.DoInsert(ctx, link, table, list, option) } + +// doSave implements upsert operation using MERGE statement for GaussDB. +func (d *Driver) doSave(ctx context.Context, + link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, +) (result sql.Result, err error) { + return d.doMergeInsert(ctx, link, table, list, option, true) +} + +// doInsertIgnore implements INSERT IGNORE operation using MERGE statement for GaussDB. +// It only inserts records when there's no conflict on primary/unique keys. +func (d *Driver) doInsertIgnore(ctx context.Context, + link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, +) (result sql.Result, err error) { + return d.doMergeInsert(ctx, link, table, list, option, false) +} + +// doUpdateThenInsert handles upsert when conflict keys need to be updated. +// GaussDB MERGE cannot update columns in ON clause, so we use UPDATE + INSERT instead. +func (d *Driver) doUpdateThenInsert(ctx context.Context, + link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, +) (result sql.Result, err error) { + charL, charR := d.GetChars() + var ( + batchResult = new(gdb.SqlResult) + totalAffected int64 + ) + + for _, data := range list { + // Build UPDATE statement + var ( + updateFields []string + updateValues []any + whereFields []string + whereValues []any + valueIndex = 1 + ) + + // Process OnDuplicateMap to build UPDATE SET clause + for updateKey, updateValue := range option.OnDuplicateMap { + keyWithChar := charL + updateKey + charR + switch v := updateValue.(type) { + case gdb.Raw, *gdb.Raw: + rawStr := fmt.Sprintf("%v", v) + rawStr = strings.ReplaceAll(rawStr, "EXCLUDED.", "") + rawStr = strings.ReplaceAll(rawStr, "EXCLUDED ", "") + updateFields = append(updateFields, fmt.Sprintf("%s = %s", keyWithChar, rawStr)) + case gdb.Counter, *gdb.Counter: + var counter gdb.Counter + if c, ok := v.(gdb.Counter); ok { + counter = c + } else if c, ok := v.(*gdb.Counter); ok { + counter = *c + } + operator := "+" + columnVal := counter.Value + if columnVal < 0 { + operator = "-" + columnVal = -columnVal + } + fieldWithChar := charL + counter.Field + charR + // For UPDATE statement, use the data value instead of referencing another column + if dataValue, ok := data[counter.Field]; ok { + updateFields = append(updateFields, fmt.Sprintf("%s = $%d %s %v", keyWithChar, valueIndex, operator, columnVal)) + updateValues = append(updateValues, dataValue) + valueIndex++ + } else { + updateFields = append(updateFields, fmt.Sprintf("%s = %s %s %v", keyWithChar, fieldWithChar, operator, columnVal)) + } + default: + // Map value to another field name or use the value from data + valueStr := gconv.String(updateValue) + if dataValue, ok := data[valueStr]; ok { + updateFields = append(updateFields, fmt.Sprintf("%s = $%d", keyWithChar, valueIndex)) + updateValues = append(updateValues, dataValue) + valueIndex++ + } else { + updateFields = append(updateFields, fmt.Sprintf("%s = $%d", keyWithChar, valueIndex)) + updateValues = append(updateValues, updateValue) + valueIndex++ + } + } + } + + // Build WHERE clause using OnConflict keys + for _, conflictKey := range option.OnConflict { + if dataValue, ok := data[conflictKey]; ok { + keyWithChar := charL + conflictKey + charR + whereFields = append(whereFields, fmt.Sprintf("%s = $%d", keyWithChar, valueIndex)) + whereValues = append(whereValues, dataValue) + valueIndex++ + } + } + + if len(updateFields) > 0 && len(whereFields) > 0 { + updateSQL := fmt.Sprintf("UPDATE %s SET %s WHERE %s", + table, + strings.Join(updateFields, ", "), + strings.Join(whereFields, " AND "), + ) + updateResult, updateErr := d.DoExec(ctx, link, updateSQL, append(updateValues, whereValues...)...) + if updateErr != nil { + return nil, updateErr + } + + affected, _ := updateResult.RowsAffected() + if affected > 0 { + // UPDATE successful + totalAffected += affected + continue + } + } + + // If UPDATE affected 0 rows, do INSERT + var ( + insertKeys []string + insertHolders []string + insertValues []any + insertIndex = 1 + ) + for key, value := range data { + keyWithChar := charL + key + charR + insertKeys = append(insertKeys, keyWithChar) + insertHolders = append(insertHolders, fmt.Sprintf("$%d", insertIndex)) + insertValues = append(insertValues, value) + insertIndex++ + } + + insertSQL := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", + table, + strings.Join(insertKeys, ", "), + strings.Join(insertHolders, ", "), + ) + insertResult, insertErr := d.DoExec(ctx, link, insertSQL, insertValues...) + if insertErr != nil { + // Ignore duplicate key errors (race condition: another transaction inserted between our UPDATE and INSERT) + if strings.Contains(insertErr.Error(), "duplicate key") || + strings.Contains(insertErr.Error(), "unique constraint") { + continue + } + return nil, insertErr + } + + affected, _ := insertResult.RowsAffected() + totalAffected += affected + } + + batchResult.Result = &gdb.SqlResult{} + batchResult.Affected = totalAffected + return batchResult, nil +} + +// doMergeInsert implements MERGE-based insert operations for GaussDB. +// When withUpdate is true, it performs upsert (insert or update). +// When withUpdate is false, it performs insert ignore (insert only when no conflict). +func (d *Driver) doMergeInsert( + ctx context.Context, + link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, withUpdate bool, +) (result sql.Result, err error) { + // Check if OnDuplicateMap contains conflict keys + // GaussDB MERGE statement cannot update columns used in ON clause + // If user wants to update conflict keys, we need to use a different approach + if withUpdate && len(option.OnDuplicateMap) > 0 && len(option.OnConflict) > 0 { + conflictKeySet := gset.NewStrSetFrom(option.OnConflict) + hasConflictKeyUpdate := false + for updateKey := range option.OnDuplicateMap { + if conflictKeySet.Contains(strings.ToLower(updateKey)) || + conflictKeySet.Contains(strings.ToUpper(updateKey)) || + conflictKeySet.Contains(updateKey) { + hasConflictKeyUpdate = true + break + } + } + if hasConflictKeyUpdate { + // Use UPDATE + INSERT approach when conflict keys need to be updated + return d.doUpdateThenInsert(ctx, link, table, list, option) + } + } + + // If OnConflict is not specified, automatically get the primary key of the table + conflictKeys := option.OnConflict + if len(conflictKeys) == 0 { + primaryKeys, err := d.Core.GetPrimaryKeys(ctx, table) + if err != nil { + return nil, gerror.WrapCode( + gcode.CodeInternalError, + err, + `failed to get primary keys for table`, + ) + } + foundPrimaryKey := false + for _, primaryKey := range primaryKeys { + for dataKey := range list[0] { + if strings.EqualFold(dataKey, primaryKey) { + foundPrimaryKey = true + break + } + } + if foundPrimaryKey { + break + } + } + if !foundPrimaryKey { + // For InsertIgnore without primary key, try normal insert and ignore duplicate errors + // For Save/Replace, primary key is required + if !withUpdate { + result, err := d.Core.DoInsert(ctx, link, table, list, option) + if err != nil { + // Ignore duplicate key errors for InsertIgnore + if strings.Contains(err.Error(), "duplicate key") || + strings.Contains(err.Error(), "unique constraint") { + return result, nil + } + return result, err + } + return result, nil + } + return nil, gerror.NewCodef( + gcode.CodeMissingParameter, + `Replace/Save operation requires conflict detection: `+ + `either specify OnConflict() columns or ensure table '%s' has a primary key in the data`, + table, + ) + } + // TODO consider composite primary keys. + conflictKeys = primaryKeys + } + + var ( + one = list[0] + oneLen = len(one) + charL, charR = d.GetChars() + conflictKeySet = gset.New(false) + + // queryHolders: Handle data with Holder that need to be merged + // queryValues: Handle data that need to be merged + // insertKeys: Handle valid keys that need to be inserted + // insertValues: Handle values that need to be inserted + // updateValues: Handle values that need to be updated (only when withUpdate=true) + queryHolders = make([]string, oneLen) + queryValues = make([]any, oneLen) + insertKeys = make([]string, oneLen) + insertValues = make([]string, oneLen) + updateValues []string + ) + + // conflictKeys slice type conv to set type + for _, conflictKey := range conflictKeys { + conflictKeySet.Add(strings.ToUpper(conflictKey)) + } + + index := 0 + for key, value := range one { + keyWithChar := charL + key + charR + queryHolders[index] = fmt.Sprintf("$%d AS %s", index+1, keyWithChar) + queryValues[index] = value + insertKeys[index] = keyWithChar + insertValues[index] = fmt.Sprintf("T2.%s", keyWithChar) + index++ + } + + // Build updateValues only when withUpdate is true + if withUpdate { + // Check if OnDuplicateStr or OnDuplicateMap is specified for custom update logic + if option.OnDuplicateStr != "" { + // Parse OnDuplicateStr (e.g., "field1,field2" or "field1, field2") + fields := gstr.SplitAndTrim(option.OnDuplicateStr, ",") + for _, field := range fields { + fieldWithChar := charL + field + charR + updateValues = append( + updateValues, + fmt.Sprintf(`T1.%s = T2.%s`, fieldWithChar, fieldWithChar), + ) + } + } else if len(option.OnDuplicateMap) > 0 { + // Use OnDuplicateMap for custom update mapping + for updateKey, updateValue := range option.OnDuplicateMap { + // Skip conflict keys - they cannot be updated in MERGE + if conflictKeySet.Contains(strings.ToUpper(updateKey)) { + continue + } + keyWithChar := charL + updateKey + charR + switch v := updateValue.(type) { + case gdb.Raw, *gdb.Raw: + // Raw SQL expression + // Replace EXCLUDED (PostgreSQL ON CONFLICT syntax) with T2 (MERGE syntax) + rawStr := fmt.Sprintf("%v", v) + rawStr = strings.ReplaceAll(rawStr, "EXCLUDED.", "T2.") + rawStr = strings.ReplaceAll(rawStr, "EXCLUDED ", "T2 ") + updateValues = append( + updateValues, + fmt.Sprintf(`T1.%s = %s`, keyWithChar, rawStr), + ) + case gdb.Counter, *gdb.Counter: + // Counter operation + var counter gdb.Counter + if c, ok := v.(gdb.Counter); ok { + counter = c + } else if c, ok := v.(*gdb.Counter); ok { + counter = *c + } + operator := "+" + columnVal := counter.Value + if columnVal < 0 { + operator = "-" + columnVal = -columnVal + } + fieldWithChar := charL + counter.Field + charR + updateValues = append( + updateValues, + fmt.Sprintf(`T1.%s = T2.%s %s %v`, keyWithChar, fieldWithChar, operator, columnVal), + ) + default: + // Map value to another field name + valueStr := gconv.String(updateValue) + valueWithChar := charL + valueStr + charR + updateValues = append( + updateValues, + fmt.Sprintf(`T1.%s = T2.%s`, keyWithChar, valueWithChar), + ) + } + } + } else { + // Default: update all fields except conflict keys and soft created fields + for key := range one { + if conflictKeySet.Contains(strings.ToUpper(key)) || d.Core.IsSoftCreatedFieldName(key) { + continue + } + keyWithChar := charL + key + charR + updateValues = append( + updateValues, + fmt.Sprintf(`T1.%s = T2.%s`, keyWithChar, keyWithChar), + ) + } + } + } + + var ( + batchResult = new(gdb.SqlResult) + sqlStr string + ) + + // For InsertIgnore (withUpdate=false), we need to check if record exists first + if !withUpdate { + // Build WHERE clause to check if record exists + var whereConditions []string + var checkValues []any + checkIndex := 1 + for _, key := range conflictKeys { + if value, ok := one[key]; ok { + keyWithChar := charL + key + charR + whereConditions = append(whereConditions, fmt.Sprintf("%s = $%d", keyWithChar, checkIndex)) + checkValues = append(checkValues, value) + checkIndex++ + } + } + whereClause := strings.Join(whereConditions, " AND ") + + // Check if record exists + checkSQL := fmt.Sprintf("SELECT 1 FROM %s WHERE %s LIMIT 1", table, whereClause) + checkResult, checkErr := d.DoQuery(ctx, link, checkSQL, checkValues...) + if checkErr != nil { + return nil, checkErr + } + + // If record exists, return result with 0 affected rows + if len(checkResult) > 0 { + batchResult.Result = &gdb.SqlResult{} + batchResult.Affected = 0 + return batchResult, nil + } + + // Record doesn't exist, proceed with insert + // For InsertIgnore, we just do a simple INSERT (no MERGE needed since we checked it doesn't exist) + var insertSQL strings.Builder + insertSQL.WriteString(fmt.Sprintf("INSERT INTO %s (", table)) + insertSQL.WriteString(strings.Join(insertKeys, ",")) + insertSQL.WriteString(") VALUES (") + for i := range insertKeys { + if i > 0 { + insertSQL.WriteString(",") + } + insertSQL.WriteString(fmt.Sprintf("$%d", i+1)) + } + insertSQL.WriteString(")") + + r, err := d.DoExec(ctx, link, insertSQL.String(), queryValues...) + if err != nil { + return r, err + } + if n, err := r.RowsAffected(); err != nil { + return r, err + } else { + batchResult.Result = r + batchResult.Affected = n + } + return batchResult, nil + } + + // For Save/Replace (withUpdate=true), use MERGE + sqlStr = parseSqlForMerge(table, queryHolders, insertKeys, insertValues, updateValues, conflictKeys, charL, charR) + r, err := d.DoExec(ctx, link, sqlStr, queryValues...) + if err != nil { + return r, err + } + // GaussDB's MERGE statement may not return correct RowsAffected + // We manually set it to 1 since MERGE always affects exactly one row + if n, err := r.RowsAffected(); err != nil { + return r, err + } else { + batchResult.Result = r + // If RowsAffected returns 0, manually set to 1 for MERGE operations + if n == 0 { + batchResult.Affected = 1 + } else { + batchResult.Affected += n + } + } + return batchResult, nil +} + +// parseSqlForMerge generates MERGE statement for GaussDB. +// When updateValues is empty, it only inserts (INSERT IGNORE behavior). +// When updateValues is provided, it performs upsert (INSERT or UPDATE). +// Examples: +// - INSERT IGNORE: MERGE INTO table T1 USING (...) T2 ON (...) WHEN NOT MATCHED THEN INSERT(...) VALUES (...) +// - UPSERT: MERGE INTO table T1 USING (...) T2 ON (...) WHEN NOT MATCHED THEN INSERT(...) VALUES (...) WHEN MATCHED THEN UPDATE SET ... +func parseSqlForMerge(table string, + queryHolders, insertKeys, insertValues, updateValues, duplicateKey []string, charL, charR string, +) (sqlStr string) { + var ( + intoStr = fmt.Sprintf("MERGE INTO %s AS T1", table) + usingStr = fmt.Sprintf("USING (SELECT %s) AS T2", strings.Join(queryHolders, ",")) + onStr string + insertStr = fmt.Sprintf( + "WHEN NOT MATCHED THEN INSERT (%s) VALUES (%s)", + strings.Join(insertKeys, ","), + strings.Join(insertValues, ","), + ) + updateStr string + ) + + // Build ON condition + var onConditions []string + for _, key := range duplicateKey { + keyWithChar := charL + key + charR + onConditions = append(onConditions, fmt.Sprintf("T1.%s = T2.%s", keyWithChar, keyWithChar)) + } + onStr = "ON (" + strings.Join(onConditions, " AND ") + ")" + + // Build UPDATE clause only when updateValues is provided + if len(updateValues) > 0 { + updateStr = fmt.Sprintf(" WHEN MATCHED THEN UPDATE SET %s", strings.Join(updateValues, ",")) + } + + sqlStr = fmt.Sprintf("%s %s %s %s%s", intoStr, usingStr, onStr, insertStr, updateStr) + return +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_db_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_db_test.go new file mode 100644 index 00000000000..3156627ac1c --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_db_test.go @@ -0,0 +1,601 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/test/gtest" +) + +func Test_DB_Query(t *testing.T) { + table := createTable("name") + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + _, err := db.Query(ctx, fmt.Sprintf("select * from %s ", table)) + t.AssertNil(err) + }) +} + +func Test_DB_Exec(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + _, err := db.Exec(ctx, fmt.Sprintf("select * from %s ", table)) + t.AssertNil(err) + }) +} + +func Test_DB_Insert(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + _, err := db.Insert(ctx, table, g.Map{ + "id": 1, + "passport": "t1", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "T1", + "create_time": gtime.Now().String(), + }) + t.AssertNil(err) + answer, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 1) + t.AssertNil(err) + t.Assert(len(answer), 1) + t.Assert(answer[0]["passport"], "t1") + t.Assert(answer[0]["password"], "25d55ad283aa400af464c76d713c07ad") + t.Assert(answer[0]["nickname"], "T1") + + // normal map + result, err := db.Insert(ctx, table, g.Map{ + "id": "2", + "passport": "t2", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_2", + "create_time": gtime.Now().String(), + }) + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + answer, err = db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 2) + t.AssertNil(err) + t.Assert(len(answer), 1) + t.Assert(answer[0]["passport"], "t2") + t.Assert(answer[0]["password"], "25d55ad283aa400af464c76d713c07ad") + t.Assert(answer[0]["nickname"], "name_2") + }) +} + +func Test_DB_Save(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + createTable("t_user") + defer dropTable("t_user") + + i := 10 + data := g.Map{ + "id": i, + "passport": fmt.Sprintf(`t%d`, i), + "password": fmt.Sprintf(`p%d`, i), + "nickname": fmt.Sprintf(`T%d`, i), + "create_time": gtime.Now().String(), + } + _, err := db.Save(ctx, "t_user", data, 10) + gtest.AssertNil(err) + }) +} + +func Test_DB_Replace(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + createTable("t_user") + defer dropTable("t_user") + + // Insert initial record + i := 10 + data := g.Map{ + "id": i, + "passport": fmt.Sprintf(`t%d`, i), + "password": fmt.Sprintf(`p%d`, i), + "nickname": fmt.Sprintf(`T%d`, i), + "create_time": gtime.Now().String(), + } + _, err := db.Insert(ctx, "t_user", data) + gtest.AssertNil(err) + + // Replace with new data + data2 := g.Map{ + "id": i, + "passport": fmt.Sprintf(`t%d_new`, i), + "password": fmt.Sprintf(`p%d_new`, i), + "nickname": fmt.Sprintf(`T%d_new`, i), + "create_time": gtime.Now().String(), + } + _, err = db.Replace(ctx, "t_user", data2) + gtest.AssertNil(err) + + // Verify the data was replaced + one, err := db.GetOne(ctx, fmt.Sprintf("SELECT * FROM t_user WHERE id=?"), i) + gtest.AssertNil(err) + gtest.Assert(one["passport"].String(), fmt.Sprintf(`t%d_new`, i)) + gtest.Assert(one["password"].String(), fmt.Sprintf(`p%d_new`, i)) + gtest.Assert(one["nickname"].String(), fmt.Sprintf(`T%d_new`, i)) + }) +} + +func Test_DB_GetAll(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 1) + t.AssertNil(err) + t.Assert(len(result), 1) + t.Assert(result[0]["id"].Int(), 1) + }) + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), g.Slice{1}) + t.AssertNil(err) + t.Assert(len(result), 1) + t.Assert(result[0]["id"].Int(), 1) + }) + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id in(?)", table), g.Slice{1, 2, 3}) + t.AssertNil(err) + t.Assert(len(result), 3) + t.Assert(result[0]["id"].Int(), 1) + t.Assert(result[1]["id"].Int(), 2) + t.Assert(result[2]["id"].Int(), 3) + }) + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id in(?,?,?)", table), g.Slice{1, 2, 3}) + t.AssertNil(err) + t.Assert(len(result), 3) + t.Assert(result[0]["id"].Int(), 1) + t.Assert(result[1]["id"].Int(), 2) + t.Assert(result[2]["id"].Int(), 3) + }) + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id in(?,?,?)", table), g.Slice{1, 2, 3}...) + t.AssertNil(err) + t.Assert(len(result), 3) + t.Assert(result[0]["id"].Int(), 1) + t.Assert(result[1]["id"].Int(), 2) + t.Assert(result[2]["id"].Int(), 3) + }) + gtest.C(t, func(t *gtest.T) { + result, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id>=? AND id <=?", table), g.Slice{1, 3}) + t.AssertNil(err) + t.Assert(len(result), 3) + t.Assert(result[0]["id"].Int(), 1) + t.Assert(result[1]["id"].Int(), 2) + t.Assert(result[2]["id"].Int(), 3) + }) +} + +func Test_DB_GetOne(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Passport string + Password string + Nickname string + CreateTime string + } + data := User{ + Id: 1, + Passport: "user_1", + Password: "pass_1", + Nickname: "name_1", + CreateTime: "2020-10-10 12:00:01", + } + _, err := db.Insert(ctx, table, data) + t.AssertNil(err) + + one, err := db.GetOne(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 1) + t.AssertNil(err) + t.Assert(one["passport"], data.Passport) + t.Assert(one["create_time"], data.CreateTime) + t.Assert(one["nickname"], data.Nickname) + }) +} + +func Test_DB_GetValue(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + value, err := db.GetValue(ctx, fmt.Sprintf("SELECT id FROM %s WHERE passport=?", table), "user_3") + t.AssertNil(err) + t.Assert(value.Int(), 3) + }) +} + +func Test_DB_GetCount(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + count, err := db.GetCount(ctx, fmt.Sprintf("SELECT * FROM %s", table)) + t.AssertNil(err) + t.Assert(count, TableSize) + }) +} + +func Test_DB_GetArray(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + array, err := db.GetArray(ctx, fmt.Sprintf("SELECT password FROM %s", table)) + t.AssertNil(err) + arrays := make([]string, 0) + for i := 1; i <= TableSize; i++ { + arrays = append(arrays, fmt.Sprintf(`pass_%d`, i)) + } + t.Assert(array, arrays) + }) +} + +func Test_DB_GetScan(t *testing.T) { + table := createInitTable() + defer dropTable(table) + gtest.C(t, func(t *gtest.T) { + type User struct { + Id int + Passport string + Password string + NickName string + CreateTime gtime.Time + } + user := new(User) + err := db.GetScan(ctx, user, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 3) + t.AssertNil(err) + t.Assert(user.NickName, "name_3") + }) +} + +func Test_DB_Update(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.Update(ctx, table, "password='987654321'", "id=3") + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + + one, err := db.Model(table).Where("id", 3).One() + t.AssertNil(err) + t.Assert(one["id"].Int(), 3) + t.Assert(one["passport"].String(), "user_3") + t.Assert(one["password"].String(), "987654321") + t.Assert(one["nickname"].String(), "name_3") + }) +} + +func Test_DB_Delete(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + result, err := db.Delete(ctx, table, "id>3") + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 7) + }) +} + +func Test_DB_Tables(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + tables := []string{"t_user1", "pop", "haha"} + for _, v := range tables { + createTable(v) + } + result, err := db.Tables(ctx) + gtest.AssertNil(err) + for i := 0; i < len(tables); i++ { + find := false + for j := 0; j < len(result); j++ { + if tables[i] == result[j] { + find = true + break + } + } + gtest.AssertEQ(find, true) + } + }) +} + +func Test_DB_TableFields(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + table := createTable() + defer dropTable(table) + + var expect = map[string][]any{ + // []string: Index Type Null Key Default Comment + // id is bigserial so the default is a pgsql function + "id": {0, "int8(64)", false, "pri", fmt.Sprintf("nextval('%s_id_seq'::regclass)", table), ""}, + "passport": {1, "varchar(45)", false, "", nil, ""}, + "password": {2, "varchar(32)", false, "", nil, ""}, + "nickname": {3, "varchar(45)", false, "", nil, ""}, + "create_time": {4, "timestamp", false, "", nil, ""}, + } + + res, err := db.TableFields(ctx, table) + gtest.AssertNil(err) + + for k, v := range expect { + _, ok := res[k] + gtest.AssertEQ(ok, true) + + gtest.AssertEQ(res[k].Index, v[0]) + gtest.AssertEQ(res[k].Name, k) + gtest.AssertEQ(res[k].Type, v[1]) + gtest.AssertEQ(res[k].Null, v[2]) + gtest.AssertEQ(res[k].Key, v[3]) + gtest.AssertEQ(res[k].Default, v[4]) + gtest.AssertEQ(res[k].Comment, v[5]) + } + }) +} + +func Test_NoFields_Error(t *testing.T) { + createSql := `CREATE TABLE IF NOT EXISTS %s ( +id bigint PRIMARY KEY, +int_col INT);` + + type Data struct { + Id int64 + IntCol int64 + } + // pgsql converts table names to lowercase + // mark: [c.oid = '%s'::regclass] is not case-sensitive + tableName := "Error_table" + _, err := db.Exec(ctx, fmt.Sprintf(createSql, tableName)) + gtest.AssertNil(err) + defer dropTable(tableName) + + gtest.C(t, func(t *gtest.T) { + var data = Data{ + Id: 2, + IntCol: 2, + } + _, err = db.Model(tableName).Data(data).Insert() + t.AssertNE(err, nil) + + // Insert a piece of test data using lowercase + _, err = db.Model(strings.ToLower(tableName)).Data(data).Insert() + t.AssertNil(err) + + _, err = db.Model(tableName).Where("id", 1).Data(g.Map{ + "int_col": 9999, + }).Update() + t.AssertNE(err, nil) + + }) + // The inserted field does not exist in the table + gtest.C(t, func(t *gtest.T) { + data := map[string]any{ + "id1": 22, + "int_col_22": 11111, + } + _, err = db.Model(tableName).Data(data).Insert() + t.Assert(err, fmt.Errorf(`input data match no fields in table "%s"`, tableName)) + + lowerTableName := strings.ToLower(tableName) + _, err = db.Model(lowerTableName).Data(data).Insert() + t.Assert(err, fmt.Errorf(`input data match no fields in table "%s"`, lowerTableName)) + + _, err = db.Model(lowerTableName).Where("id", 1).Data(g.Map{ + "int_col-2": 9999, + }).Update() + t.Assert(err, fmt.Errorf(`input data match no fields in table "%s"`, lowerTableName)) + }) + +} + +func Test_DB_TableFields_DuplicateConstraints(t *testing.T) { + // Test for the fix of duplicate field results with multiple constraints + // This test verifies that when a field has multiple constraints (e.g., both primary key and unique), + // the TableFields method correctly merges the results with proper priority (pri > uni > others) + gtest.C(t, func(t *gtest.T) { + tableName := "test_multi_constraint" + createSql := fmt.Sprintf(` + CREATE TABLE %s ( + id bigserial NOT NULL PRIMARY KEY, + email varchar(100) NOT NULL UNIQUE, + username varchar(50) NOT NULL, + status int NOT NULL DEFAULT 1 + )`, tableName) + + _, err := db.Exec(ctx, createSql) + t.AssertNil(err) + defer dropTable(tableName) + + // Get table fields + fields, err := db.TableFields(ctx, tableName) + t.AssertNil(err) + + // Verify id field has primary key constraint + t.AssertNE(fields["id"], nil) + t.Assert(fields["id"].Key, "pri") + t.Assert(fields["id"].Name, "id") + t.Assert(fields["id"].Type, "int8(64)") + + // Verify email field has unique constraint + t.AssertNE(fields["email"], nil) + t.Assert(fields["email"].Key, "uni") + t.Assert(fields["email"].Name, "email") + t.Assert(fields["email"].Type, "varchar(100)") + + // Verify username field has no constraint + t.AssertNE(fields["username"], nil) + t.Assert(fields["username"].Key, "") + t.Assert(fields["username"].Name, "username") + + // Verify status field has no constraint and has default value + t.AssertNE(fields["status"], nil) + t.Assert(fields["status"].Key, "") + t.Assert(fields["status"].Name, "status") + t.Assert(fields["status"].Default, 1) + + // Verify field count is correct (no duplicates) + t.Assert(len(fields), 4) + }) + + // Test table with composite constraints + gtest.C(t, func(t *gtest.T) { + tableName := "test_composite_constraint" + createSql := fmt.Sprintf(` + CREATE TABLE %s ( + user_id bigint NOT NULL, + project_id bigint NOT NULL, + role varchar(50) NOT NULL, + PRIMARY KEY (user_id, project_id) + )`, tableName) + + _, err := db.Exec(ctx, createSql) + t.AssertNil(err) + defer dropTable(tableName) + + // Get table fields + fields, err := db.TableFields(ctx, tableName) + t.AssertNil(err) + + // In PostgreSQL, composite primary keys may appear in query results + // The first field in the composite key should be marked as 'pri' + t.AssertNE(fields["user_id"], nil) + t.Assert(fields["user_id"].Name, "user_id") + + t.AssertNE(fields["project_id"], nil) + t.Assert(fields["project_id"].Name, "project_id") + + t.AssertNE(fields["role"], nil) + t.Assert(fields["role"].Name, "role") + t.Assert(fields["role"].Key, "") + + // Verify field count is correct (no duplicates) + t.Assert(len(fields), 3) + }) +} + +func Test_DB_InsertIgnore(t *testing.T) { + table := createTable() + defer dropTable(table) + + // Insert test record + gtest.C(t, func(t *gtest.T) { + _, err := db.Insert(ctx, table, g.Map{ + "id": 1, + "passport": "t1", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "T1", + "create_time": gtime.Now().String(), + }) + t.AssertNil(err) + + answer, err := db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 1) + t.AssertNil(err) + t.Assert(len(answer), 1) + t.Assert(answer[0]["passport"], "t1") + t.Assert(answer[0]["password"], "25d55ad283aa400af464c76d713c07ad") + t.Assert(answer[0]["nickname"], "T1") + + // Ignore Duplicate record + result, err := db.InsertIgnore(ctx, table, g.Map{ + "id": 1, + "passport": "t1_duplicate", + "password": "duplicate_password", + "nickname": "Duplicate", + "create_time": gtime.Now().String(), + }) + t.AssertNil(err) + + n, _ := result.RowsAffected() + t.Assert(n, 0) + + answer, err = db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 1) + t.AssertNil(err) + t.Assert(len(answer), 1) + t.Assert(answer[0]["passport"], "t1") + t.Assert(answer[0]["password"], "25d55ad283aa400af464c76d713c07ad") + t.Assert(answer[0]["nickname"], "T1") + + // Insert Correct Record + result, err = db.Insert(ctx, table, g.Map{ + "id": 2, + "passport": "t2", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_2", + "create_time": gtime.Now().String(), + }) + t.AssertNil(err) + n, _ = result.RowsAffected() + t.Assert(n, 1) + + answer, err = db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s WHERE id=?", table), 2) + t.AssertNil(err) + t.Assert(len(answer), 1) + t.Assert(answer[0]["passport"], "t2") + t.Assert(answer[0]["password"], "25d55ad283aa400af464c76d713c07ad") + t.Assert(answer[0]["nickname"], "name_2") + + // Insert Multiple Records Using g.Map Array + data := g.List{ + { + "id": 3, + "passport": "t3", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_3", + "create_time": gtime.Now().String(), + }, + { + "id": 4, + "passport": "t4", + "password": "25d55ad283aa400af464c76d713c07ad", + "nickname": "name_4", + "create_time": gtime.Now().String(), + }, + { + "id": 1, + "passport": "t1_conflict", + "password": "conflict_password", + "nickname": "conflict_name", + "create_time": gtime.Now().String(), + }, + { + "id": 2, + "passport": "t2_conflict", + "password": "conflict_password", + "nickname": "conflict_name", + "create_time": gtime.Now().String(), + }, + } + + // Insert Multiple Records with Ignore + result, err = db.InsertIgnore(ctx, table, data) + t.AssertNil(err) + + n, _ = result.RowsAffected() + t.Assert(n, 2) + + answer, err = db.GetAll(ctx, fmt.Sprintf("SELECT * FROM %s", table)) + t.AssertNil(err) + t.Assert(len(answer), 4) + // Should have four records in total (ID 1, 2, 3, 4) + + t.Assert(answer[0]["passport"], "t1") + t.Assert(answer[1]["passport"], "t2") + t.Assert(answer[2]["passport"], "t3") + t.Assert(answer[3]["passport"], "t4") + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go index 9969f0886ae..62b2b8ee6e8 100644 --- a/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go +++ b/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go @@ -17,12 +17,6 @@ import ( "github.com/gogf/gf/v2/test/gtest" ) -// skipUnsupportedTest skips tests that require PostgreSQL 9.5+ features (ON CONFLICT) -// GaussDB is based on PostgreSQL 9.2 and doesn't support these features -func skipUnsupportedTest(t *testing.T, feature string) { - t.Skipf("GaussDB does not support %s (requires PostgreSQL 9.5+ ON CONFLICT syntax, GaussDB is based on PostgreSQL 9.2)", feature) -} - func Test_Model_Insert(t *testing.T) { table := createTable() defer dropTable(table) @@ -277,7 +271,6 @@ func Test_Model_Where(t *testing.T) { } func Test_Model_Save(t *testing.T) { - skipUnsupportedTest(t, "Save") table := createTable() defer dropTable(table) gtest.C(t, func(t *gtest.T) { @@ -337,7 +330,6 @@ func Test_Model_Save(t *testing.T) { } func Test_Model_Replace(t *testing.T) { - skipUnsupportedTest(t, "Replace") table := createTable() defer dropTable(table) @@ -393,7 +385,6 @@ func Test_Model_Replace(t *testing.T) { } func Test_Model_OnConflict(t *testing.T) { - skipUnsupportedTest(t, "OnConflict") var ( table = fmt.Sprintf(`%s_%d`, TablePrefix+"test", gtime.TimestampNano()) uniqueName = fmt.Sprintf(`%s_%d`, TablePrefix+"test_unique", gtime.TimestampNano()) @@ -469,8 +460,7 @@ func Test_Model_OnConflict(t *testing.T) { } func Test_Model_OnDuplicate(t *testing.T) { - skipUnsupportedTest(t, "OnDuplicate") - table := createTable() + table := createInitTable() defer dropTable(table) // string type 1. @@ -571,8 +561,7 @@ func Test_Model_OnDuplicate(t *testing.T) { } func Test_Model_OnDuplicateWithCounter(t *testing.T) { - skipUnsupportedTest(t, "OnDuplicateWithCounter") - table := createTable() + table := createInitTable() defer dropTable(table) gtest.C(t, func(t *gtest.T) { @@ -594,8 +583,7 @@ func Test_Model_OnDuplicateWithCounter(t *testing.T) { } func Test_Model_OnDuplicateEx(t *testing.T) { - skipUnsupportedTest(t, "OnDuplicateEx") - table := createTable() + table := createInitTable() defer dropTable(table) // string type 1. @@ -810,7 +798,6 @@ func Test_ConvertSliceFloat64(t *testing.T) { } func Test_Model_InsertIgnore(t *testing.T) { - skipUnsupportedTest(t, "InsertIgnore") table := createTable() defer dropTable(table) From 2288727f7ebce9cf35bc7e2b3982025def2812f8 Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 14:32:41 +0800 Subject: [PATCH 03/10] up --- contrib/drivers/gaussdb/pgsql_do_insert.go | 20 +++++++++++++++++++ .../drivers/gaussdb/pgsql_z_unit_init_test.go | 8 ++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/contrib/drivers/gaussdb/pgsql_do_insert.go b/contrib/drivers/gaussdb/pgsql_do_insert.go index 92f2440c3c0..30f1f857a74 100644 --- a/contrib/drivers/gaussdb/pgsql_do_insert.go +++ b/contrib/drivers/gaussdb/pgsql_do_insert.go @@ -214,6 +214,26 @@ func (d *Driver) doMergeInsert( ctx context.Context, link gdb.Link, table string, list gdb.List, option gdb.DoInsertOption, withUpdate bool, ) (result sql.Result, err error) { + // For batch operations (multiple records), process each record individually + if len(list) > 1 { + var ( + batchResult = new(gdb.SqlResult) + totalAffected int64 + ) + for _, record := range list { + singleResult, singleErr := d.doMergeInsert(ctx, link, table, gdb.List{record}, option, withUpdate) + if singleErr != nil { + return nil, singleErr + } + if n, _ := singleResult.RowsAffected(); n > 0 { + totalAffected += n + } + } + batchResult.Result = &gdb.SqlResult{} + batchResult.Affected = totalAffected + return batchResult, nil + } + // Check if OnDuplicateMap contains conflict keys // GaussDB MERGE statement cannot update columns used in ON clause // If user wants to update conflict keys, we need to use a different approach diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go index a40c860d4f2..b2cb92d6e06 100644 --- a/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go +++ b/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go @@ -35,7 +35,8 @@ var ( func init() { configNode = gdb.ConfigNode{ - Link: `gaussdb:gaussdb:UTpass@1234@tcp(127.0.0.1:9950)/postgres`, + Link: `gaussdb:gaussdb:UTpass@1234@tcp(127.0.0.1:9950)/postgres`, + Namespace: SchemaName, // Set the schema namespace } // gaussdb only permit to connect to the designation database. @@ -52,11 +53,6 @@ func init() { if _, err := db.Exec(ctx, fmt.Sprintf(schemaTemplate, SchemaName)); err != nil { gtest.Error(err) } - - // Set search_path to the test schema - if _, err := db.Exec(ctx, fmt.Sprintf("SET search_path TO %s", SchemaName)); err != nil { - gtest.Error(err) - } } func createTable(table ...string) string { From f4e154496a9600c0817c676d4329721171135877 Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 14:38:42 +0800 Subject: [PATCH 04/10] up --- .../gaussdb/pgsql_z_unit_field_test.go | 954 ++++++++++++++++++ .../gaussdb/pgsql_z_unit_filter_test.go | 273 +++++ .../drivers/gaussdb/pgsql_z_unit_open_test.go | 178 ++++ .../drivers/gaussdb/pgsql_z_unit_raw_test.go | 99 ++ contrib/drivers/gaussdb/pgsql_z_unit_test.go | 105 ++ .../gaussdb/pgsql_z_unit_upsert_test.go | 267 +++++ 6 files changed, 1876 insertions(+) create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_field_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_open_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_test.go create mode 100644 contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_field_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_field_test.go new file mode 100644 index 00000000000..7c0c61b04bb --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_field_test.go @@ -0,0 +1,954 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "fmt" + "testing" + + "github.com/google/uuid" + + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/test/gtest" +) + +// Test_TableFields tests the TableFields method for retrieving table field information +func Test_TableFields(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + fields, err := db.TableFields(ctx, table) + t.AssertNil(err) + t.Assert(len(fields) > 0, true) + + // Test primary key field + t.Assert(fields["id"].Name, "id") + t.Assert(fields["id"].Key, "pri") + + // Test integer types + t.Assert(fields["col_int2"].Name, "col_int2") + t.Assert(fields["col_int4"].Name, "col_int4") + t.Assert(fields["col_int8"].Name, "col_int8") + + // Test float types + t.Assert(fields["col_float4"].Name, "col_float4") + t.Assert(fields["col_float8"].Name, "col_float8") + t.Assert(fields["col_numeric"].Name, "col_numeric") + + // Test character types + t.Assert(fields["col_char"].Name, "col_char") + t.Assert(fields["col_varchar"].Name, "col_varchar") + t.Assert(fields["col_text"].Name, "col_text") + + // Test boolean type + t.Assert(fields["col_bool"].Name, "col_bool") + + // Test date/time types + t.Assert(fields["col_date"].Name, "col_date") + t.Assert(fields["col_timestamp"].Name, "col_timestamp") + + // Test JSON types + t.Assert(fields["col_json"].Name, "col_json") + t.Assert(fields["col_jsonb"].Name, "col_jsonb") + + // Test array types + t.Assert(fields["col_int2_arr"].Name, "col_int2_arr") + t.Assert(fields["col_int4_arr"].Name, "col_int4_arr") + t.Assert(fields["col_varchar_arr"].Name, "col_varchar_arr") + }) +} + +// Test_TableFields_Types tests field type information +func Test_TableFields_Types(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + fields, err := db.TableFields(ctx, table) + t.AssertNil(err) + + // Test integer type names + t.Assert(fields["col_int2"].Type, "int2(16)") + t.Assert(fields["col_int4"].Type, "int4(32)") + t.Assert(fields["col_int8"].Type, "int8(64)") + + // Test float type names + t.Assert(fields["col_float4"].Type, "float4(24)") + t.Assert(fields["col_float8"].Type, "float8(53)") + t.Assert(fields["col_numeric"].Type, "numeric(10)") + + // Test character type names + t.Assert(fields["col_char"].Type, "bpchar(10)") + t.Assert(fields["col_varchar"].Type, "varchar(100)") + t.Assert(fields["col_text"].Type, "text") + + // Test boolean type name + t.Assert(fields["col_bool"].Type, "bool") + + // Test date/time type names + t.Assert(fields["col_date"].Type, "date") + t.Assert(fields["col_timestamp"].Type, "timestamp") + t.Assert(fields["col_timestamptz"].Type, "timestamptz") + + // Test JSON type names + t.Assert(fields["col_json"].Type, "json") + t.Assert(fields["col_jsonb"].Type, "jsonb") + + // Test array type names (PostgreSQL uses _ prefix for array types) + t.Assert(fields["col_int2_arr"].Type, "_int2") + t.Assert(fields["col_int4_arr"].Type, "_int4") + t.Assert(fields["col_int8_arr"].Type, "_int8") + t.Assert(fields["col_float4_arr"].Type, "_float4") + t.Assert(fields["col_float8_arr"].Type, "_float8") + t.Assert(fields["col_numeric_arr"].Type, "_numeric") + t.Assert(fields["col_varchar_arr"].Type, "_varchar") + t.Assert(fields["col_text_arr"].Type, "_text") + t.Assert(fields["col_bool_arr"].Type, "_bool") + }) +} + +// Test_TableFields_Nullable tests field nullable information +func Test_TableFields_Nullable(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + fields, err := db.TableFields(ctx, table) + t.AssertNil(err) + + // NOT NULL fields should have Null = false + t.Assert(fields["col_int2"].Null, false) + t.Assert(fields["col_int4"].Null, false) + t.Assert(fields["col_numeric"].Null, false) + t.Assert(fields["col_varchar"].Null, false) + t.Assert(fields["col_bool"].Null, false) + t.Assert(fields["col_varchar_arr"].Null, false) + + // Nullable fields should have Null = true + t.Assert(fields["col_int8"].Null, true) + t.Assert(fields["col_text"].Null, true) + t.Assert(fields["col_json"].Null, true) + }) +} + +// Test_TableFields_Comments tests field comment information +func Test_TableFields_Comments(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + fields, err := db.TableFields(ctx, table) + t.AssertNil(err) + + // Test fields with comments + t.Assert(fields["id"].Comment, "Primary key ID") + t.Assert(fields["col_int2"].Comment, "int2 type (smallint)") + t.Assert(fields["col_int4"].Comment, "int4 type (integer)") + t.Assert(fields["col_int8"].Comment, "int8 type (bigint)") + t.Assert(fields["col_numeric"].Comment, "numeric type with precision") + t.Assert(fields["col_varchar"].Comment, "varchar type") + t.Assert(fields["col_bool"].Comment, "boolean type") + t.Assert(fields["col_timestamp"].Comment, "timestamp type") + t.Assert(fields["col_json"].Comment, "json type") + t.Assert(fields["col_jsonb"].Comment, "jsonb type") + + // Test array field comments + t.Assert(fields["col_int2_arr"].Comment, "int2 array type (_int2)") + t.Assert(fields["col_int4_arr"].Comment, "int4 array type (_int4)") + t.Assert(fields["col_int8_arr"].Comment, "int8 array type (_int8)") + t.Assert(fields["col_numeric_arr"].Comment, "numeric array type (_numeric)") + t.Assert(fields["col_varchar_arr"].Comment, "varchar array type (_varchar)") + t.Assert(fields["col_text_arr"].Comment, "text array type (_text)") + }) +} + +// Test_Field_Type_Conversion tests type conversion for various PostgreSQL types +func Test_Field_Type_Conversion(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query a single record + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + + // Test integer type conversions + t.Assert(one["col_int2"].Int(), 1) + t.Assert(one["col_int4"].Int(), 10) + t.Assert(one["col_int8"].Int64(), int64(100)) + + // Test float type conversions + t.Assert(one["col_float4"].Float32() > 0, true) + t.Assert(one["col_float8"].Float64() > 0, true) + + // Test string type conversions + t.AssertNE(one["col_varchar"].String(), "") + t.AssertNE(one["col_text"].String(), "") + + // Test boolean type conversion + t.Assert(one["col_bool"].Bool(), false) // i=1, 1%2==0 is false + }) +} + +// Test_Field_Array_Type_Conversion tests array type conversion +func Test_Field_Array_Type_Conversion(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query a single record + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + + // Test integer array type conversions + int2Arr := one["col_int2_arr"].Ints() + t.Assert(len(int2Arr), 3) + t.Assert(int2Arr[0], 1) + t.Assert(int2Arr[1], 2) + t.Assert(int2Arr[2], 1) + + int4Arr := one["col_int4_arr"].Ints() + t.Assert(len(int4Arr), 3) + t.Assert(int4Arr[0], 10) + t.Assert(int4Arr[1], 20) + t.Assert(int4Arr[2], 1) + + int8Arr := one["col_int8_arr"].Int64s() + t.Assert(len(int8Arr), 3) + t.Assert(int8Arr[0], int64(100)) + t.Assert(int8Arr[1], int64(200)) + t.Assert(int8Arr[2], int64(1)) + + // Test string array type conversions + varcharArr := one["col_varchar_arr"].Strings() + t.Assert(len(varcharArr), 3) + t.Assert(varcharArr[0], "a") + t.Assert(varcharArr[1], "b") + t.Assert(varcharArr[2], "c1") + + textArr := one["col_text_arr"].Strings() + t.Assert(len(textArr), 3) + t.Assert(textArr[0], "x") + t.Assert(textArr[1], "y") + t.Assert(textArr[2], "z1") + + // Test boolean array type conversions + // col_bool_arr is '{true, false, %t}' where %t = i%2==0, for i=1 it's false + boolArr := one["col_bool_arr"].Bools() + t.Assert(len(boolArr), 3) + t.Assert(boolArr[0], true) // literal true + t.Assert(boolArr[1], false) // literal false + t.Assert(boolArr[2], false) // i=1, 1%2==0 is false + }) +} + +// Test_Field_Array_Insert tests inserting array data +func Test_Field_Array_Insert(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with array values + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_int2_arr": []int{1, 2, 3}, + "col_int4_arr": []int{10, 20, 30}, + "col_varchar_arr": []string{"a", "b", "c"}, + }).Insert() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + t.Assert(one["col_int2"].Int(), 1) + t.Assert(one["col_varchar"].String(), "test") + t.Assert(one["col_bool"].Bool(), true) + + int2Arr := one["col_int2_arr"].Ints() + t.Assert(len(int2Arr), 3) + t.Assert(int2Arr[0], 1) + t.Assert(int2Arr[1], 2) + t.Assert(int2Arr[2], 3) + + varcharArr := one["col_varchar_arr"].Strings() + t.Assert(len(varcharArr), 3) + t.Assert(varcharArr[0], "a") + t.Assert(varcharArr[1], "b") + t.Assert(varcharArr[2], "c") + }) +} + +// Test_Field_Array_Update tests updating array data +func Test_Field_Array_Update(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Update array values + _, err := db.Model(table).Where("id", 1).Data(g.Map{ + "col_int2_arr": []int{100, 200, 300}, + "col_varchar_arr": []string{"x", "y", "z"}, + }).Update() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + + int2Arr := one["col_int2_arr"].Ints() + t.Assert(len(int2Arr), 3) + t.Assert(int2Arr[0], 100) + t.Assert(int2Arr[1], 200) + t.Assert(int2Arr[2], 300) + + varcharArr := one["col_varchar_arr"].Strings() + t.Assert(len(varcharArr), 3) + t.Assert(varcharArr[0], "x") + t.Assert(varcharArr[1], "y") + t.Assert(varcharArr[2], "z") + }) +} + +// Test_Field_JSON_Type tests JSON/JSONB type handling +func Test_Field_JSON_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with JSON values + testData := g.Map{ + "name": "test", + "value": 123, + "items": []string{"a", "b", "c"}, + } + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_json": testData, + "col_jsonb": testData, + }).Insert() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test JSON field + jsonMap := one["col_json"].Map() + t.Assert(jsonMap["name"], "test") + t.Assert(jsonMap["value"], 123) + + // Test JSONB field + jsonbMap := one["col_jsonb"].Map() + t.Assert(jsonbMap["name"], "test") + t.Assert(jsonbMap["value"], 123) + }) +} + +// Test_Field_Scan_To_Struct tests scanning results to struct +func Test_Field_Scan_To_Struct(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + type TestRecord struct { + Id int64 `json:"id"` + ColInt2 int16 `json:"col_int2"` + ColInt4 int32 `json:"col_int4"` + ColInt8 int64 `json:"col_int8"` + ColVarchar string `json:"col_varchar"` + ColBool bool `json:"col_bool"` + ColInt2Arr []int `json:"col_int2_arr"` + ColInt4Arr []int `json:"col_int4_arr"` + ColInt8Arr []int64 `json:"col_int8_arr"` + ColTextArr []string `json:"col_text_arr"` + } + + gtest.C(t, func(t *gtest.T) { + var record TestRecord + err := db.Model(table).Where("id", 1).Scan(&record) + t.AssertNil(err) + + t.Assert(record.Id, int64(1)) + t.Assert(record.ColInt2, int16(1)) + t.Assert(record.ColInt4, int32(10)) + t.Assert(record.ColInt8, int64(100)) + t.AssertNE(record.ColVarchar, "") + t.Assert(record.ColBool, false) + + // Test array fields scanned to struct + t.Assert(len(record.ColInt2Arr), 3) + t.Assert(record.ColInt2Arr[0], 1) + t.Assert(record.ColInt2Arr[1], 2) + t.Assert(record.ColInt2Arr[2], 1) + + t.Assert(len(record.ColTextArr), 3) + t.Assert(record.ColTextArr[0], "x") + t.Assert(record.ColTextArr[1], "y") + t.Assert(record.ColTextArr[2], "z1") + }) +} + +// Test_Field_Scan_To_Struct_Slice tests scanning multiple results to struct slice +func Test_Field_Scan_To_Struct_Slice(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + type TestRecord struct { + Id int64 `json:"id"` + ColInt2 int16 `json:"col_int2"` + ColVarchar string `json:"col_varchar"` + ColInt2Arr []int `json:"col_int2_arr"` + ColTextArr []string `json:"col_text_arr"` + } + + gtest.C(t, func(t *gtest.T) { + var records []TestRecord + err := db.Model(table).OrderAsc("id").Limit(5).Scan(&records) + t.AssertNil(err) + + t.Assert(len(records), 5) + + // Verify first record + t.Assert(records[0].Id, int64(1)) + t.Assert(records[0].ColInt2, int16(1)) + t.Assert(len(records[0].ColInt2Arr), 3) + + // Verify last record + t.Assert(records[4].Id, int64(5)) + t.Assert(records[4].ColInt2, int16(5)) + }) +} + +// Test_Field_Empty_Array tests handling empty arrays +func Test_Field_Empty_Array(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with empty array values (using default) + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + }).Insert() + t.AssertNil(err) + + // Query and verify empty arrays + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Default empty arrays + int2Arr := one["col_int2_arr"].Ints() + t.Assert(len(int2Arr), 0) + + varcharArr := one["col_varchar_arr"].Strings() + t.Assert(len(varcharArr), 0) + }) +} + +// Test_Field_Null_Values tests handling NULL values +func Test_Field_Null_Values(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert minimal required fields, leaving nullable fields as NULL + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify NULL handling + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Nullable fields should return appropriate zero values + t.Assert(one["col_text"].IsNil() || one["col_text"].IsEmpty(), true) + t.Assert(one["col_int8_arr"].IsNil() || one["col_int8_arr"].IsEmpty(), true) + }) +} + +// Test_Field_Float_Array_Type_Conversion tests float array type conversion (_float4, _float8) +func Test_Field_Float_Array_Type_Conversion(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query a single record + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + + // Test float4 array type conversions + float4Arr := one["col_float4_arr"].Float32s() + t.Assert(len(float4Arr), 3) + t.Assert(float4Arr[0] > 0, true) + t.Assert(float4Arr[1] > 0, true) + + // Test float8 array type conversions + float8Arr := one["col_float8_arr"].Float64s() + t.Assert(len(float8Arr), 3) + t.Assert(float8Arr[0] > 0, true) + t.Assert(float8Arr[1] > 0, true) + }) +} + +// Test_Field_Numeric_Array_Type_Conversion tests numeric/decimal array type conversion +func Test_Field_Numeric_Array_Type_Conversion(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query a single record + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + + // Test numeric array type conversions + numericArr := one["col_numeric_arr"].Float64s() + t.Assert(len(numericArr), 3) + t.Assert(numericArr[0] > 0, true) + t.Assert(numericArr[1] > 0, true) + + // Test decimal array type conversions + decimalArr := one["col_decimal_arr"].Float64s() + if !one["col_decimal_arr"].IsNil() { + t.Assert(len(decimalArr) > 0, true) + } + }) +} + +// Test_Field_Bool_Array_Type_Conversion tests bool array type conversion more thoroughly +func Test_Field_Bool_Array_Type_Conversion(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with specific bool array values + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_bool_arr": []bool{true, false, true}, + }).Insert() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test bool array + boolArr := one["col_bool_arr"].Bools() + t.Assert(len(boolArr), 3) + t.Assert(boolArr[0], true) + t.Assert(boolArr[1], false) + t.Assert(boolArr[2], true) + }) +} + +// Test_Field_Char_Array_Type tests char array type (_char) +func Test_Field_Char_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with char array values + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_char_arr": []string{"a", "b", "c"}, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test char array + charArr := one["col_char_arr"].Strings() + t.Assert(len(charArr), 3) + }) +} + +// Test_Field_Bytea_Type tests bytea (binary) type conversion +func Test_Field_Bytea_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with binary data + binaryData := []byte{0x48, 0x65, 0x6c, 0x6c, 0x6f} // "Hello" in hex + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_bytea": binaryData, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test bytea field + result := one["col_bytea"].Bytes() + t.Assert(len(result), 5) + t.Assert(result[0], 0x48) // 'H' + }) +} + +// Test_Field_Bytea_Array_Type tests bytea array type (_bytea) +func Test_Field_Bytea_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with bytea array values using raw SQL + // PostgreSQL bytea array literal format: ARRAY[E'\\x010203', E'\\x040506']::bytea[] + _, err := db.Exec(ctx, fmt.Sprintf(` + INSERT INTO %s (col_int2, col_int4, col_numeric, col_varchar, col_bool, col_varchar_arr, col_bytea_arr) + VALUES (1, 10, 99.99, 'test', true, '{}', ARRAY[E'\\x010203', E'\\x040506']::bytea[]) + `, table)) + t.AssertNil(err) + + // Query and verify bytea array + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test bytea array field - should be converted to [][]byte + byteaArrVal := one["col_bytea_arr"] + t.Assert(byteaArrVal.IsNil(), false) + + // Verify the array contains the expected data + byteaArr := byteaArrVal.Interfaces() + t.Assert(len(byteaArr), 2) + }) +} + +// Test_Field_Date_Array_Type tests date array type (_date) +func Test_Field_Date_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Note: PostgreSQL _date array is not yet mapped in the driver + // This test documents the limitation but can be extended when support is added + + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify NULL date array is handled gracefully + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + // date array should be nil or empty + t.Assert(one["col_date_arr"].IsNil() || one["col_date_arr"].IsEmpty(), true) + }) +} + +// Test_Field_Timestamp_Array_Type tests timestamp array type (_timestamp) +func Test_Field_Timestamp_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Note: PostgreSQL _timestamp array is not yet mapped in the driver + // This test documents the limitation but can be extended when support is added + + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify NULL timestamp array is handled gracefully + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + // timestamp array should be nil or empty + t.Assert(one["col_timestamp_arr"].IsNil() || one["col_timestamp_arr"].IsEmpty(), true) + }) +} + +// Test_Field_JSONB_Array_Type tests JSONB array type (_jsonb) +func Test_Field_JSONB_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Note: PostgreSQL _jsonb array is not yet mapped in the driver + // This test documents the limitation but can be extended when support is added + + _, err := db.Model(table).Data(g.Map{ + "col_int2": 1, + "col_int4": 10, + "col_numeric": 99.99, + "col_varchar": "test", + "col_bool": true, + "col_varchar_arr": []string{}, + }).Insert() + t.AssertNil(err) + + // Query and verify NULL jsonb array is handled gracefully + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + // jsonb array should be nil or empty + t.Assert(one["col_jsonb_arr"].IsNil() || one["col_jsonb_arr"].IsEmpty(), true) + }) +} + +// Test_Field_UUID_Array_Type tests UUID array type (_uuid) +func Test_Field_UUID_Array_Type(t *testing.T) { + table := createAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert with UUID array values using raw SQL + // PostgreSQL uuid array literal format: ARRAY['uuid1', 'uuid2']::uuid[] + uuid1 := "550e8400-e29b-41d4-a716-446655440000" + uuid2 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + uuid3 := "6ba7b811-9dad-11d1-80b4-00c04fd430c8" + _, err := db.Exec(ctx, fmt.Sprintf(` + INSERT INTO %s (col_int2, col_int4, col_numeric, col_varchar, col_bool, col_varchar_arr, col_uuid_arr) + VALUES (1, 10, 99.99, 'test', true, '{}', ARRAY['%s', '%s', '%s']::uuid[]) + `, table, uuid1, uuid2, uuid3)) + t.AssertNil(err) + + // Query and verify UUID array + one, err := db.Model(table).OrderDesc("id").One() + t.AssertNil(err) + + // Test UUID array field - should be converted to []uuid.UUID + uuidArrVal := one["col_uuid_arr"] + t.Assert(uuidArrVal.IsNil(), false) + + // Verify the array contains the expected data as []uuid.UUID + uuidArr := uuidArrVal.Interfaces() + t.Assert(len(uuidArr), 3) + + // Verify each element is uuid.UUID type + u1, ok := uuidArr[0].(uuid.UUID) + t.Assert(ok, true) + t.Assert(u1.String(), uuid1) + + u2, ok := uuidArr[1].(uuid.UUID) + t.Assert(ok, true) + t.Assert(u2.String(), uuid2) + + u3, ok := uuidArr[2].(uuid.UUID) + t.Assert(ok, true) + t.Assert(u3.String(), uuid3) + }) +} + +// Test_Field_UUID_Type tests UUID type +func Test_Field_UUID_Type(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query and verify UUID field + one, err := db.Model(table).OrderAsc("id").One() + t.AssertNil(err) + + // Test UUID field - should be converted to uuid.UUID + uuidVal := one["col_uuid"] + t.Assert(uuidVal.IsNil(), false) + + // Verify the value is uuid.UUID type + uuidObj, ok := uuidVal.Val().(uuid.UUID) + t.Assert(ok, true) + + // Verify the UUID format + uuidStr := uuidObj.String() + t.Assert(len(uuidStr) > 0, true) + // UUID should contain the pattern from insert: 550e8400-e29b-41d4-a716-44665544000X + t.Assert(uuidStr, "550e8400-e29b-41d4-a716-446655440001") + + // Also verify we can still get string representation via .String() + t.Assert(uuidVal.String(), "550e8400-e29b-41d4-a716-446655440001") + }) +} + +// Test_Field_Bytea_Array_Type_Scan tests bytea array type and scanning +func Test_Field_Bytea_Array_Type_Scan(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query and verify bytea array field + one, err := db.Model(table).OrderAsc("id").One() + t.AssertNil(err) + + // Test bytea array field + byteaArrVal := one["col_bytea_arr"] + // bytea array should not be nil since we inserted data + t.Assert(byteaArrVal.IsNil(), false) + }) +} + +// Test_Field_Date_Array_Type_Scan tests date array type and scanning +func Test_Field_Date_Array_Type_Scan(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query and verify date array field + one, err := db.Model(table).OrderAsc("id").One() + t.AssertNil(err) + + // Test date array field + dateArrVal := one["col_date_arr"] + t.Assert(dateArrVal.IsNil(), false) + + // Verify the array contains the expected data + dateArr := dateArrVal.Strings() + t.Assert(len(dateArr) > 0, true) + }) +} + +// Test_Field_Timestamp_Array_Type_Scan tests timestamp array type and scanning +func Test_Field_Timestamp_Array_Type_Scan(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query and verify timestamp array field + one, err := db.Model(table).OrderAsc("id").One() + t.AssertNil(err) + + // Test timestamp array field + timestampArrVal := one["col_timestamp_arr"] + t.Assert(timestampArrVal.IsNil(), false) + + // Verify the array contains the expected data + timestampArr := timestampArrVal.Strings() + t.Assert(len(timestampArr) > 0, true) + }) +} + +// Test_Field_JSONB_Array_Type_Scan tests JSONB array type and scanning +func Test_Field_JSONB_Array_Type_Scan(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Query and verify JSONB array field + one, err := db.Model(table).OrderAsc("id").One() + t.AssertNil(err) + + // Test JSONB array field + jsonbArrVal := one["col_jsonb_arr"] + t.Assert(jsonbArrVal.IsNil(), false) + }) +} + +// Test_Field_UUID_Query tests querying by UUID field +func Test_Field_UUID_Query(t *testing.T) { + table := createInitAllTypesTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Test 1: Query by UUID string + uuidStr := "550e8400-e29b-41d4-a716-446655440001" + one, err := db.Model(table).Where("col_uuid", uuidStr).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + t.Assert(one["id"].Int(), 1) + + // Verify the returned UUID is correct + uuidObj, ok := one["col_uuid"].Val().(uuid.UUID) + t.Assert(ok, true) + t.Assert(uuidObj.String(), uuidStr) + + // Test 2: Query by uuid.UUID type directly + uuidVal, err := uuid.Parse("550e8400-e29b-41d4-a716-446655440002") + t.AssertNil(err) + one, err = db.Model(table).Where("col_uuid", uuidVal).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + t.Assert(one["id"].Int(), 2) + + // Test 3: Query by UUID string using g.Map + one, err = db.Model(table).Where(g.Map{ + "col_uuid": "550e8400-e29b-41d4-a716-446655440003", + }).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + t.Assert(one["id"].Int(), 3) + + // Test 4: Query by uuid.UUID type using g.Map + uuidVal, err = uuid.Parse("550e8400-e29b-41d4-a716-446655440004") + t.AssertNil(err) + one, err = db.Model(table).Where(g.Map{ + "col_uuid": uuidVal, + }).One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), false) + t.Assert(one["id"].Int(), 4) + + // Test 5: Query non-existent UUID + one, err = db.Model(table).Where("col_uuid", "00000000-0000-0000-0000-000000000000").One() + t.AssertNil(err) + t.Assert(one.IsEmpty(), true) + + // Test 6: Query multiple records by UUID IN clause with strings + all, err := db.Model(table).WhereIn("col_uuid", g.Slice{ + "550e8400-e29b-41d4-a716-446655440001", + "550e8400-e29b-41d4-a716-446655440002", + }).OrderAsc("id").All() + t.AssertNil(err) + t.Assert(len(all), 2) + t.Assert(all[0]["id"].Int(), 1) + t.Assert(all[1]["id"].Int(), 2) + + // Test 7: Query multiple records by UUID IN clause with uuid.UUID types + uuid1, _ := uuid.Parse("550e8400-e29b-41d4-a716-446655440003") + uuid2, _ := uuid.Parse("550e8400-e29b-41d4-a716-446655440004") + all, err = db.Model(table).WhereIn("col_uuid", g.Slice{uuid1, uuid2}).OrderAsc("id").All() + t.AssertNil(err) + t.Assert(len(all), 2) + t.Assert(all[0]["id"].Int(), 3) + t.Assert(all[1]["id"].Int(), 4) + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go new file mode 100644 index 00000000000..e3e175e1780 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go @@ -0,0 +1,273 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "testing" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/os/gctx" + "github.com/gogf/gf/v2/test/gtest" +) + +// Test_DoFilter_LimitOffset tests LIMIT OFFSET conversion +func Test_DoFilter_LimitOffset(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + + gtest.C(t, func(t *gtest.T) { + // Test MySQL style LIMIT x,y to PostgreSQL style LIMIT y OFFSET x + sql := "SELECT * FROM users LIMIT 10, 20" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users LIMIT 20 OFFSET 10") + }) + + gtest.C(t, func(t *gtest.T) { + // Test with different numbers + sql := "SELECT * FROM users LIMIT 0, 100" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users LIMIT 100 OFFSET 0") + }) + + gtest.C(t, func(t *gtest.T) { + // Test no conversion needed + sql := "SELECT * FROM users LIMIT 50" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users LIMIT 50") + }) +} + +// Test_DoFilter_InsertIgnore tests INSERT IGNORE conversion +func Test_DoFilter_InsertIgnore(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + + gtest.C(t, func(t *gtest.T) { + // Test INSERT IGNORE conversion + sql := "INSERT IGNORE INTO users (name) VALUES ($1)" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "INSERT INTO users (name) VALUES ($1) ON CONFLICT DO NOTHING") + }) +} + +// Test_DoFilter_PlaceholderConversion tests placeholder conversion +func Test_DoFilter_PlaceholderConversion(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + + gtest.C(t, func(t *gtest.T) { + // Test ? placeholder conversion to $n + sql := "SELECT * FROM users WHERE id = ? AND name = ?" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users WHERE id = $1 AND name = $2") + }) + + gtest.C(t, func(t *gtest.T) { + // Test multiple placeholders + sql := "INSERT INTO users (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "INSERT INTO users (a, b, c, d, e) VALUES ($1, $2, $3, $4, $5)") + }) +} + +// Test_DoFilter_JsonbOperator tests JSONB operator handling +func Test_DoFilter_JsonbOperator(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + + gtest.C(t, func(t *gtest.T) { + // Test jsonb ?| operator + // The jsonb ? is first converted to $1, then restored to ? + // So the next placeholder becomes $2 + sql := "SELECT * FROM users WHERE (data)::jsonb ?| ?" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + // After placeholder conversion, the ? in jsonb should be preserved + t.Assert(newSql, "SELECT * FROM users WHERE (data)::jsonb ?| $2") + }) + + gtest.C(t, func(t *gtest.T) { + // Test jsonb ?& operator + sql := "SELECT * FROM users WHERE (data)::jsonb &? ?" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users WHERE (data)::jsonb &? $2") + }) + + gtest.C(t, func(t *gtest.T) { + // Test jsonb ? operator + sql := "SELECT * FROM users WHERE (data)::jsonb ? ?" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users WHERE (data)::jsonb ? $2") + }) + + gtest.C(t, func(t *gtest.T) { + // Test combination of jsonb and regular placeholders + sql := "SELECT * FROM users WHERE id = ? AND (data)::jsonb ?| ?" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users WHERE id = $1 AND (data)::jsonb ?| $3") + }) +} + +// Test_DoFilter_ComplexQuery tests complex queries with multiple features +func Test_DoFilter_ComplexQuery(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + + gtest.C(t, func(t *gtest.T) { + // Test complex query with LIMIT and placeholders + sql := "SELECT * FROM users WHERE status = ? AND age > ? LIMIT 5, 10" + newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) + t.AssertNil(err) + t.Assert(newSql, "SELECT * FROM users WHERE status = $1 AND age > $2 LIMIT 10 OFFSET 5") + }) +} + +// Test_Tables tests the Tables method +func Test_Tables_Method(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + tables, err := db.Tables(ctx) + t.AssertNil(err) + t.Assert(len(tables) >= 0, true) + }) + + gtest.C(t, func(t *gtest.T) { + // Test with specific schema - use the test schema + tables, err := db.Tables(ctx, "test") + t.AssertNil(err) + t.Assert(len(tables) >= 0, true) + }) +} + +// Test_OrderRandomFunction tests the OrderRandomFunction method +func Test_OrderRandomFunction(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Test ORDER BY RANDOM() + all, err := db.Model(table).OrderRandom().All() + t.AssertNil(err) + t.Assert(len(all), TableSize) + }) +} + +// Test_GetChars tests the GetChars method +func Test_GetChars(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + left, right := driver.GetChars() + t.Assert(left, `"`) + t.Assert(right, `"`) + }) +} + +// Test_New tests the New method +func Test_New(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.New() + t.AssertNE(driver, nil) + }) +} + +// Test_DoExec_NonIntPrimaryKey tests DoExec with non-integer primary key +func Test_DoExec_NonIntPrimaryKey(t *testing.T) { + // Create a table with UUID primary key + tableName := "t_uuid_pk_test" + _, err := db.Exec(ctx, ` + CREATE TABLE IF NOT EXISTS `+tableName+` ( + id uuid PRIMARY KEY DEFAULT gen_random_uuid(), + name varchar(100) + ) + `) + if err != nil { + // If gen_random_uuid is not available, skip this test + t.Log("Skipping UUID test:", err) + return + } + defer db.Exec(ctx, "DROP TABLE IF EXISTS "+tableName) + + gtest.C(t, func(t *gtest.T) { + // Insert with UUID primary key + result, err := db.Model(tableName).Data(g.Map{ + "name": "test_user", + }).Insert() + t.AssertNil(err) + + // LastInsertId should return error for non-integer primary key + _, err = result.LastInsertId() + // For UUID, LastInsertId is not supported + t.AssertNE(err, nil) + + // RowsAffected should still work + affected, err := result.RowsAffected() + t.AssertNil(err) + t.Assert(affected, int64(1)) + }) +} + +// Test_TableFields_WithSchema tests TableFields with specific schema +func Test_TableFields_WithSchema(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Test with schema parameter + fields, err := db.TableFields(ctx, table, "test") + t.AssertNil(err) + t.Assert(len(fields) > 0, true) + }) +} + +// Test_TableFields_UniqueKey tests TableFields with unique key constraint +func Test_TableFields_UniqueKey(t *testing.T) { + tableName := "t_unique_test" + + // Create table with unique constraint + _, err := db.Exec(ctx, ` + CREATE TABLE IF NOT EXISTS `+tableName+` ( + id bigserial PRIMARY KEY, + email varchar(100) UNIQUE NOT NULL, + name varchar(100) + ) + `) + if err != nil { + t.Error(err) + return + } + defer db.Exec(ctx, "DROP TABLE IF EXISTS "+tableName) + + gtest.C(t, func(t *gtest.T) { + fields, err := db.TableFields(ctx, tableName) + t.AssertNil(err) + + // Check primary key + t.Assert(fields["id"].Key, "pri") + + // Check unique key + t.Assert(fields["email"].Key, "uni") + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_open_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_open_test.go new file mode 100644 index 00000000000..34d99d9b5e9 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_open_test.go @@ -0,0 +1,178 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "testing" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/test/gtest" +) + +// Test_Open tests the Open method with various configurations +func Test_Open_WithNamespace(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + Name: "test", + Namespace: "public", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_WithTimezone tests Open with timezone configuration +func Test_Open_WithTimezone(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + Name: "test", + Timezone: "Asia/Shanghai", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_WithExtra tests Open with extra configuration +func Test_Open_WithExtra(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + Name: "test", + Extra: "connect_timeout=10", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_WithInvalidExtra tests Open with invalid extra configuration +func Test_Open_WithInvalidExtra(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + Name: "test", + // Invalid extra format with invalid URL encoding that will cause parse error + Extra: "%Q=%Q&b", + } + _, err := driver.Open(config) + t.AssertNE(err, nil) + }) +} + +// Test_Open_WithFullConfig tests Open with all configuration options +func Test_Open_WithFullConfig(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + Name: "test", + Namespace: "public", + Timezone: "UTC", + Extra: "connect_timeout=10", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_WithoutPort tests Open without port +func Test_Open_WithoutPort(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Name: "test", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_WithoutName tests Open without database name +func Test_Open_WithoutName(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "127.0.0.1", + Port: "5432", + } + db, err := driver.Open(config) + t.AssertNil(err) + t.AssertNE(db, nil) + if db != nil { + db.Close() + } + }) +} + +// Test_Open_InvalidHost tests Open with invalid host +func Test_Open_InvalidHost(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + driver := gaussdb.Driver{} + config := &gdb.ConfigNode{ + User: "postgres", + Pass: "12345678", + Host: "invalid_host_that_does_not_exist", + Port: "5432", + Name: "test", + } + // Note: sql.Open doesn't actually connect, so no error here + // The error would occur when actually using the connection + db, err := driver.Open(config) + t.AssertNil(err) + if db != nil { + db.Close() + } + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go new file mode 100644 index 00000000000..4d9c2dcba1d --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go @@ -0,0 +1,99 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "testing" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/test/gtest" +) + +func Test_Raw_Insert(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data(g.Map{ + "passport": "port_1", + "password": "pass_1", + "nickname": "name_1", + "create_time": gdb.Raw("now()"), + }).Insert() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) +} + +func Test_Raw_BatchInsert(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data( + g.List{ + g.Map{ + "passport": "port_2", + "password": "pass_2", + "nickname": "name_2", + "create_time": gdb.Raw("now()"), + }, + g.Map{ + "passport": "port_4", + "password": "pass_4", + "nickname": "name_4", + "create_time": gdb.Raw("now()"), + }, + }, + ).Insert() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 2) + }) +} + +func Test_Raw_Delete(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data(g.Map{ + "id": gdb.Raw("id"), + }).Where("id", 1).Delete() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) +} + +func Test_Raw_Update(t *testing.T) { + table := createInitTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + result, err := user.Data(g.Map{ + "id": gdb.Raw("id+100"), + "create_time": gdb.Raw("now()"), + }).Where("id", 1).Update() + t.AssertNil(err) + n, _ := result.RowsAffected() + t.Assert(n, 1) + }) + + gtest.C(t, func(t *gtest.T) { + user := db.Model(table) + n, err := user.Where("id", 101).Count() + t.AssertNil(err) + t.Assert(n, int64(1)) + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_test.go new file mode 100644 index 00000000000..5715335c3f8 --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_test.go @@ -0,0 +1,105 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "context" + "testing" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/os/gctx" + "github.com/gogf/gf/v2/test/gtest" +) + +func Test_LastInsertId(t *testing.T) { + // err not nil + gtest.C(t, func(t *gtest.T) { + _, err := db.Model("notexist").Insert(g.List{ + {"name": "user1"}, + {"name": "user2"}, + {"name": "user3"}, + }) + t.AssertNE(err, nil) + }) + + gtest.C(t, func(t *gtest.T) { + tableName := createTable() + defer dropTable(tableName) + res, err := db.Model(tableName).Insert(g.List{ + {"passport": "user1", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + {"passport": "user2", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + {"passport": "user3", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + }) + t.AssertNil(err) + lastInsertId, err := res.LastInsertId() + t.AssertNil(err) + t.Assert(lastInsertId, int64(3)) + rowsAffected, err := res.RowsAffected() + t.AssertNil(err) + t.Assert(rowsAffected, int64(3)) + }) +} + +func Test_TxLastInsertId(t *testing.T) { + gtest.C(t, func(t *gtest.T) { + tableName := createTable() + defer dropTable(tableName) + err := db.Transaction(context.TODO(), func(ctx context.Context, tx gdb.TX) error { + // user + res, err := tx.Model(tableName).Insert(g.List{ + {"passport": "user1", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + {"passport": "user2", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + {"passport": "user3", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + }) + t.AssertNil(err) + lastInsertId, err := res.LastInsertId() + t.AssertNil(err) + t.AssertEQ(lastInsertId, int64(3)) + rowsAffected, err := res.RowsAffected() + t.AssertNil(err) + t.AssertEQ(rowsAffected, int64(3)) + + res1, err := tx.Model(tableName).Insert(g.List{ + {"passport": "user4", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + {"passport": "user5", "password": "pwd", "nickname": "nickname", "create_time": CreateTime}, + }) + t.AssertNil(err) + lastInsertId1, err := res1.LastInsertId() + t.AssertNil(err) + t.AssertEQ(lastInsertId1, int64(5)) + rowsAffected1, err := res1.RowsAffected() + t.AssertNil(err) + t.AssertEQ(rowsAffected1, int64(2)) + return nil + + }) + t.AssertNil(err) + }) +} + +func Test_Driver_DoFilter(t *testing.T) { + var ( + ctx = gctx.New() + driver = gaussdb.Driver{} + ) + gtest.C(t, func(t *gtest.T) { + var data = g.Map{ + `select * from user where (role)::jsonb ?| 'admin'`: `select * from user where (role)::jsonb ?| 'admin'`, + `select * from user where (role)::jsonb ?| '?'`: `select * from user where (role)::jsonb ?| '$2'`, + `select * from user where (role)::jsonb &? '?'`: `select * from user where (role)::jsonb &? '$2'`, + `select * from user where (role)::jsonb ? '?'`: `select * from user where (role)::jsonb ? '$2'`, + `select * from user where '?'`: `select * from user where '$1'`, + } + for k, v := range data { + newSql, _, err := driver.DoFilter(ctx, nil, k, nil) + t.AssertNil(err) + t.Assert(newSql, v) + } + }) +} diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go b/contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go new file mode 100644 index 00000000000..06e091a1a3f --- /dev/null +++ b/contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go @@ -0,0 +1,267 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gaussdb_test + +import ( + "testing" + + "github.com/gogf/gf/v2/database/gdb" + "github.com/gogf/gf/v2/frame/g" + "github.com/gogf/gf/v2/test/gtest" +) + +// Test_FormatUpsert_WithOnDuplicateStr tests FormatUpsert with OnDuplicateStr +func Test_FormatUpsert_WithOnDuplicateStr(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(table).Data(g.Map{ + "passport": "user1", + "password": "pwd", + "nickname": "nick1", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + // Test Save with OnConflict (upsert) + _, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "user1", + "password": "newpwd", + "nickname": "newnick", + "create_time": CreateTime, + }).OnConflict("id").Save() + t.AssertNil(err) + + // Verify the update + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["password"].String(), "newpwd") + t.Assert(one["nickname"].String(), "newnick") + }) +} + +// Test_FormatUpsert_WithOnDuplicateMap tests FormatUpsert with OnDuplicateMap +func Test_FormatUpsert_WithOnDuplicateMap(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(table).Data(g.Map{ + "passport": "user2", + "password": "pwd", + "nickname": "nick2", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + // Test OnDuplicate with map - values should be column names to use EXCLUDED.column + _, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "user2", + "password": "newpwd2", + "nickname": "newnick2", + "create_time": CreateTime, + }).OnConflict("id").OnDuplicate(g.Map{ + "password": "password", + "nickname": "nickname", + }).Save() + t.AssertNil(err) + + // Verify - values should be from the inserted data + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["password"].String(), "newpwd2") + t.Assert(one["nickname"].String(), "newnick2") + }) +} + +// Test_FormatUpsert_WithCounter tests FormatUpsert with Counter type on numeric column. +// Note: In PostgreSQL, Counter uses EXCLUDED.column which references the NEW value being inserted, +// not the current table value. This differs from MySQL's ON DUPLICATE KEY UPDATE behavior. +func Test_FormatUpsert_WithCounter(t *testing.T) { + // Create a special table with numeric id for counter test + tableName := "t_counter_test" + dropTable(tableName) + _, err := db.Exec(ctx, ` + CREATE TABLE `+tableName+` ( + id bigserial PRIMARY KEY, + counter_value int NOT NULL DEFAULT 0, + name varchar(45) + ) + `) + if err != nil { + t.Error(err) + return + } + defer dropTable(tableName) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(tableName).Data(g.Map{ + "counter_value": 10, + "name": "counter_test", + }).Insert() + t.AssertNil(err) + + // Get initial ID + one, err := db.Model(tableName).Where("name", "counter_test").One() + t.AssertNil(err) + initialId := one["id"].Int64() + + // Test OnDuplicate with Counter + // In PostgreSQL: counter_value = EXCLUDED.counter_value + 5 + // EXCLUDED.counter_value is the value we're trying to insert (20) + // So result = 20 + 5 = 25 + _, err = db.Model(tableName).Data(g.Map{ + "id": initialId, + "counter_value": 20, // This is the EXCLUDED value + "name": "counter_test", + }).OnConflict("id").OnDuplicate(g.Map{ + "counter_value": &gdb.Counter{ + Field: "counter_value", + Value: 5, + }, + }).Save() + t.AssertNil(err) + + // Verify: EXCLUDED.counter_value(20) + 5 = 25 + one, err = db.Model(tableName).Where("id", initialId).One() + t.AssertNil(err) + t.Assert(one["counter_value"].Int(), 25) + }) + + gtest.C(t, func(t *gtest.T) { + // Test Counter with negative value (decrement) + one, err := db.Model(tableName).Where("name", "counter_test").One() + t.AssertNil(err) + initialId := one["id"].Int64() + + // In PostgreSQL: counter_value = EXCLUDED.counter_value - 3 + // EXCLUDED.counter_value is 100, so result = 100 - 3 = 97 + _, err = db.Model(tableName).Data(g.Map{ + "id": initialId, + "counter_value": 100, // This is the EXCLUDED value + "name": "counter_test", + }).OnConflict("id").OnDuplicate(g.Map{ + "counter_value": &gdb.Counter{ + Field: "counter_value", + Value: -3, + }, + }).Save() + t.AssertNil(err) + + // Verify: EXCLUDED.counter_value(100) - 3 = 97 + one, err = db.Model(tableName).Where("id", initialId).One() + t.AssertNil(err) + t.Assert(one["counter_value"].Int(), 97) + }) +} + +// Test_FormatUpsert_WithRaw tests FormatUpsert with Raw type +func Test_FormatUpsert_WithRaw(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(table).Data(g.Map{ + "passport": "raw_user", + "password": "pwd", + "nickname": "nick", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + // Get initial ID + one, err := db.Model(table).Where("passport", "raw_user").One() + t.AssertNil(err) + initialId := one["id"].Int64() + + // Test OnDuplicate with Raw SQL + _, err = db.Model(table).Data(g.Map{ + "id": initialId, + "passport": "raw_user", + "password": "pwd", + "nickname": "nick", + "create_time": CreateTime, + }).OnConflict("id").OnDuplicate(g.Map{ + "password": gdb.Raw("'raw_password'"), + }).Save() + t.AssertNil(err) + + // Verify + one, err = db.Model(table).Where("id", initialId).One() + t.AssertNil(err) + t.Assert(one["password"].String(), "raw_password") + }) +} + +// Test_FormatUpsert_NoOnConflict tests FormatUpsert without OnConflict (should fail) +func Test_FormatUpsert_NoOnConflict(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(table).Data(g.Map{ + "passport": "no_conflict_user", + "password": "pwd", + "nickname": "nick", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + // Try Save without OnConflict and without primary key in data - should fail + // because driver cannot auto-detect conflict columns when primary key is missing + _, err = db.Model(table).Data(g.Map{ + // "id": 1, + "passport": "no_conflict_user", + "password": "newpwd", + "nickname": "newnick", + "create_time": CreateTime, + }).Save() + t.AssertNE(err, nil) + }) +} + +// Test_FormatUpsert_MultipleConflictKeys tests FormatUpsert with multiple conflict keys +func Test_FormatUpsert_MultipleConflictKeys(t *testing.T) { + table := createTable() + defer dropTable(table) + + gtest.C(t, func(t *gtest.T) { + // Insert initial data + _, err := db.Model(table).Data(g.Map{ + "passport": "multi_key_user", + "password": "pwd", + "nickname": "nick", + "create_time": CreateTime, + }).Insert() + t.AssertNil(err) + + // Test with multiple conflict keys using only "id" which has a unique constraint + // Note: Using multiple keys requires a composite unique constraint to exist + _, err = db.Model(table).Data(g.Map{ + "id": 1, + "passport": "multi_key_user", + "password": "newpwd", + "nickname": "newnick", + "create_time": CreateTime, + }).OnConflict("id").Save() + t.AssertNil(err) + + // Verify the update + one, err := db.Model(table).Where("id", 1).One() + t.AssertNil(err) + t.Assert(one["password"].String(), "newpwd") + t.Assert(one["nickname"].String(), "newnick") + }) +} From 8d54a1038c9de267a7abb8d767ff4062af89054d Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 14:50:34 +0800 Subject: [PATCH 05/10] up --- .github/workflows/ci-main.yml | 11 + contrib/drivers/gaussdb/README.md | 192 ------------------ .../{pgsql_convert.go => gaussdb_convert.go} | 0 .../{pgsql_do_exec.go => gaussdb_do_exec.go} | 0 ...gsql_do_filter.go => gaussdb_do_filter.go} | 0 ...gsql_do_insert.go => gaussdb_do_insert.go} | 0 ...mat_upsert.go => gaussdb_format_upsert.go} | 0 .../{pgsql_open.go => gaussdb_open.go} | 0 .../{pgsql_order.go => gaussdb_order.go} | 0 .../{pgsql_result.go => gaussdb_result.go} | 0 ...able_fields.go => gaussdb_table_fields.go} | 15 +- .../{pgsql_tables.go => gaussdb_tables.go} | 3 +- ...t_db_test.go => gaussdb_z_unit_db_test.go} | 0 ...d_test.go => gaussdb_z_unit_field_test.go} | 3 +- ..._test.go => gaussdb_z_unit_filter_test.go} | 5 +- ...it_test.go => gaussdb_z_unit_init_test.go} | 0 ...l_test.go => gaussdb_z_unit_model_test.go} | 0 ...en_test.go => gaussdb_z_unit_open_test.go} | 0 ...raw_test.go => gaussdb_z_unit_raw_test.go} | 0 ..._z_unit_test.go => gaussdb_z_unit_test.go} | 0 ..._test.go => gaussdb_z_unit_upsert_test.go} | 0 21 files changed, 27 insertions(+), 202 deletions(-) delete mode 100644 contrib/drivers/gaussdb/README.md rename contrib/drivers/gaussdb/{pgsql_convert.go => gaussdb_convert.go} (100%) rename contrib/drivers/gaussdb/{pgsql_do_exec.go => gaussdb_do_exec.go} (100%) rename contrib/drivers/gaussdb/{pgsql_do_filter.go => gaussdb_do_filter.go} (100%) rename contrib/drivers/gaussdb/{pgsql_do_insert.go => gaussdb_do_insert.go} (100%) rename contrib/drivers/gaussdb/{pgsql_format_upsert.go => gaussdb_format_upsert.go} (100%) rename contrib/drivers/gaussdb/{pgsql_open.go => gaussdb_open.go} (100%) rename contrib/drivers/gaussdb/{pgsql_order.go => gaussdb_order.go} (100%) rename contrib/drivers/gaussdb/{pgsql_result.go => gaussdb_result.go} (100%) rename contrib/drivers/gaussdb/{pgsql_table_fields.go => gaussdb_table_fields.go} (88%) rename contrib/drivers/gaussdb/{pgsql_tables.go => gaussdb_tables.go} (96%) rename contrib/drivers/gaussdb/{pgsql_z_unit_db_test.go => gaussdb_z_unit_db_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_field_test.go => gaussdb_z_unit_field_test.go} (99%) rename contrib/drivers/gaussdb/{pgsql_z_unit_filter_test.go => gaussdb_z_unit_filter_test.go} (96%) rename contrib/drivers/gaussdb/{pgsql_z_unit_init_test.go => gaussdb_z_unit_init_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_model_test.go => gaussdb_z_unit_model_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_open_test.go => gaussdb_z_unit_open_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_raw_test.go => gaussdb_z_unit_raw_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_test.go => gaussdb_z_unit_test.go} (100%) rename contrib/drivers/gaussdb/{pgsql_z_unit_upsert_test.go => gaussdb_z_unit_upsert_test.go} (100%) diff --git a/.github/workflows/ci-main.yml b/.github/workflows/ci-main.yml index b468c81c271..d3840dd43f9 100644 --- a/.github/workflows/ci-main.yml +++ b/.github/workflows/ci-main.yml @@ -198,6 +198,17 @@ jobs: ports: - 5236:5236 + # openGauss server + # docker run --privileged=true -e GS_PASSWORD=UTpass@1234 -p 9950:5432 opengauss/opengauss:7.0.0-RC1.B023 + gaussdb: + image: opengauss/opengauss:7.0.0-RC1.B023 + env: + GS_PASSWORD: UTpass@1234 + TZ: Asia/Shanghai + ports: + - 9950:5432 + + zookeeper: image: zookeeper:3.8 ports: diff --git a/contrib/drivers/gaussdb/README.md b/contrib/drivers/gaussdb/README.md deleted file mode 100644 index c45be82e729..00000000000 --- a/contrib/drivers/gaussdb/README.md +++ /dev/null @@ -1,192 +0,0 @@ -# GaussDB Driver for GoFrame - -This package provides a GaussDB database driver for the GoFrame framework. - -## Overview - -GaussDB is Huawei's enterprise-level database that is compatible with PostgreSQL protocols. This driver adapts the PostgreSQL driver implementation to work with GaussDB. - -## Installation - -```bash -go get -u github.com/gogf/gf/contrib/drivers/gaussdb/v2 -``` - -## Usage - -```go -import ( - _ "github.com/gogf/gf/contrib/drivers/gaussdb/v2" - "github.com/gogf/gf/v2/database/gdb" -) - -// Configuration -gdb.AddConfigNode(gdb.DefaultGroupName, gdb.ConfigNode{ - Link: "gaussdb:username:password@tcp(127.0.0.1:9950)/database_name", -}) - -// Get database instance -db, err := gdb.New() -``` - -## Connection String Format - -``` -gaussdb:username:password@tcp(host:port)/database?param1=value1¶m2=value2 -``` - -Example: -``` -gaussdb:gaussdb:UTpass@1234@tcp(127.0.0.1:9950)/postgres -``` - -## Schema/Namespace Handling - -GaussDB follows PostgreSQL's schema model: -- **Database (Catalog)**: The database name in the connection string (e.g., `postgres`) -- **Schema (Namespace)**: A namespace within the database (e.g., `public`, `test`) - -To use a specific schema: - -```go -// Create schema if not exists -db.Exec(ctx, "CREATE SCHEMA IF NOT EXISTS my_schema") - -// Set search_path to use the schema -db.Exec(ctx, "SET search_path TO my_schema") -``` - -## Limitations - -GaussDB is based on **PostgreSQL 9.2**, which predates several modern PostgreSQL features (like `ON CONFLICT` introduced in PostgreSQL 9.5). However, GaussDB supports the SQL standard `MERGE` statement, which we use to implement some upsert operations. - -### Fully Supported UPSERT Operations - -All ORM upsert operations are **FULLY SUPPORTED** using `MERGE` statement or alternative implementations: - -- ✅ **Save()** - Insert or update (upsert) - Uses MERGE INTO -- ✅ **Replace()** - Replace existing record - Alias for Save() -- ✅ **InsertIgnore()** - Insert and ignore duplicate key errors - - With primary key in data: Uses MERGE INTO for conflict detection - - Without primary key: Uses INSERT with error catching -- ✅ **OnConflict()** - Custom conflict column specification - Works with MERGE -- ✅ **OnDuplicate()** - On duplicate key update with custom fields - - Uses MERGE when not updating conflict keys - - Uses UPDATE+INSERT when updating conflict keys (GaussDB MERGE limitation workaround) -- ✅ **OnDuplicateEx()** - Exclude specific fields from update - Uses MERGE -- ✅ **OnDuplicateWithCounter()** - Counter operations on duplicate - Fully supported - -### Usage Examples - -```go -// Basic Save (upsert) -result, err := db.Model("user").Data(data).Save() - -// Save with conflict detection on specific column -result, err := db.Model("user").Data(data).OnConflict("email").Save() - -// Insert Ignore (skip if exists) -result, err := db.Model("user").Data(data).InsertIgnore() - -// OnDuplicate - update specific fields on conflict -result, err := db.Model("user"). - Data(data). - OnConflict("id"). - OnDuplicate("name", "email"). - Save() - -// OnDuplicateEx - update all except specified fields -result, err := db.Model("user"). - Data(data). - OnConflict("id"). - OnDuplicateEx("created_at"). - Save() - -// OnDuplicate with Counter -result, err := db.Model("user"). - Data(data). - OnConflict("id"). - OnDuplicate(g.Map{ - "login_count": gdb.Counter{Field: "login_count", Value: 1}, - }). - Save() - -// OnDuplicate with Raw SQL -result, err := db.Model("user"). - Data(data). - OnConflict("id"). - OnDuplicate(g.Map{ - "updated_at": gdb.Raw("CURRENT_TIMESTAMP"), - }). - Save() -``` - -### Implementation Notes - -1. **MERGE Statement**: GaussDB supports the SQL standard MERGE statement, which is used for most upsert operations -2. **Conflict Key Updates**: When OnDuplicate attempts to update a conflict key (e.g., primary key), MERGE cannot be used. In this case, the driver automatically falls back to UPDATE+INSERT approach -3. **EXCLUDED Keyword**: PostgreSQL's `EXCLUDED` (used in ON CONFLICT) is automatically converted to the MERGE equivalent `T2` prefix -4. **Atomic Operations**: All operations maintain atomicity and consistency - -## Supported Features - -- ✅ Basic CRUD operations (Insert, Select, Update, Delete) -- ✅ Transactions -- ✅ Batch operations -- ✅ Array data types (int, float, text, etc.) -- ✅ JSON/JSONB data types -- ✅ Schema/namespace support -- ✅ Prepared statements -- ✅ Connection pooling - -## Supported Features - -- ✅ Basic CRUD operations (Insert, Select, Update, Delete) -- ✅ **Save/Upsert operations** (using MERGE statement) -- ✅ **InsertIgnore** (using MERGE statement) -- ✅ **Replace** (using MERGE statement) -- ✅ Transactions -- ✅ Batch operations -- ✅ Array data types (int, float, text, etc.) -- ✅ JSON/JSONB data types -- ✅ Schema/namespace support -- ✅ Prepared statements -- ✅ Connection pooling4 -# Database: postgres - -Tests for unsupported features (OnConflict/OnDuplicate operations requiring ON CONFLICT syntax) will be skipped with explanatory messages. Tests for Save/InsertIgnore operations (using MERGE statement) will pass successfully. -``` - -Tests for unsupported features (ON CONFLICT operations) will be skipped with explanatory messages. - -## Database Compatibility - -- **GaussDB Version**: Based on PostgreSQL 9.2 -- **Protocol Compatibility**: PostgreSQL wire protocol -- **Driver**: Uses `gitee.com/opengauss/openGauss-connector-go-pq` - -## Notes - -1. **Schema Usage**: Unlike MySQL where "schema" and "database" are synonymous, in PostgreSQL/GaussDB: - - Database (catalog) is the top-level container - - Schema is a namespace within a database - - Tables belong to schemas within databases - -2. **Connection Database**: Always connect to an existing database (like `postgres`), then create and use schemas within it. - -3. **Performance**: For optimal performance, set `search_path` at the session level rather than qualifying every table name with the schema. - -4. **Version Checking**: The driver does not enforce GaussDB version checking, but features relying on PostgreSQL 9.5+ functionality will fail. - -## Contributing - -When contributing to this driver, please note: - -1. Test changes against an actual GaussDB instance -2. Ensure compatibility with PostgreSQL 9.2 features only -3. Document any additional limitations discovered -4. Update tests to skip unsupported features appropriately - -## License - -This driver is distributed under the same license as the GoFrame framework (MIT License). diff --git a/contrib/drivers/gaussdb/pgsql_convert.go b/contrib/drivers/gaussdb/gaussdb_convert.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_convert.go rename to contrib/drivers/gaussdb/gaussdb_convert.go diff --git a/contrib/drivers/gaussdb/pgsql_do_exec.go b/contrib/drivers/gaussdb/gaussdb_do_exec.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_do_exec.go rename to contrib/drivers/gaussdb/gaussdb_do_exec.go diff --git a/contrib/drivers/gaussdb/pgsql_do_filter.go b/contrib/drivers/gaussdb/gaussdb_do_filter.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_do_filter.go rename to contrib/drivers/gaussdb/gaussdb_do_filter.go diff --git a/contrib/drivers/gaussdb/pgsql_do_insert.go b/contrib/drivers/gaussdb/gaussdb_do_insert.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_do_insert.go rename to contrib/drivers/gaussdb/gaussdb_do_insert.go diff --git a/contrib/drivers/gaussdb/pgsql_format_upsert.go b/contrib/drivers/gaussdb/gaussdb_format_upsert.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_format_upsert.go rename to contrib/drivers/gaussdb/gaussdb_format_upsert.go diff --git a/contrib/drivers/gaussdb/pgsql_open.go b/contrib/drivers/gaussdb/gaussdb_open.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_open.go rename to contrib/drivers/gaussdb/gaussdb_open.go diff --git a/contrib/drivers/gaussdb/pgsql_order.go b/contrib/drivers/gaussdb/gaussdb_order.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_order.go rename to contrib/drivers/gaussdb/gaussdb_order.go diff --git a/contrib/drivers/gaussdb/pgsql_result.go b/contrib/drivers/gaussdb/gaussdb_result.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_result.go rename to contrib/drivers/gaussdb/gaussdb_result.go diff --git a/contrib/drivers/gaussdb/pgsql_table_fields.go b/contrib/drivers/gaussdb/gaussdb_table_fields.go similarity index 88% rename from contrib/drivers/gaussdb/pgsql_table_fields.go rename to contrib/drivers/gaussdb/gaussdb_table_fields.go index a0a6a800670..863bdf7363e 100644 --- a/contrib/drivers/gaussdb/pgsql_table_fields.go +++ b/contrib/drivers/gaussdb/gaussdb_table_fields.go @@ -11,7 +11,6 @@ import ( "fmt" "github.com/gogf/gf/v2/database/gdb" - "github.com/gogf/gf/v2/util/gutil" ) var ( @@ -46,15 +45,17 @@ func init() { } // TableFields retrieves and returns the fields' information of specified table of current schema. -func (d *Driver) TableFields(ctx context.Context, table string, schema ...string) (fields map[string]*gdb.TableField, err error) { +func (d *Driver) TableFields( + ctx context.Context, table string, schema ...string, +) (fields map[string]*gdb.TableField, err error) { var ( - result gdb.Result - link gdb.Link - usedSchema = gutil.GetOrDefaultStr(d.GetSchema(), schema...) - // TODO duplicated `id` result? + result gdb.Result + link gdb.Link structureSql = fmt.Sprintf(tableFieldsSqlTmp, table) ) - if link, err = d.SlaveLink(usedSchema); err != nil { + // Schema parameter is not used for SlaveLink as it would attempt to switch database + // In GaussDB/PostgreSQL, schema is handled via search_path or table qualification + if link, err = d.SlaveLink(); err != nil { return nil, err } result, err = d.DoSelect(ctx, link, structureSql) diff --git a/contrib/drivers/gaussdb/pgsql_tables.go b/contrib/drivers/gaussdb/gaussdb_tables.go similarity index 96% rename from contrib/drivers/gaussdb/pgsql_tables.go rename to contrib/drivers/gaussdb/gaussdb_tables.go index 0e109ee478f..77600f6474a 100644 --- a/contrib/drivers/gaussdb/pgsql_tables.go +++ b/contrib/drivers/gaussdb/gaussdb_tables.go @@ -55,7 +55,8 @@ func (d *Driver) Tables(ctx context.Context, schema ...string) (tables []string, usedSchema = defaultSchema } // DO NOT use `usedSchema` as parameter for function `SlaveLink`. - link, err := d.SlaveLink(schema...) + // Schema is already handled in usedSchema variable above + link, err := d.SlaveLink() if err != nil { return nil, err } diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_db_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_db_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_db_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_db_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_field_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_field_test.go similarity index 99% rename from contrib/drivers/gaussdb/pgsql_z_unit_field_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_field_test.go index 7c0c61b04bb..cf045204621 100644 --- a/contrib/drivers/gaussdb/pgsql_z_unit_field_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_field_test.go @@ -91,7 +91,8 @@ func Test_TableFields_Types(t *testing.T) { t.Assert(fields["col_bool"].Type, "bool") // Test date/time type names - t.Assert(fields["col_date"].Type, "date") + // Note: GaussDB internally represents date as timestamp in pg_type + t.Assert(fields["col_date"].Type, "timestamp") t.Assert(fields["col_timestamp"].Type, "timestamp") t.Assert(fields["col_timestamptz"].Type, "timestamptz") diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go similarity index 96% rename from contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go index e3e175e1780..34a9d60b286 100644 --- a/contrib/drivers/gaussdb/pgsql_z_unit_filter_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go @@ -56,10 +56,13 @@ func Test_DoFilter_InsertIgnore(t *testing.T) { gtest.C(t, func(t *gtest.T) { // Test INSERT IGNORE conversion + // Note: GaussDB (PostgreSQL 9.2) does not support ON CONFLICT syntax (added in PG 9.5) + // GaussDB handles InsertIgnore at DoInsert level using MERGE statement sql := "INSERT IGNORE INTO users (name) VALUES ($1)" newSql, _, err := driver.DoFilter(ctx, nil, sql, nil) t.AssertNil(err) - t.Assert(newSql, "INSERT INTO users (name) VALUES ($1) ON CONFLICT DO NOTHING") + // GaussDB removes IGNORE keyword but doesn't add ON CONFLICT (not supported) + t.Assert(newSql, "INSERT INTO users (name) VALUES ($1)") }) } diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_init_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_init_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_model_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_model_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_model_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_model_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_open_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_open_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_raw_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_raw_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_raw_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_test.go diff --git a/contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_upsert_test.go similarity index 100% rename from contrib/drivers/gaussdb/pgsql_z_unit_upsert_test.go rename to contrib/drivers/gaussdb/gaussdb_z_unit_upsert_test.go From 77bdc2b8d42554baa3b1766fe397de242e068fed Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 13 Dec 2025 06:57:50 +0000 Subject: [PATCH 06/10] Apply gci import order changes --- contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go | 3 ++- contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go | 3 ++- contrib/drivers/gaussdb/gaussdb_z_unit_test.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go index 34a9d60b286..0264c238881 100644 --- a/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_filter_test.go @@ -9,10 +9,11 @@ package gaussdb_test import ( "testing" - "github.com/gogf/gf/contrib/drivers/gaussdb/v2" "github.com/gogf/gf/v2/frame/g" "github.com/gogf/gf/v2/os/gctx" "github.com/gogf/gf/v2/test/gtest" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" ) // Test_DoFilter_LimitOffset tests LIMIT OFFSET conversion diff --git a/contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go index 34d99d9b5e9..dfbff24a8f9 100644 --- a/contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_open_test.go @@ -9,9 +9,10 @@ package gaussdb_test import ( "testing" - "github.com/gogf/gf/contrib/drivers/gaussdb/v2" "github.com/gogf/gf/v2/database/gdb" "github.com/gogf/gf/v2/test/gtest" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" ) // Test_Open tests the Open method with various configurations diff --git a/contrib/drivers/gaussdb/gaussdb_z_unit_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_test.go index 5715335c3f8..3846d9313b5 100644 --- a/contrib/drivers/gaussdb/gaussdb_z_unit_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_test.go @@ -10,11 +10,12 @@ import ( "context" "testing" - "github.com/gogf/gf/contrib/drivers/gaussdb/v2" "github.com/gogf/gf/v2/database/gdb" "github.com/gogf/gf/v2/frame/g" "github.com/gogf/gf/v2/os/gctx" "github.com/gogf/gf/v2/test/gtest" + + "github.com/gogf/gf/contrib/drivers/gaussdb/v2" ) func Test_LastInsertId(t *testing.T) { From e0226a8b81417d294bab2ca8df212fd7e8a6d1a9 Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 15:02:08 +0800 Subject: [PATCH 07/10] up --- .../drivers/gaussdb/gaussdb_format_upsert.go | 94 ------------------- 1 file changed, 94 deletions(-) delete mode 100644 contrib/drivers/gaussdb/gaussdb_format_upsert.go diff --git a/contrib/drivers/gaussdb/gaussdb_format_upsert.go b/contrib/drivers/gaussdb/gaussdb_format_upsert.go deleted file mode 100644 index 7fe3c1489e6..00000000000 --- a/contrib/drivers/gaussdb/gaussdb_format_upsert.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gaussdb - -import ( - "fmt" - - "github.com/gogf/gf/v2/database/gdb" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" -) - -// FormatUpsert returns SQL clause of type upsert for GaussDB. -// Note: GaussDB is based on PostgreSQL 9.2 which doesn't support ON CONFLICT syntax (introduced in PostgreSQL 9.5). -// Therefore, UPSERT operations (Save, Replace, OnConflict, OnDuplicate) are not supported in GaussDB. -func (d *Driver) FormatUpsert(columns []string, list gdb.List, option gdb.DoInsertOption) (string, error) { - return "", gerror.NewCode( - gcode.CodeNotSupported, - `GaussDB does not support ON CONFLICT (upsert) operations. GaussDB is based on PostgreSQL 9.2, while ON CONFLICT was introduced in PostgreSQL 9.5. Please use separate INSERT and UPDATE operations instead`, - ) - - var onDuplicateStr string - if option.OnDuplicateStr != "" { - onDuplicateStr = option.OnDuplicateStr - } else if len(option.OnDuplicateMap) > 0 { - for k, v := range option.OnDuplicateMap { - if len(onDuplicateStr) > 0 { - onDuplicateStr += "," - } - switch v.(type) { - case gdb.Raw, *gdb.Raw: - onDuplicateStr += fmt.Sprintf( - "%s=%s", - d.Core.QuoteWord(k), - v, - ) - case gdb.Counter, *gdb.Counter: - var counter gdb.Counter - switch value := v.(type) { - case gdb.Counter: - counter = value - case *gdb.Counter: - counter = *value - } - operator, columnVal := "+", counter.Value - if columnVal < 0 { - operator, columnVal = "-", -columnVal - } - // Note: In PostgreSQL ON CONFLICT DO UPDATE, we use EXCLUDED to reference - // the value that was proposed for insertion. This differs from MySQL's - // ON DUPLICATE KEY UPDATE behavior where the column name without prefix - // references the current row's value. - onDuplicateStr += fmt.Sprintf( - "%s=EXCLUDED.%s%s%s", - d.QuoteWord(k), - d.QuoteWord(counter.Field), - operator, - gconv.String(columnVal), - ) - default: - onDuplicateStr += fmt.Sprintf( - "%s=EXCLUDED.%s", - d.Core.QuoteWord(k), - d.Core.QuoteWord(gconv.String(v)), - ) - } - } - } else { - for _, column := range columns { - // If it's SAVE operation, do not automatically update the creating time. - if d.Core.IsSoftCreatedFieldName(column) { - continue - } - if len(onDuplicateStr) > 0 { - onDuplicateStr += "," - } - onDuplicateStr += fmt.Sprintf( - "%s=EXCLUDED.%s", - d.Core.QuoteWord(column), - d.Core.QuoteWord(column), - ) - } - } - - conflictKeys := gstr.Join(option.OnConflict, ",") - - return fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET ", conflictKeys) + onDuplicateStr, nil -} From 864c1e46c846077e24b87aa0a6a27e2a0bdacf7c Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 15:38:38 +0800 Subject: [PATCH 08/10] Apply suggestion from @Copilot Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- contrib/drivers/gaussdb/gaussdb_do_filter.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/contrib/drivers/gaussdb/gaussdb_do_filter.go b/contrib/drivers/gaussdb/gaussdb_do_filter.go index 0d464310f74..6c9d3246925 100644 --- a/contrib/drivers/gaussdb/gaussdb_do_filter.go +++ b/contrib/drivers/gaussdb/gaussdb_do_filter.go @@ -47,13 +47,12 @@ func (d *Driver) DoFilter( return "", nil, err } - // Add support for gaussdb INSERT OR IGNORE. - // GaussDB doesn't support ON CONFLICT DO NOTHING without explicit conflict target - // We skip the InsertIgnore conversion for GaussDB as it doesn't support this PostgreSQL 9.5+ feature - // Users should handle conflicts explicitly using Upsert or other methods + // Handle gaussdb INSERT IGNORE. + // The IGNORE keyword is removed here, converting the statement to a regular INSERT. + // The actual "ignore" behavior (i.e., skipping inserts that would violate constraints) + // is implemented at the DoInsert level by checking for existence before inserting. if gstr.HasPrefix(newSql, gdb.InsertOperationIgnore) { // Remove the IGNORE operation prefix and keep as regular INSERT - // This will cause constraint violations to fail, which is expected behavior for GaussDB newSql = "INSERT" + newSql[len(gdb.InsertOperationIgnore):] } From c310814a32a7a7a352230c76fdca0e445fdc93a6 Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 15:41:46 +0800 Subject: [PATCH 09/10] Apply suggestion from @Copilot Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- contrib/drivers/gaussdb/gaussdb_do_insert.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/drivers/gaussdb/gaussdb_do_insert.go b/contrib/drivers/gaussdb/gaussdb_do_insert.go index 30f1f857a74..16193239c16 100644 --- a/contrib/drivers/gaussdb/gaussdb_do_insert.go +++ b/contrib/drivers/gaussdb/gaussdb_do_insert.go @@ -481,7 +481,7 @@ func (d *Driver) doMergeInsert( return r, err } // GaussDB's MERGE statement may not return correct RowsAffected - // We manually set it to 1 since MERGE always affects exactly one row + // Workaround: If RowsAffected returns 0 despite a successful MERGE, we manually set it to 1. if n, err := r.RowsAffected(); err != nil { return r, err } else { From f115f42def5aec91fe1ba403ba78d87a4e26437d Mon Sep 17 00:00:00 2001 From: John Guo Date: Sat, 13 Dec 2025 15:42:46 +0800 Subject: [PATCH 10/10] Apply suggestion from @Copilot Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go b/contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go index b2cb92d6e06..abe8a74b456 100644 --- a/contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go +++ b/contrib/drivers/gaussdb/gaussdb_z_unit_init_test.go @@ -284,7 +284,8 @@ func createInitAllTypesTableWithDb(db gdb.DB, table ...string) (name string) { i, i, i, i%2 == 0)) // Date/Time types: col_date, col_time, col_timestamp - // Calculate day as integer in range 1-28; %02d in fmt.Sprintf ensures two-digit zero-padded format + // Calculate day as integer in range 1-28; 28 is used because it is the maximum day value safe for all months to avoid date validity issues. + // %02d in fmt.Sprintf ensures two-digit zero-padded format dayOfMonth := (i-1)%28 + 1 sql.WriteString(fmt.Sprintf("'2024-01-%02d', '10:00:%02d', '2024-01-%02d 10:00:00', ", dayOfMonth, (i-1)%60, dayOfMonth))