Compare commits
6 Commits
SHA1
d99adb203b
f1f91f4cfa
2afb265ea4
be24f7a190
aae8a706e9
7d64f18f54
@@ -68,6 +68,7 @@ func init() {
 }
 
 type Builder struct {
+	wrappedErr      error
 	errorData       *ExErr
 	containsGinData bool
 	noLog           bool
@@ -89,9 +90,9 @@ func Wrap(err error, msg string) *Builder {
 	if !pkgconfig.RecursiveErrors {
 		v := FromError(err)
 		v.Message = msg
-		return &Builder{errorData: v}
+		return &Builder{wrappedErr: err, errorData: v}
 	}
-	return &Builder{errorData: wrapExErr(FromError(err), msg, CatWrap, 1)}
+	return &Builder{wrappedErr: err, errorData: wrapExErr(FromError(err), msg, CatWrap, 1)}
 }
 
 // ----------------------------------------------------------------------------
@@ -414,6 +415,10 @@ func extractHeader(header map[string][]string) []string {
 func (b *Builder) Build() error {
 	warnOnPkgConfigNotInitialized()
 
+	if pkgconfig.DisableErrorWrapping && b.wrappedErr != nil {
+		return b.wrappedErr
+	}
+
 	if pkgconfig.ZeroLogErrTraces && !b.noLog && (b.errorData.Severity == SevErr || b.errorData.Severity == SevFatal) {
 		b.errorData.ShortLog(stackSkipLogger.Error())
 	} else if pkgconfig.ZeroLogAllTraces && !b.noLog {
@@ -48,8 +48,9 @@ var (
 	TypeMongoReflection = NewType("MONGO_REFLECTION", langext.Ptr(500))
 	TypeMongoInvalidOpt = NewType("MONGO_INVALIDOPT", langext.Ptr(500))
 
 	TypeSQLQuery  = NewType("SQL_QUERY", langext.Ptr(500))
 	TypeSQLBuild  = NewType("SQL_BUILD", langext.Ptr(500))
+	TypeSQLDecode = NewType("SQL_DECODE", langext.Ptr(500))
 
 	TypeWrap = NewType("Wrap", nil)
 
@@ -13,6 +13,7 @@ type ErrorPackageConfig struct {
 	IncludeMetaInGinOutput bool                                             // Log meta fields ( from e.g. `.Str(key, val).Build()` ) to gin in err.Output()
 	ExtendGinOutput        func(err *ExErr, json map[string]any)            // (Optionally) extend the gin output with more fields
 	ExtendGinDataOutput    func(err *ExErr, depth int, json map[string]any) // (Optionally) extend the gin `__data` output with more fields
+	DisableErrorWrapping   bool                                             // Disables the exerr.Wrap()...Build() function - will always return the original error
 }
 
 type ErrorPackageConfigInit struct {
@@ -23,6 +24,7 @@ type ErrorPackageConfigInit struct {
 	IncludeMetaInGinOutput *bool
 	ExtendGinOutput        func(err *ExErr, json map[string]any)
 	ExtendGinDataOutput    func(err *ExErr, depth int, json map[string]any)
+	DisableErrorWrapping   *bool
 }
 
 var initialized = false
@@ -35,6 +37,7 @@ var pkgconfig = ErrorPackageConfig{
 	IncludeMetaInGinOutput: true,
 	ExtendGinOutput:        func(err *ExErr, json map[string]any) {},
 	ExtendGinDataOutput:    func(err *ExErr, depth int, json map[string]any) {},
+	DisableErrorWrapping:   false,
 }
 
 // Init initializes the exerr packages
@@ -63,6 +66,7 @@ func Init(cfg ErrorPackageConfigInit) {
 		IncludeMetaInGinOutput: langext.Coalesce(cfg.IncludeMetaInGinOutput, pkgconfig.IncludeMetaInGinOutput),
 		ExtendGinOutput:        ego,
 		ExtendGinDataOutput:    egdo,
+		DisableErrorWrapping:   langext.Coalesce(cfg.DisableErrorWrapping, pkgconfig.DisableErrorWrapping),
 	}
 
 	initialized = true
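Taken together, the changes above add an opt-in escape hatch to the error builder: with `DisableErrorWrapping` enabled, `exerr.Wrap(err, ...)` followed by `Build()` short-circuits and hands back the original error instead of a wrapped `ExErr`. A minimal sketch of how a consumer might opt in (only the new field is set; every unset field keeps its package default via `Coalesce`):

```go
package main

import (
	"errors"
	"fmt"

	"gogs.mikescher.com/BlackForestBytes/goext/exerr"
	"gogs.mikescher.com/BlackForestBytes/goext/langext"
)

func main() {
	// Disable wrapping globally; all other config values fall back to defaults.
	exerr.Init(exerr.ErrorPackageConfigInit{
		DisableErrorWrapping: langext.Ptr(true),
	})

	orig := errors.New("connection refused")

	// Build() now returns the original error unchanged (via the new wrappedErr field).
	err := exerr.Wrap(orig, "failed to reach upstream").Build()
	fmt.Println(err == orig) // true
}
```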
@@ -1,5 +1,5 @@
 package goext
 
-const GoextVersion = "0.0.367"
+const GoextVersion = "0.0.373"
 
-const GoextVersionTimestamp = "2024-01-12T18:40:29+0100"
+const GoextVersionTimestamp = "2024-01-14T00:07:01+0100"
@@ -5,7 +5,7 @@ import (
 	"go.mongodb.org/mongo-driver/mongo"
 )
 
-type Filter interface {
+type MongoFilter interface {
 	FilterQuery() mongo.Pipeline
 	Sort() bson.D
 }
@@ -23,6 +23,6 @@ func (d dynamicFilter) Sort() bson.D {
 	return d.sort
 }
 
-func CreateFilter(pipeline mongo.Pipeline, sort bson.D) Filter {
+func CreateFilter(pipeline mongo.Pipeline, sort bson.D) MongoFilter {
 	return dynamicFilter{pipeline: pipeline, sort: sort}
 }
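The rename from `Filter` to `MongoFilter` is mechanical (the interface body is unchanged), but it disambiguates the Mongo-side filter from the SQL-side `PaginateFilter` introduced further down. A short sketch of constructing one through the unchanged `CreateFilter` helper; the pipeline and sort values are placeholders:

```go
package main

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"

	pag "gogs.mikescher.com/BlackForestBytes/goext/pagination"
)

func main() {
	// Placeholder pipeline/sort; real values would come from request parameters.
	pipeline := mongo.Pipeline{
		bson.D{{Key: "$match", Value: bson.D{{Key: "active", Value: true}}}},
	}
	sort := bson.D{{Key: "_id", Value: 1}}

	// Only the declared type changed: CreateFilter now returns a MongoFilter.
	var f pag.MongoFilter = pag.CreateFilter(pipeline, sort)
	_ = f.FilterQuery() // mongo.Pipeline
	_ = f.Sort()        // bson.D
}
```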
@@ -53,7 +53,7 @@ func BuildUpdateStatement(q Queryable, tableName string, obj any, idColumn strin
 			return "", nil, err
 		}
 
-		setClauses = append(setClauses, fmt.Sprintf("(%s = :%s)", columnName, params.Add(val)))
+		setClauses = append(setClauses, fmt.Sprintf("%s = :%s", columnName, params.Add(val)))
 
 	}
 }
@@ -69,3 +69,52 @@ func BuildUpdateStatement(q Queryable, tableName string, obj any, idColumn strin
 	//goland:noinspection SqlNoDataSourceInspection
 	return fmt.Sprintf("UPDATE %s SET %s WHERE %s", tableName, strings.Join(setClauses, ", "), matchClause), params, nil
 }
+
+func BuildInsertStatement(q Queryable, tableName string, obj any) (string, PP, error) {
+	rval := reflect.ValueOf(obj)
+	rtyp := rval.Type()
+
+	params := PP{}
+
+	fields := make([]string, 0)
+	values := make([]string, 0)
+
+	for i := 0; i < rtyp.NumField(); i++ {
+
+		rsfield := rtyp.Field(i)
+		rvfield := rval.Field(i)
+
+		if !rsfield.IsExported() {
+			continue
+		}
+
+		columnName := rsfield.Tag.Get("db")
+		if columnName == "" || columnName == "-" {
+			continue
+		}
+
+		if rsfield.Type.Kind() == reflect.Ptr && rvfield.IsNil() {
+
+			fields = append(fields, columnName)
+			values = append(values, "NULL")
+
+		} else {
+
+			val, err := convertValueToDB(q, rvfield.Interface())
+			if err != nil {
+				return "", nil, err
+			}
+
+			fields = append(fields, columnName)
+			values = append(values, ":"+params.Add(val))
+
+		}
+	}
+
+	if len(fields) == 0 {
+		return "", nil, exerr.New(exerr.TypeSQLBuild, "no fields found in object").Build()
+	}
+
+	//goland:noinspection SqlNoDataSourceInspection
+	return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", tableName, strings.Join(fields, ", "), strings.Join(values, ", ")), params, nil
+}
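`BuildInsertStatement` mirrors the existing `BuildUpdateStatement`: it reflects over the exported fields of a struct, takes column names from their `db` tags, converts values through the registered converters, and renders nil pointer fields as literal `NULL`. A hedged usage sketch, written as if inside the `sq` package; the `Person` model is invented, and it assumes `Queryable` exposes the same `Exec` method as the database handle:

```go
package sq // sketch: same package, so Queryable, PP, and BuildInsertStatement resolve

import "context"

// Person is a hypothetical model; the `db` tags drive the generated column list.
type Person struct {
	ID   string `db:"id"`
	Name string `db:"name"`
	Age  *int   `db:"age"` // nil pointer fields are rendered as literal NULL
}

func insertPerson(ctx context.Context, q Queryable, p Person) error {
	sqlstr, params, err := BuildInsertStatement(q, "people", p)
	if err != nil {
		return err
	}
	// sqlstr has the shape "INSERT INTO people (id, name, age) VALUES (...)";
	// the exact named-parameter placeholders are whatever params.Add produced.
	_, err = q.Exec(ctx, sqlstr, params)
	return err
}
```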
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"github.com/jmoiron/sqlx"
+	"gogs.mikescher.com/BlackForestBytes/goext/exerr"
 	"gogs.mikescher.com/BlackForestBytes/goext/langext"
 	"sync"
 )
@@ -45,7 +46,7 @@ func (db *database) Exec(ctx context.Context, sqlstr string, prep PP) (sql.Resul
 	for _, v := range db.lstr {
 		err := v.PreExec(ctx, nil, &sqlstr, &prep)
 		if err != nil {
-			return nil, err
+			return nil, exerr.Wrap(err, "failed to call SQL pre-exec listener").Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 		}
 	}
 
@@ -56,7 +57,7 @@ func (db *database) Exec(ctx context.Context, sqlstr string, prep PP) (sql.Resul
 	}
 
 	if err != nil {
-		return nil, err
+		return nil, exerr.Wrap(err, "Failed to [exec] sql statement").Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 	}
 	return res, nil
 }
@@ -66,7 +67,7 @@ func (db *database) Query(ctx context.Context, sqlstr string, prep PP) (*sqlx.Ro
 	for _, v := range db.lstr {
 		err := v.PreQuery(ctx, nil, &sqlstr, &prep)
 		if err != nil {
-			return nil, err
+			return nil, exerr.Wrap(err, "failed to call SQL pre-query listener").Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 		}
 	}
 
@@ -77,7 +78,7 @@ func (db *database) Query(ctx context.Context, sqlstr string, prep PP) (*sqlx.Ro
 	}
 
 	if err != nil {
-		return nil, err
+		return nil, exerr.Wrap(err, "Failed to [query] sql statement").Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 	}
 	return rows, nil
 }
@@ -97,7 +98,7 @@ func (db *database) Ping(ctx context.Context) error {
 	}
 
 	if err != nil {
-		return err
+		return exerr.Wrap(err, "Failed to [ping] sql database").Build()
 	}
 	return nil
 }
@@ -117,7 +118,7 @@ func (db *database) BeginTransaction(ctx context.Context, iso sql.IsolationLevel
 
 	xtx, err := db.db.BeginTxx(ctx, &sql.TxOptions{Isolation: iso})
 	if err != nil {
-		return nil, err
+		return nil, exerr.Wrap(err, "Failed to start sql transaction").Build()
 	}
 
 	for _, v := range db.lstr {
sq/paginate.go (new file)
@@ -0,0 +1,126 @@
+package sq
+
+import (
+	"context"
+	"fmt"
+	ct "gogs.mikescher.com/BlackForestBytes/goext/cursortoken"
+	"gogs.mikescher.com/BlackForestBytes/goext/exerr"
+	"gogs.mikescher.com/BlackForestBytes/goext/langext"
+	pag "gogs.mikescher.com/BlackForestBytes/goext/pagination"
+)
+
+type PaginateFilter interface {
+	SQL(params PP) (filterClause string, joinClause string, joinTables []string)
+	Sort() []FilterSort
+}
+
+type FilterSort struct {
+	Field     string
+	Direction ct.SortDirection
+}
+
+func Paginate[TData any](ctx context.Context, q Queryable, table string, filter PaginateFilter, scanMode StructScanMode, scanSec StructScanSafety, page int, limit *int) ([]TData, pag.Pagination, error) {
+	prepParams := PP{}
+
+	sortOrder := filter.Sort()
+	sortCond := ""
+	if len(sortOrder) > 0 {
+		sortCond = "ORDER BY "
+		for i, v := range sortOrder {
+			if i > 0 {
+				sortCond += ", "
+			}
+			sortCond += v.Field + " " + string(v.Direction)
+		}
+	}
+
+	pageCond := ""
+	if limit != nil {
+		pageCond += fmt.Sprintf("LIMIT :%s OFFSET :%s", prepParams.Add(*limit+1), prepParams.Add(*limit*(page-1)))
+	}
+
+	filterCond, joinCond, joinTables := filter.SQL(prepParams)
+
+	selectCond := table + ".*"
+	for _, v := range joinTables {
+		selectCond += ", " + v + ".*"
+	}
+
+	sqlQueryData := "SELECT " + selectCond + " FROM " + table + " " + joinCond + " WHERE ( " + filterCond + " ) " + sortCond + " " + pageCond
+	sqlQueryCount := "SELECT " + "COUNT(*)" + " FROM " + table + " " + joinCond + " WHERE ( " + filterCond + " ) "
+
+	rows, err := q.Query(ctx, sqlQueryData, prepParams)
+	if err != nil {
+		return nil, pag.Pagination{}, exerr.Wrap(err, "failed to list paginated entries from DB").Str("table", table).Any("filter", filter).Int("page", page).Any("limit", limit).Build()
+	}
+
+	entities, err := ScanAll[TData](ctx, q, rows, scanMode, scanSec, true)
+	if err != nil {
+		return nil, pag.Pagination{}, exerr.Wrap(err, "failed to decode paginated entries from DB").Str("table", table).Int("page", page).Any("limit", limit).Str("scanMode", string(scanMode)).Str("scanSec", string(scanSec)).Build()
+	}
+
+	if page == 1 && (limit == nil || len(entities) <= *limit) {
+		return entities, pag.Pagination{
+			Page:             1,
+			Limit:            langext.Coalesce(limit, len(entities)),
+			TotalPages:       1,
+			TotalItems:       len(entities),
+			CurrentPageCount: 1,
+		}, nil
+	} else {
+
+		countRows, err := q.Query(ctx, sqlQueryCount, prepParams)
+		if err != nil {
+			return nil, pag.Pagination{}, exerr.Wrap(err, "failed to query total-count of paginated entries from DB").Str("table", table).Build()
+		}
+
+		if !countRows.Next() {
+			return nil, pag.Pagination{}, exerr.New(exerr.TypeSQLDecode, "SQL COUNT(*) query returned no rows").Str("table", table).Any("filter", filter).Build()
+		}
+
+		var countRes int
+		err = countRows.Scan(&countRes)
+		if err != nil {
+			return nil, pag.Pagination{}, exerr.Wrap(err, "failed to decode total-count of paginated entries from DB").Str("table", table).Build()
+		}
+
+		if len(entities) > *limit {
+			entities = entities[:*limit]
+		}
+
+		paginationObj := pag.Pagination{
+			Page:             page,
+			Limit:            langext.Coalesce(limit, countRes),
+			TotalPages:       pag.CalcPaginationTotalPages(countRes, langext.Coalesce(limit, countRes)),
+			TotalItems:       countRes,
+			CurrentPageCount: len(entities),
+		}
+
+		return entities, paginationObj, nil
+	}
+}
+
+func Count(ctx context.Context, q Queryable, table string, filter PaginateFilter) (int, error) {
+	prepParams := PP{}
+
+	filterCond, joinCond, _ := filter.SQL(prepParams)
+
+	sqlQueryCount := "SELECT " + "COUNT(*)" + " FROM " + table + " " + joinCond + " WHERE ( " + filterCond + " )"
+
+	countRows, err := q.Query(ctx, sqlQueryCount, prepParams)
+	if err != nil {
+		return 0, exerr.Wrap(err, "failed to query count of entries from DB").Str("table", table).Build()
+	}
+
+	if !countRows.Next() {
+		return 0, exerr.New(exerr.TypeSQLDecode, "SQL COUNT(*) query returned no rows").Str("table", table).Any("filter", filter).Build()
+	}
+
+	var countRes int
+	err = countRows.Scan(&countRes)
+	if err != nil {
+		return 0, exerr.Wrap(err, "failed to decode count of entries from DB").Str("table", table).Build()
+	}
+
+	return countRes, nil
+}
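To use the new paginator, a caller only supplies the WHERE/JOIN fragments and a sort order; `Paginate` assembles the data query, the parallel COUNT(*) query, and the `pagination.Pagination` metadata (it fetches limit+1 rows so that a fully-contained first page can skip the count query entirely). A sketch of a minimal filter and call, written as if inside the `sq` package; `userFilter` and the table/column names are invented, and the `SModeExtended`/`Safe` scan arguments mirror the ones used in this package's tests:

```go
package sq // sketch: same package, so PP, FilterSort, Paginate, SModeExtended, Safe resolve

import (
	"context"

	ct "gogs.mikescher.com/BlackForestBytes/goext/cursortoken"
	"gogs.mikescher.com/BlackForestBytes/goext/langext"
)

// userFilter is a hypothetical PaginateFilter: one WHERE clause, no joins.
type userFilter struct{ minAge int }

func (f userFilter) SQL(params PP) (string, string, []string) {
	return "users.age >= :" + params.Add(f.minAge), "", nil
}

func (f userFilter) Sort() []FilterSort {
	// SortDirection is string-backed (Paginate does string(v.Direction)),
	// so a literal direction suffices for the sketch.
	return []FilterSort{{Field: "users.age", Direction: ct.SortDirection("ASC")}}
}

func listAdults(ctx context.Context, q Queryable) error {
	type User struct {
		ID  string `db:"id"`
		Age int    `db:"age"`
	}
	users, pageInfo, err := Paginate[User](ctx, q, "users", userFilter{minAge: 18}, SModeExtended, Safe, 1, langext.Ptr(25))
	if err != nil {
		return err
	}
	_ = users    // first page, at most 25 rows
	_ = pageInfo // Page / Limit / TotalPages / TotalItems / CurrentPageCount
	return nil
}
```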
@@ -93,3 +93,62 @@ func TestTypeConverter2(t *testing.T) {
 	tst.AssertEqual(t, "002", r.ID)
 	tst.AssertEqual(t, t0.UnixNano(), r.Timestamp.UnixNano())
 }
+
+func TestTypeConverter3(t *testing.T) {
+
+	if !langext.InArray("sqlite3", sql.Drivers()) {
+		sqlite.RegisterAsSQLITE3()
+	}
+
+	type RequestData struct {
+		ID        string                 `db:"id"`
+		Timestamp *rfctime.UnixMilliTime `db:"timestamp"`
+	}
+
+	ctx := context.Background()
+
+	dbdir := t.TempDir()
+	dbfile1 := filepath.Join(dbdir, langext.MustHexUUID()+".sqlite3")
+
+	tst.AssertNoErr(t, os.MkdirAll(dbdir, os.ModePerm))
+
+	url := fmt.Sprintf("file:%s?_pragma=journal_mode(%s)&_pragma=timeout(%d)&_pragma=foreign_keys(%s)&_pragma=busy_timeout(%d)", dbfile1, "DELETE", 1000, "true", 1000)
+
+	xdb := tst.Must(sqlx.Open("sqlite", url))(t)
+
+	db := NewDB(xdb)
+	db.RegisterDefaultConverter()
+
+	_, err := db.Exec(ctx, "CREATE TABLE `requests` ( id TEXT NOT NULL, timestamp INTEGER NULL, PRIMARY KEY (id) ) STRICT", PP{})
+	tst.AssertNoErr(t, err)
+
+	t0 := rfctime.NewUnixMilli(time.Date(2012, 03, 01, 16, 0, 0, 0, time.UTC))
+
+	_, err = InsertSingle(ctx, db, "requests", RequestData{
+		ID:        "001",
+		Timestamp: &t0,
+	})
+	tst.AssertNoErr(t, err)
+
+	_, err = InsertSingle(ctx, db, "requests", RequestData{
+		ID:        "002",
+		Timestamp: nil,
+	})
+	tst.AssertNoErr(t, err)
+
+	{
+		r1, err := QuerySingle[RequestData](ctx, db, "SELECT * FROM requests WHERE id = '001'", PP{}, SModeExtended, Safe)
+		tst.AssertNoErr(t, err)
+		fmt.Printf("%+v\n", r1)
+		tst.AssertEqual(t, "001", r1.ID)
+		tst.AssertEqual(t, t0.UnixNano(), r1.Timestamp.UnixNano())
+	}
+
+	{
+		r2, err := QuerySingle[RequestData](ctx, db, "SELECT * FROM requests WHERE id = '002'", PP{}, SModeExtended, Safe)
+		tst.AssertNoErr(t, err)
+		fmt.Printf("%+v\n", r2)
+		tst.AssertEqual(t, "002", r2.ID)
+		tst.AssertEqual(t, nil, r2.Timestamp)
+	}
+}
@@ -7,6 +7,7 @@ import (
 	"github.com/jmoiron/sqlx/reflectx"
 	"gogs.mikescher.com/BlackForestBytes/goext/langext"
 	"reflect"
+	"strings"
 )
 
 // forked from sqlx, but added ability to unmarshal optional-nested structs
@@ -18,7 +19,7 @@ type StructScanner struct {
 
 	fields    [][]int
 	values    []any
-	converter []DBTypeConverter
+	converter []ssConverter
 	columns   []string
 }
 
@@ -30,6 +31,11 @@ func NewStructScanner(rows *sqlx.Rows, unsafe bool) *StructScanner {
 	}
 }
 
+type ssConverter struct {
+	Converter DBTypeConverter
+	RefCount  int
+}
+
 func (r *StructScanner) Start(dest any) error {
 	v := reflect.ValueOf(dest)
 
@@ -49,7 +55,7 @@ func (r *StructScanner) Start(dest any) error {
 		return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
 	}
 	r.values = make([]interface{}, len(columns))
-	r.converter = make([]DBTypeConverter, len(columns))
+	r.converter = make([]ssConverter, len(columns))
 
 	return nil
 }
@@ -143,13 +149,19 @@ func (r *StructScanner) StructScanExt(q Queryable, dest any) error {
 
 			f.Set(reflect.Zero(f.Type())) // set to nil
 		} else {
-			if r.converter[i] != nil {
-				val3 := val2.Elem().Interface()
-				conv3, err := r.converter[i].DBToModel(val3)
+			if r.converter[i].Converter != nil {
+				val3 := val2.Elem()
+				conv3, err := r.converter[i].Converter.DBToModel(val3.Interface())
 				if err != nil {
 					return err
 				}
-				f.Set(reflect.ValueOf(conv3))
+				conv3RVal := reflect.ValueOf(conv3)
+				for j := 0; j < r.converter[i].RefCount; j++ {
+					newConv3Val := reflect.New(conv3RVal.Type())
+					newConv3Val.Elem().Set(conv3RVal)
+					conv3RVal = newConv3Val
+				}
+				f.Set(conv3RVal)
 			} else {
 				f.Set(val2.Elem())
 			}
@@ -184,7 +196,7 @@ func (r *StructScanner) StructScanBase(dest any) error {
 }
 
 // fieldsByTraversal forked from github.com/jmoiron/sqlx@v1.3.5/sqlx.go
-func fieldsByTraversalExtended(q Queryable, v reflect.Value, traversals [][]int, values []interface{}, converter []DBTypeConverter) error {
+func fieldsByTraversalExtended(q Queryable, v reflect.Value, traversals [][]int, values []interface{}, converter []ssConverter) error {
 	v = reflect.Indirect(v)
 	if v.Kind() != reflect.Struct {
 		return errors.New("argument not a struct")
@@ -205,14 +217,26 @@ func fieldsByTraversalExtended(q Queryable, v reflect.Value, traversals [][]int,
 				_v := langext.Ptr[any](nil)
 				values[i] = _v
 				foundConverter = true
-				converter[i] = conv
+				converter[i] = ssConverter{Converter: conv, RefCount: 0}
 				break
 			}
 		}
+		if !foundConverter {
+			// also allow non-pointer converter for pointer-types
+			for _, conv := range q.ListConverter() {
+				if conv.ModelTypeString() == strings.TrimLeft(typeStr, "*") {
+					_v := langext.Ptr[any](nil)
+					values[i] = _v
+					foundConverter = true
+					converter[i] = ssConverter{Converter: conv, RefCount: len(typeStr) - len(strings.TrimLeft(typeStr, "*"))} // kind hacky way to get the amount of ptr before <f>, but it works...
+					break
+				}
+			}
+		}
+
 		if !foundConverter {
 			values[i] = reflect.New(reflect.PointerTo(f.Type())).Interface()
-			converter[i] = nil
+			converter[i] = ssConverter{Converter: nil, RefCount: -1}
 		}
 	}
 	return nil
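The `RefCount` bookkeeping is what lets a converter registered for a value type (e.g. `rfctime.UnixMilliTime`) also serve `*rfctime.UnixMilliTime` fields, as exercised by `TestTypeConverter3` above: the scanner counts the leading `*` in the field's type string, converts the raw DB value into the value type, then re-wraps the result in that many levels of pointer. The re-wrap loop added to `StructScanExt` behaves like this standalone sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

// wrapInPointers re-wraps v in refCount levels of pointer indirection,
// mirroring the loop added to StructScanner.StructScanExt.
func wrapInPointers(v reflect.Value, refCount int) reflect.Value {
	for j := 0; j < refCount; j++ {
		p := reflect.New(v.Type()) // allocate a *T
		p.Elem().Set(v)            // store the current value in it
		v = p                      // next iteration wraps to **T, and so on
	}
	return v
}

func main() {
	v := wrapInPointers(reflect.ValueOf(42), 2)      // yields a **int
	fmt.Println(v.Type(), **(v.Interface().(**int))) // "**int 42"
}
```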
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"github.com/jmoiron/sqlx"
+	"gogs.mikescher.com/BlackForestBytes/goext/exerr"
 	"gogs.mikescher.com/BlackForestBytes/goext/langext"
 )
 
@@ -48,7 +49,7 @@ func (tx *transaction) Rollback() error {
 	for _, v := range tx.db.lstr {
 		err := v.PreTxRollback(tx.id)
 		if err != nil {
-			return err
+			return exerr.Wrap(err, "failed to call SQL pre-rollback listener").Int("tx.id", int(tx.id)).Build()
 		}
 	}
 
@@ -69,7 +70,7 @@ func (tx *transaction) Commit() error {
 	for _, v := range tx.db.lstr {
 		err := v.PreTxCommit(tx.id)
 		if err != nil {
-			return err
+			return exerr.Wrap(err, "failed to call SQL pre-commit listener").Int("tx.id", int(tx.id)).Build()
 		}
 	}
 
@@ -91,7 +92,7 @@ func (tx *transaction) Exec(ctx context.Context, sqlstr string, prep PP) (sql.Re
 	for _, v := range tx.db.lstr {
 		err := v.PreExec(ctx, langext.Ptr(tx.id), &sqlstr, &prep)
 		if err != nil {
-			return nil, err
+			return nil, exerr.Wrap(err, "failed to call SQL pre-exec listener").Int("tx.id", int(tx.id)).Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 		}
 	}
 
@@ -106,7 +107,7 @@ func (tx *transaction) Exec(ctx context.Context, sqlstr string, prep PP) (sql.Re
 	}
 
 	if err != nil {
-		return nil, err
+		return nil, exerr.Wrap(err, "Failed to [exec] sql statement").Int("tx.id", int(tx.id)).Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 	}
 	return res, nil
 }
@@ -116,7 +117,7 @@ func (tx *transaction) Query(ctx context.Context, sqlstr string, prep PP) (*sqlx
 	for _, v := range tx.db.lstr {
 		err := v.PreQuery(ctx, langext.Ptr(tx.id), &sqlstr, &prep)
 		if err != nil {
-			return nil, err
+			return nil, exerr.Wrap(err, "failed to call SQL pre-query listener").Int("tx.id", int(tx.id)).Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 		}
 	}
 
@@ -131,7 +132,7 @@ func (tx *transaction) Query(ctx context.Context, sqlstr string, prep PP) (*sqlx
 	}
 
 	if err != nil {
-		return nil, err
+		return nil, exerr.Wrap(err, "Failed to [query] sql statement").Int("tx.id", int(tx.id)).Str("original_sql", origsql).Str("sql", sqlstr).Any("sql_params", prep).Build()
 	}
 	return rows, nil
 }
@@ -9,7 +9,7 @@ import (
 	pag "gogs.mikescher.com/BlackForestBytes/goext/pagination"
 )
 
-func (c *Coll[TData]) Paginate(ctx context.Context, filter pag.Filter, page int, limit *int) ([]TData, pag.Pagination, error) {
+func (c *Coll[TData]) Paginate(ctx context.Context, filter pag.MongoFilter, page int, limit *int) ([]TData, pag.Pagination, error) {
 	type totalCountResult struct {
 		Count int `bson:"count"`
 	}