diff --git a/scnserver/db/dbtools/preprocessor.go b/scnserver/db/dbtools/preprocessor.go
index 818bc3c..72db340 100644
--- a/scnserver/db/dbtools/preprocessor.go
+++ b/scnserver/db/dbtools/preprocessor.go
@@ -42,7 +42,7 @@ var regexAlias = rext.W(regexp.MustCompile("([A-Za-z_\\-0-9]+)\\s+AS\\s+([A-Za-z
 
 func NewDBPreprocessor(db sq.DB) (*DBPreprocessor, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel()
 
 	obj := &DBPreprocessor{
diff --git a/scnserver/db/impl/primary/database.go b/scnserver/db/impl/primary/database.go
index 36ae731..4d0d474 100644
--- a/scnserver/db/impl/primary/database.go
+++ b/scnserver/db/impl/primary/database.go
@@ -46,7 +46,7 @@ func NewPrimaryDatabase(cfg server.Config) (*Database, error) {
 
 	xdb, err := sqlx.Open("sqlite3", url)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to open sqlite3 database: %w", err)
 	}
 
 	if conf.SingleConn {
@@ -67,7 +67,7 @@ func NewPrimaryDatabase(cfg server.Config) (*Database, error) {
 
 	pp, err := dbtools.NewDBPreprocessor(qqdb)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to init db-preprocessor: %w", err)
 	}
 
 	qqdb.AddListener(pp)
diff --git a/scnserver/jobs/requestLogCleanupJob.go b/scnserver/jobs/requestLogCleanupJob.go
index 24a3de0..95a81f4 100644
--- a/scnserver/jobs/requestLogCleanupJob.go
+++ b/scnserver/jobs/requestLogCleanupJob.go
@@ -105,6 +105,9 @@ func (j *RequestLogCleanupJob) execute() (err error) {
 	ctx := j.app.NewSimpleTransactionContext(10 * time.Second)
 	defer ctx.Cancel()
 
+	j.app.RequestDatabaseLock.Lock()
+	defer j.app.RequestDatabaseLock.Unlock()
+
 	deleted, err := j.app.Database.Requests.Cleanup(ctx, j.app.Config.ReqLogHistoryMaxCount, j.app.Config.ReqLogHistoryMaxDuration)
 	if err != nil {
 		return err
diff --git a/scnserver/jobs/requestLogCollectorJob.go b/scnserver/jobs/requestLogCollectorJob.go
index 036b78b..09aa848 100644
--- a/scnserver/jobs/requestLogCollectorJob.go
+++ b/scnserver/jobs/requestLogCollectorJob.go
@@ -90,6 +90,8 @@ mainLoop:
 }
 
 func (j *RequestLogCollectorJob) insertLog(requestid models.RequestID, rl models.RequestLog) error {
+	j.app.RequestDatabaseLock.Lock()
+	defer j.app.RequestDatabaseLock.Unlock()
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
diff --git a/scnserver/logic/application.go b/scnserver/logic/application.go
index 71cc319..e1c6aca 100644
--- a/scnserver/logic/application.go
+++ b/scnserver/logic/application.go
@@ -3,6 +3,7 @@ package logic
 import (
 	scn "blackforestbytes.com/simplecloudnotifier"
 	"blackforestbytes.com/simplecloudnotifier/db"
+	ct "blackforestbytes.com/simplecloudnotifier/db/cursortoken"
 	"blackforestbytes.com/simplecloudnotifier/db/simplectx"
 	"blackforestbytes.com/simplecloudnotifier/google"
 	"blackforestbytes.com/simplecloudnotifier/models"
@@ -20,6 +21,7 @@ import (
 	"os"
 	"os/signal"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 )
@@ -36,6 +38,7 @@ type Application struct {
 	IsRunning           *syncext.AtomicBool
 	RequestLogQueue     chan models.RequestLog
 	MainDatabaseLock    golock.RWMutex
+	RequestDatabaseLock sync.Mutex
 	keyTokenLastUsedDCI *dataext.SyncMap[models.KeyTokenID, *dataext.DelayedCombiningInvoker]
 }
 
@@ -46,6 +49,7 @@ func NewApp(db *DBPool) *Application {
 		IsRunning:           syncext.NewAtomicBool(false),
 		RequestLogQueue:     make(chan models.RequestLog, 8192),
 		MainDatabaseLock:    golock.NewCASMutex(),
+		RequestDatabaseLock: sync.Mutex{},
 		keyTokenLastUsedDCI: dataext.NewSyncMap[models.KeyTokenID, *dataext.DelayedCombiningInvoker](),
 	}
 }
@@ -353,3 +357,10 @@ func (app *Application) InsertRequestLog(data models.RequestLog) {
 		log.Error().Msg("failed to insert request-log (queue full)")
 	}
 }
+
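+// ListRequestLogs reads request-log entries while holding RequestDatabaseLock, the same
+// lock taken by the request-log collector and cleanup jobs before they touch the requests database.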
+func (app *Application) ListRequestLogs(ctx *simplectx.SimpleContext, filter models.RequestLogFilter, pageSize *int, start ct.CursorToken) ([]models.RequestLog, ct.CursorToken, error) {
+	app.RequestDatabaseLock.Lock()
+	defer app.RequestDatabaseLock.Unlock()
+
+	return app.Database.Requests.ListRequestLogs(ctx, filter, pageSize, start)
+}
diff --git a/scnserver/logic/dbpool.go b/scnserver/logic/dbpool.go
index d260332..f6f9c43 100644
--- a/scnserver/logic/dbpool.go
+++ b/scnserver/logic/dbpool.go
@@ -7,6 +7,7 @@ import (
 	primarydb "blackforestbytes.com/simplecloudnotifier/db/impl/primary"
 	requestsdb "blackforestbytes.com/simplecloudnotifier/db/impl/requests"
 	"context"
+	"fmt"
 )
 
 type DBPool struct {
@@ -19,17 +20,17 @@ func NewDBPool(conf scn.Config) (*DBPool, error) {
 
 	dbprimary, err := primarydb.NewPrimaryDatabase(conf)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create primary database: %w", err)
 	}
 
 	dbrequests, err := requestsdb.NewRequestsDatabase(conf)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create requests database: %w", err)
 	}
 
 	dblogs, err := logsdb.NewLogsDatabase(conf)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create logs database: %w", err)
 	}
 
 	return &DBPool{
diff --git a/scnserver/test/database_test.go b/scnserver/test/database_test.go
index 793100c..40229b8 100644
--- a/scnserver/test/database_test.go
+++ b/scnserver/test/database_test.go
@@ -294,582 +294,34 @@ func TestRequestsDB_Current(t *testing.T) {
 }
 
 func TestPrimaryDB_Migrate_from_3_to_latest(t *testing.T) {
-	dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t)
-	defer stop()
-
-	ctx := context.Background()
-
-	tt.AssertAny(dbf1)
-	tt.AssertAny(dbf2)
-	tt.AssertAny(dbf3)
-	tt.AssertAny(conf)
-
-	schemavers := 3
-
-	{
-		url := fmt.Sprintf("file:%s", dbf1)
-
-		xdb, err := sqlx.Open("sqlite3", url)
-		tt.TestFailIfErr(t, err)
-
-		qqdb := sq.NewDB(xdb, sq.DBOptions{})
-
-		dbschema := schema.PrimarySchema[schemavers]
-
-		_, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{})
-		tt.TestFailIfErr(t, err)
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
-			"key": "schema",
-			"val": schemavers,
-		})
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
-			"key": "schema_hash",
-			"val": dbschema.Hash,
-		})
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = qqdb.Exit()
-		tt.TestFailIfErr(t, err)
-	}
-
-	{
-		db1, err := primary.NewPrimaryDatabase(conf)
-		tt.TestFailIfErr(t, err)
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema1, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema1", schemavers, schema1)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		//================================================
-		{
-			err = db1.Migrate(ctx)
-			tt.TestFailIfErr(t, err)
-		}
-		//================================================
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema2, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB())
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = db1.Stop(ctx)
-		tt.TestFailIfErr(t, err)
-	}
+	_internalTestPrimaryDBMigrateToLatest(t, 3)
 }
 
 func TestPrimaryDB_Migrate_from_4_to_latest(t *testing.T) {
-	dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t)
-	defer stop()
-
-	ctx := context.Background()
-
-	tt.AssertAny(dbf1)
-	tt.AssertAny(dbf2)
-	tt.AssertAny(dbf3)
-	tt.AssertAny(conf)
-
-	schemavers := 4
-
-	{
-		url := fmt.Sprintf("file:%s", dbf1)
-
-		xdb, err := sqlx.Open("sqlite3", url)
-		tt.TestFailIfErr(t, err)
-
-		qqdb := sq.NewDB(xdb, sq.DBOptions{})
-
-		dbschema := schema.PrimarySchema[schemavers]
-
-		_, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{})
-		tt.TestFailIfErr(t, err)
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
-			"key": "schema",
-			"val": schemavers,
-		})
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
-			"key": "schema_hash",
-			"val": dbschema.Hash,
-		})
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = qqdb.Exit()
-		tt.TestFailIfErr(t, err)
-	}
-
-	{
-		db1, err := primary.NewPrimaryDatabase(conf)
-		tt.TestFailIfErr(t, err)
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema1, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema1", schemavers, schema1)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		//================================================
-		{
-			err = db1.Migrate(ctx)
-			tt.TestFailIfErr(t, err)
-		}
-		//================================================
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema2, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB())
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = db1.Stop(ctx)
-		tt.TestFailIfErr(t, err)
-	}
+	_internalTestPrimaryDBMigrateToLatest(t, 4)
 }
 
 func TestPrimaryDB_Migrate_from_5_to_latest(t *testing.T) {
-	dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t)
-	defer stop()
-
-	ctx := context.Background()
-
-	tt.AssertAny(dbf1)
-	tt.AssertAny(dbf2)
-	tt.AssertAny(dbf3)
-	tt.AssertAny(conf)
-
-	schemavers := 5
-
-	{
-		url := fmt.Sprintf("file:%s", dbf1)
-
-		xdb, err := sqlx.Open("sqlite3", url)
-		tt.TestFailIfErr(t, err)
-
-		qqdb := sq.NewDB(xdb, sq.DBOptions{})
-
-		dbschema := schema.PrimarySchema[schemavers]
-
-		_, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{})
-		tt.TestFailIfErr(t, err)
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
-			"key": "schema",
-			"val": schemavers,
-		})
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
-			"key": "schema_hash",
-			"val": dbschema.Hash,
-		})
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = qqdb.Exit()
-		tt.TestFailIfErr(t, err)
-	}
-
-	{
-		db1, err := primary.NewPrimaryDatabase(conf)
-		tt.TestFailIfErr(t, err)
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema1, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema1", schemavers, schema1)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		//================================================
-		{
-			err = db1.Migrate(ctx)
-			tt.TestFailIfErr(t, err)
-		}
-		//================================================
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema2, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB())
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = db1.Stop(ctx)
-		tt.TestFailIfErr(t, err)
-	}
+	_internalTestPrimaryDBMigrateToLatest(t, 5)
 }
 
 func TestPrimaryDB_Migrate_from_6_to_latest(t *testing.T) {
-	dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t)
-	defer stop()
-
-	ctx := context.Background()
-
-	tt.AssertAny(dbf1)
-	tt.AssertAny(dbf2)
-	tt.AssertAny(dbf3)
-	tt.AssertAny(conf)
-
-	schemavers := 6
-
-	{
-		url := fmt.Sprintf("file:%s", dbf1)
-
-		xdb, err := sqlx.Open("sqlite3", url)
-		tt.TestFailIfErr(t, err)
-
-		qqdb := sq.NewDB(xdb, sq.DBOptions{})
-
-		dbschema := schema.PrimarySchema[schemavers]
-
-		_, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{})
-		tt.TestFailIfErr(t, err)
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
-			"key": "schema",
-			"val": schemavers,
-		})
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
-			"key": "schema_hash",
-			"val": dbschema.Hash,
-		})
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = qqdb.Exit()
-		tt.TestFailIfErr(t, err)
-	}
-
-	{
-		db1, err := primary.NewPrimaryDatabase(conf)
-		tt.TestFailIfErr(t, err)
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema1, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema1", schemavers, schema1)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		//================================================
-		{
-			err = db1.Migrate(ctx)
-			tt.TestFailIfErr(t, err)
-		}
-		//================================================
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema2, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB())
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = db1.Stop(ctx)
-		tt.TestFailIfErr(t, err)
-	}
+	_internalTestPrimaryDBMigrateToLatest(t, 6)
 }
 
 func TestPrimaryDB_Migrate_from_7_to_latest(t *testing.T) {
-	dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t)
-	defer stop()
-
-	ctx := context.Background()
-
-	tt.AssertAny(dbf1)
-	tt.AssertAny(dbf2)
-	tt.AssertAny(dbf3)
-	tt.AssertAny(conf)
-
-	schemavers := 7
-
-	{
-		url := fmt.Sprintf("file:%s", dbf1)
-
-		xdb, err := sqlx.Open("sqlite3", url)
-		tt.TestFailIfErr(t, err)
-
-		qqdb := sq.NewDB(xdb, sq.DBOptions{})
-
-		dbschema := schema.PrimarySchema[schemavers]
-
-		_, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{})
-		tt.TestFailIfErr(t, err)
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
-			"key": "schema",
-			"val": schemavers,
-		})
-
-		_, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
-			"key": "schema_hash",
-			"val": dbschema.Hash,
-		})
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = qqdb.Exit()
-		tt.TestFailIfErr(t, err)
-	}
-
-	{
-		db1, err := primary.NewPrimaryDatabase(conf)
-		tt.TestFailIfErr(t, err)
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema1, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema1", schemavers, schema1)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		//================================================
-		{
-			err = db1.Migrate(ctx)
-			tt.TestFailIfErr(t, err)
-		}
-		//================================================
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-
-			schema2, err := db1.ReadSchema(tctx)
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2)
-
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		{
-			tctx := simplectx.CreateSimpleContext(ctx, nil)
-			schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB())
-			tt.TestFailIfErr(t, err)
-			tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB)
-			err = tctx.CommitTransaction()
-			tt.TestFailIfErr(t, err)
-		}
-
-		err = db1.Stop(ctx)
-		tt.TestFailIfErr(t, err)
-	}
+	_internalTestPrimaryDBMigrateToLatest(t, 7)
 }
 
url := fmt.Sprintf("file:%s", dbf1) - - xdb, err := sqlx.Open("sqlite3", url) - tt.TestFailIfErr(t, err) - - qqdb := sq.NewDB(xdb, sq.DBOptions{}) - - dbschema := schema.PrimarySchema[schemavers] - - _, err = qqdb.Exec(ctx, dbschema.SQL, sq.PP{}) - tt.TestFailIfErr(t, err) - - _, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{ - "key": "schema", - "val": schemavers, - }) - - _, err = qqdb.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{ - "key": "schema_hash", - "val": dbschema.Hash, - }) - - { - tctx := simplectx.CreateSimpleContext(ctx, nil) - schemHashDB, err := sq.HashSqliteDatabase(tctx, qqdb) - tt.TestFailIfErr(t, err) - tt.AssertEqual(t, "schemHashDB", dbschema.Hash, schemHashDB) - err = tctx.CommitTransaction() - tt.TestFailIfErr(t, err) - } - - err = qqdb.Exit() - tt.TestFailIfErr(t, err) - } - - { - db1, err := primary.NewPrimaryDatabase(conf) - tt.TestFailIfErr(t, err) - - { - tctx := simplectx.CreateSimpleContext(ctx, nil) - - schema1, err := db1.ReadSchema(tctx) - tt.TestFailIfErr(t, err) - tt.AssertEqual(t, "schema1", schemavers, schema1) - - err = tctx.CommitTransaction() - tt.TestFailIfErr(t, err) - } - - //================================================ - { - err = db1.Migrate(ctx) - tt.TestFailIfErr(t, err) - } - //================================================ - - { - tctx := simplectx.CreateSimpleContext(ctx, nil) - - schema2, err := db1.ReadSchema(tctx) - tt.TestFailIfErr(t, err) - tt.AssertEqual(t, "schema2", schema.PrimarySchemaVersion, schema2) - - err = tctx.CommitTransaction() - tt.TestFailIfErr(t, err) - } - - { - tctx := simplectx.CreateSimpleContext(ctx, nil) - schemHashDB, err := sq.HashSqliteDatabase(tctx, db1.DB()) - tt.TestFailIfErr(t, err) - tt.AssertEqual(t, "schemHashDB", schema.PrimarySchema[schema.PrimarySchemaVersion].Hash, schemHashDB) - err = tctx.CommitTransaction() - tt.TestFailIfErr(t, err) - } - - err = db1.Stop(ctx) - tt.TestFailIfErr(t, err) - } + _internalTestPrimaryDBMigrateToLatest(t, 9) } func TestPrimaryDB_Migrate_from_9_to_latest(t *testing.T) { + _internalTestPrimaryDBMigrateToLatest(t, 9) +} + +func _internalTestPrimaryDBMigrateToLatest(t *testing.T, schemavers int) { dbf1, dbf2, dbf3, conf, stop := tt.StartSimpleTestspace(t) defer stop() @@ -880,8 +332,6 @@ func TestPrimaryDB_Migrate_from_9_to_latest(t *testing.T) { tt.AssertAny(dbf3) tt.AssertAny(conf) - schemavers := 9 - { url := fmt.Sprintf("file:%s", dbf1) diff --git a/scnserver/test/requestlog_test.go b/scnserver/test/requestlog_test.go index 0a458d8..06fa9a0 100644 --- a/scnserver/test/requestlog_test.go +++ b/scnserver/test/requestlog_test.go @@ -130,21 +130,22 @@ func TestRequestLogAPI(t *testing.T) { defer stop() data := tt.InitDefaultData(t, ws) - time.Sleep(900 * time.Millisecond) + time.Sleep(5000 * time.Millisecond) - ctx := ws.NewSimpleTransactionContext(5 * time.Second) - defer ctx.Cancel() + ctx1 := ws.NewSimpleTransactionContext(5 * time.Second) + defer ctx1.Cancel() - rl1, _, err := ws.Database.Requests.ListRequestLogs(ctx, models.RequestLogFilter{}, nil, ct.Start()) + rl1, _, err := ws.Database.Requests.ListRequestLogs(ctx1, models.RequestLogFilter{}, nil, ct.Start()) tt.TestFailIfErr(t, err) tt.RequestAuthGet[gin.H](t, data.User[0].ReadKey, baseUrl, "/api/v2/users/"+data.User[0].UID) - time.Sleep(900 * time.Millisecond) - ws.MainDatabaseLock.Lock() - defer ws.MainDatabaseLock.Unlock() + 
+	time.Sleep(5000 * time.Millisecond)
 
-	rl2, _, err := ws.Database.Requests.ListRequestLogs(ctx, models.RequestLogFilter{}, nil, ct.Start())
+	ctx2 := ws.NewSimpleTransactionContext(5 * time.Second)
+	defer ctx2.Cancel()
+
+	rl2, _, err := ws.ListRequestLogs(ctx2, models.RequestLogFilter{}, nil, ct.Start())
 	tt.TestFailIfErr(t, err)
 
 	tt.AssertEqual(t, "requestlog.count", len(rl1)+1, len(rl2))