diff --git a/internal/test/helpers.go b/internal/test/helpers.go
index 00c31be40..72e0f4c34 100644
--- a/internal/test/helpers.go
+++ b/internal/test/helpers.go
@@ -3,6 +3,8 @@ package test
 import (
 	"bytes"
 	"encoding/hex"
+	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
@@ -369,3 +371,13 @@ func ScriptSchnorrSig(t *testing.T, pubKey *btcec.PublicKey) txscript.TapLeaf {
 	require.NoError(t, err)
 	return txscript.NewBaseTapLeaf(script2)
 }
+
+// ReadTestDataFile reads a file from the testdata directory and returns its
+// content as a string.
+func ReadTestDataFile(t *testing.T, fileName string) string {
+	path := filepath.Join("testdata", fileName)
+	fileBytes, err := os.ReadFile(path)
+	require.NoError(t, err)
+
+	return string(fileBytes)
+}
diff --git a/tapdb/asset_minting_test.go b/tapdb/asset_minting_test.go
index d3486abc0..50755f7ab 100644
--- a/tapdb/asset_minting_test.go
+++ b/tapdb/asset_minting_test.go
@@ -36,6 +36,13 @@ func newAssetStore(t *testing.T) (*AssetMintingStore, *AssetStore,
 	// First, Make a new test database.
 	db := NewTestDB(t)
 
+	mintStore, assetStore := newAssetStoreFromDB(db.BaseDB)
+	return mintStore, assetStore, db
+}
+
+// newAssetStoreFromDB makes new instances of the AssetMintingStore and
+// AssetStore backed by the passed database.
+func newAssetStoreFromDB(db *BaseDB) (*AssetMintingStore, *AssetStore) {
 	// TODO(roasbeef): can use another layer of type params since
 	// duplicated?
 	txCreator := func(tx *sql.Tx) PendingAssetStore {
@@ -50,7 +57,7 @@ func newAssetStore(t *testing.T) (*AssetMintingStore, *AssetStore,
 	testClock := clock.NewTestClock(time.Now())
 
 	return NewAssetMintingStore(assetMintingDB),
-		NewAssetStore(assetsDB, testClock), db
+		NewAssetStore(assetsDB, testClock)
 }
 
 func assertBatchState(t *testing.T, batch *tapgarden.MintingBatch,
diff --git a/tapdb/assets_store.go b/tapdb/assets_store.go
index 2699445eb..8621f992e 100644
--- a/tapdb/assets_store.go
+++ b/tapdb/assets_store.go
@@ -861,7 +861,7 @@ func fetchAssetsWithWitness(ctx context.Context, q ActiveAssetsStore,
 	// First, we'll fetch all the assets we know of on disk.
 	dbAssets, err := q.QueryAssets(ctx, assetFilter)
 	if err != nil {
-		return nil, nil, fmt.Errorf("unable to read db assets: %v", err)
+		return nil, nil, fmt.Errorf("unable to read db assets: %w", err)
 	}
 
 	assetIDs := fMap(dbAssets, func(a ConfirmedAsset) int64 {
diff --git a/tapdb/migrations.go b/tapdb/migrations.go
index 117feac90..8532de071 100644
--- a/tapdb/migrations.go
+++ b/tapdb/migrations.go
@@ -2,6 +2,7 @@ package tapdb
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"io/fs"
 	"net/http"
@@ -12,11 +13,31 @@ import (
 	"github.com/golang-migrate/migrate/v4/source/httpfs"
 )
 
-// applyMigrations executes all database migration files found in the given file
+// MigrationTarget is a functional option that can be passed to applyMigrations
+// to specify a target version to migrate to.
+type MigrationTarget func(mig *migrate.Migrate) error
+
+var (
+	// TargetLatest is a MigrationTarget that migrates to the latest
+	// version available.
+	TargetLatest = func(mig *migrate.Migrate) error {
+		return mig.Up()
+	}
+
+	// TargetVersion returns a MigrationTarget that migrates to the given
+	// version.
+	TargetVersion = func(version uint) MigrationTarget {
+		return func(mig *migrate.Migrate) error {
+			return mig.Migrate(version)
+		}
+	}
+)
+
+// applyMigrations executes database migration files found in the given file
 // system under the given path, using the passed database driver and database
-// name.
-func applyMigrations(fs fs.FS, driver database.Driver, path,
-	dbName string) error {
+// name, up to or down to the given target version.
+func applyMigrations(fs fs.FS, driver database.Driver, path, dbName string,
+	targetVersion MigrationTarget) error {
 
 	// With the migrate instance open, we'll create a new migration source
 	// using the embedded file system stored in sqlSchemas. The library
@@ -36,8 +57,10 @@ func applyMigrations(fs fs.FS, driver database.Driver, path,
 	if err != nil {
 		return err
 	}
-	err = sqlMigrate.Up()
-	if err != nil && err != migrate.ErrNoChange {
+
+	// Execute the migration based on the target given.
+	err = targetVersion(sqlMigrate)
+	if err != nil && !errors.Is(err, migrate.ErrNoChange) {
 		return err
 	}
 
diff --git a/tapdb/migrations_test.go b/tapdb/migrations_test.go
new file mode 100644
index 000000000..6eda94381
--- /dev/null
+++ b/tapdb/migrations_test.go
@@ -0,0 +1,53 @@
+package tapdb
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestMigrationSteps is an example test that illustrates how to test database
+// migrations by selectively applying only some migrations, inserting dummy
+// data and then applying the remaining migrations.
+func TestMigrationSteps(t *testing.T) {
+	ctx := context.Background()
+
+	// As a first step, we create a new database but only migrate to
+	// version 1, which only contains the macaroon tables.
+	db := NewTestDBWithVersion(t, 1)
+
+	// If we create an assets store now, there should be no tables for the
+	// assets yet.
+	_, assetStore := newAssetStoreFromDB(db.BaseDB)
+	_, err := assetStore.FetchAllAssets(ctx, true, true, nil)
+	require.True(t, IsSchemaError(MapSQLError(err)))
+
+	// We now migrate to a later, but not yet the latest, version.
+	err = db.ExecuteMigrations(TargetVersion(11))
+	require.NoError(t, err)
+
+	// Now there should be an asset table.
+	_, err = assetStore.FetchAllAssets(ctx, true, true, nil)
+	require.NoError(t, err)
+
+	// Assuming the next version makes some changes to the data within the
+	// asset table, we now add some dummy data to the asset related tables,
+	// so we can then test that migration.
+	InsertTestdata(t, db.BaseDB, "migrations_test_00011_dummy_data.sql")
+
+	// Make sure we now have actual assets in the database.
+	dbAssets, err := assetStore.FetchAllAssets(ctx, true, true, nil)
+	require.NoError(t, err)
+	require.Len(t, dbAssets, 4)
+
+	// And now that we have test data inserted, we can migrate to the
+	// latest version.
+	err = db.ExecuteMigrations(TargetLatest)
+	require.NoError(t, err)
+
+	// Here we would now test that the migration to the latest version did
+	// what we expected it to do. But this is just an example, illustrating
+	// the steps that can be taken to test migrations, so we are done for
+	// this test.
+}
diff --git a/tapdb/postgres.go b/tapdb/postgres.go
index 8ae4c97a3..1d4e5b745 100644
--- a/tapdb/postgres.go
+++ b/tapdb/postgres.go
@@ -30,6 +30,16 @@ var (
 	// fully executed yet. So this time needs to be chosen correctly to be
 	// longer than the longest expected individual test run time.
 	DefaultPostgresFixtureLifetime = 60 * time.Minute
+
+	// postgresSchemaReplacements is a map of schema strings that need to
+	// be replaced for postgres. This is needed because we write the
+	// schemas to work with sqlite primarily, and postgres has some
+	// differences.
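+	// The map entries are applied to the embedded schema files as plain
+	// string replacements (via newReplacerFS) before the migrations are
+	// executed against Postgres.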
+	postgresSchemaReplacements = map[string]string{
+		"BLOB":                "BYTEA",
+		"INTEGER PRIMARY KEY": "SERIAL PRIMARY KEY",
+		"BIGINT PRIMARY KEY":  "BIGSERIAL PRIMARY KEY",
+		"TIMESTAMP":           "TIMESTAMP WITHOUT TIME ZONE",
+	}
 )
 
 // PostgresConfig holds the postgres database configuration.
@@ -107,44 +117,41 @@ func NewPostgresStore(cfg *PostgresConfig) (*PostgresStore, error) {
 	rawDb.SetConnMaxLifetime(connMaxLifetime)
 	rawDb.SetConnMaxIdleTime(connMaxIdleTime)
 
-	if !cfg.SkipMigrations {
-		// Now that the database is open, populate the database with
-		// our set of schemas based on our embedded in-memory file
-		// system.
-		//
-		// First, we'll need to open up a new migration instance for
-		// our current target database: sqlite.
-		driver, err := postgres_migrate.WithInstance(
-			rawDb, &postgres_migrate.Config{},
-		)
-		if err != nil {
-			return nil, err
-		}
-
-		postgresFS := newReplacerFS(sqlSchemas, map[string]string{
-			"BLOB":                "BYTEA",
-			"INTEGER PRIMARY KEY": "SERIAL PRIMARY KEY",
-			"BIGINT PRIMARY KEY":  "BIGSERIAL PRIMARY KEY",
-			"TIMESTAMP":           "TIMESTAMP WITHOUT TIME ZONE",
-		})
-
-		err = applyMigrations(
-			postgresFS, driver, "sqlc/migrations", cfg.DBName,
-		)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	queries := sqlc.NewPostgres(rawDb)
-
-	return &PostgresStore{
+	s := &PostgresStore{
 		cfg: cfg,
 		BaseDB: &BaseDB{
 			DB:      rawDb,
 			Queries: queries,
 		},
-	}, nil
+	}
+
+	// Now that the database is open, populate it with our set of schemas
+	// based on our embedded in-memory file system.
+	if !cfg.SkipMigrations {
+		if err := s.ExecuteMigrations(TargetLatest); err != nil {
+			return nil, fmt.Errorf("error executing migrations: "+
+				"%w", err)
+		}
+	}
+
+	return s, nil
+}
+
+// ExecuteMigrations runs migrations for the Postgres database, applying
+// either all of them or migrating up or down to the given target version.
+func (s *PostgresStore) ExecuteMigrations(target MigrationTarget) error {
+	driver, err := postgres_migrate.WithInstance(
+		s.DB, &postgres_migrate.Config{},
+	)
+	if err != nil {
+		return fmt.Errorf("error creating postgres migration: %w", err)
+	}
+
+	postgresFS := newReplacerFS(sqlSchemas, postgresSchemaReplacements)
+	return applyMigrations(
+		postgresFS, driver, "sqlc/migrations", s.cfg.DBName, target,
+	)
 }
 
 // NewTestPostgresDB is a helper function that creates a Postgres database for
@@ -164,3 +171,27 @@ func NewTestPostgresDB(t *testing.T) *PostgresStore {
 
 	return store
 }
+
+// NewTestPostgresDBWithVersion is a helper function that creates a Postgres
+// database for testing and migrates it to the given version.
+func NewTestPostgresDBWithVersion(t *testing.T, version uint) *PostgresStore {
+	t.Helper()
+
+	t.Logf("Creating new Postgres DB for testing, migrating to version %d",
+		version)
+
+	sqlFixture := NewTestPgFixture(t, DefaultPostgresFixtureLifetime, true)
+	storeCfg := sqlFixture.GetConfig()
+	storeCfg.SkipMigrations = true
+	store, err := NewPostgresStore(storeCfg)
+	require.NoError(t, err)
+
+	err = store.ExecuteMigrations(TargetVersion(version))
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		sqlFixture.TearDown(t)
+	})
+
+	return store
+}
diff --git a/tapdb/sqlerrors.go b/tapdb/sqlerrors.go
index a9f3cb8f2..3b6afd381 100644
--- a/tapdb/sqlerrors.go
+++ b/tapdb/sqlerrors.go
@@ -3,6 +3,7 @@ package tapdb
 import (
 	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/jackc/pgconn"
 	"github.com/jackc/pgerrcode"
@@ -52,6 +53,20 @@ func parseSqliteError(sqliteErr *sqlite.Error) error {
 			DbError: sqliteErr,
 		}
 
+	// Generic error, need to parse the message further.
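+	// A typical example is querying a table that doesn't exist yet, for
+	// instance because the migration creating it hasn't run; sqlite
+	// reports that only as a generic error with a "no such table"
+	// message.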
+	case sqlite3.SQLITE_ERROR:
+		errMsg := sqliteErr.Error()
+
+		switch {
+		case strings.Contains(errMsg, "no such table"):
+			return &ErrSchemaError{
+				DbError: sqliteErr,
+			}
+
+		default:
+			return fmt.Errorf("unknown sqlite error: %w", sqliteErr)
+		}
+
 	default:
 		return fmt.Errorf("unknown sqlite error: %w", sqliteErr)
 	}
@@ -73,6 +88,12 @@ func parsePostgresError(pqErr *pgconn.PgError) error {
 			DbError: pqErr,
 		}
 
+	// Handle schema errors.
+	case pgerrcode.UndefinedColumn, pgerrcode.UndefinedTable:
+		return &ErrSchemaError{
+			DbError: pqErr,
+		}
+
 	default:
 		return fmt.Errorf("unknown postgres error: %w", pqErr)
 	}
@@ -111,3 +132,25 @@ func IsSerializationError(err error) bool {
 	var serializationError *ErrSerializationError
 	return errors.As(err, &serializationError)
 }
+
+// ErrSchemaError is an error type which represents a database agnostic error
+// indicating that the database schema is incorrect for the given query.
+type ErrSchemaError struct {
+	DbError error
+}
+
+// Unwrap returns the wrapped error.
+func (e ErrSchemaError) Unwrap() error {
+	return e.DbError
+}
+
+// Error returns the error message.
+func (e ErrSchemaError) Error() string {
+	return e.DbError.Error()
+}
+
+// IsSchemaError returns true if the given error is a schema error.
+func IsSchemaError(err error) bool {
+	var schemaError *ErrSchemaError
+	return errors.As(err, &schemaError)
+}
diff --git a/tapdb/sqlite.go b/tapdb/sqlite.go
index 500980fcb..ef60ed06d 100644
--- a/tapdb/sqlite.go
+++ b/tapdb/sqlite.go
@@ -36,6 +36,16 @@ const (
 	defaultConnMaxLifetime = 10 * time.Minute
 )
 
+var (
+	// sqliteSchemaReplacements is a map of schema strings that need to be
+	// replaced for sqlite. This is needed because sqlite doesn't directly
+	// support the BIGINT type for primary keys, so we need to replace it
+	// with INTEGER.
+	sqliteSchemaReplacements = map[string]string{
+		"BIGINT PRIMARY KEY": "INTEGER PRIMARY KEY",
+	}
+)
+
 // SqliteConfig holds all the config arguments needed to interact with our
 // sqlite DB.
 type SqliteConfig struct {
@@ -118,41 +128,41 @@ func NewSqliteStore(cfg *SqliteConfig) (*SqliteStore, error) {
 	db.SetMaxIdleConns(defaultMaxConns)
 	db.SetConnMaxLifetime(defaultConnMaxLifetime)
 
-	if !cfg.SkipMigrations {
-		// Now that the database is open, populate the database with
-		// our set of schemas based on our embedded in-memory file
-		// system.
-		//
-		// First, we'll need to open up a new migration instance for
-		// our current target database: sqlite.
-		driver, err := sqlite_migrate.WithInstance(
-			db, &sqlite_migrate.Config{},
-		)
-		if err != nil {
-			return nil, err
-		}
-
-		sqliteFS := newReplacerFS(sqlSchemas, map[string]string{
-			"BIGINT PRIMARY KEY": "INTEGER PRIMARY KEY",
-		})
-
-		err = applyMigrations(
-			sqliteFS, driver, "sqlc/migrations", "sqlc",
-		)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	queries := sqlc.NewSqlite(db)
-
-	return &SqliteStore{
+	s := &SqliteStore{
 		cfg: cfg,
 		BaseDB: &BaseDB{
 			DB:      db,
 			Queries: queries,
 		},
-	}, nil
+	}
+
+	// Now that the database is open, populate it with our set of schemas
+	// based on our embedded in-memory file system.
+	if !cfg.SkipMigrations {
+		if err := s.ExecuteMigrations(TargetLatest); err != nil {
+			return nil, fmt.Errorf("error executing migrations: "+
+				"%w", err)
+		}
+	}
+
+	return s, nil
+}
+
+// ExecuteMigrations runs migrations for the sqlite database, applying either
+// all of them or migrating up or down to the given target version.
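+// Note that golang-migrate tracks the current schema version in the database
+// itself and will migrate up or down as needed to reach the requested target.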
+func (s *SqliteStore) ExecuteMigrations(target MigrationTarget) error { + driver, err := sqlite_migrate.WithInstance( + s.DB, &sqlite_migrate.Config{}, + ) + if err != nil { + return fmt.Errorf("error creating sqlite migration: %w", err) + } + + sqliteFS := newReplacerFS(sqlSchemas, sqliteSchemaReplacements) + return applyMigrations( + sqliteFS, driver, "sqlc/migrations", "sqlite", target, + ) } // NewTestSqliteDB is a helper function that creates an SQLite database for @@ -177,3 +187,30 @@ func NewTestSqliteDB(t *testing.T) *SqliteStore { return sqlDB } + +// NewTestSqliteDBWithVersion is a helper function that creates an SQLite +// database for testing and migrates it to the given version. +func NewTestSqliteDBWithVersion(t *testing.T, version uint) *SqliteStore { + t.Helper() + + t.Logf("Creating new SQLite DB for testing, migrating to version %d", + version) + + // TODO(roasbeef): if we pass :memory: for the file name, then we get + // an in mem version to speed up tests + dbFileName := filepath.Join(t.TempDir(), "tmp.db") + sqlDB, err := NewSqliteStore(&SqliteConfig{ + DatabaseFileName: dbFileName, + SkipMigrations: true, + }) + require.NoError(t, err) + + err = sqlDB.ExecuteMigrations(TargetVersion(version)) + require.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, sqlDB.DB.Close()) + }) + + return sqlDB +} diff --git a/tapdb/sqlutils.go b/tapdb/sqlutils.go index 5df237c9d..71c293eca 100644 --- a/tapdb/sqlutils.go +++ b/tapdb/sqlutils.go @@ -1,15 +1,19 @@ package tapdb import ( + "context" "database/sql" "encoding/binary" "fmt" "io" + "regexp" "strconv" "testing" "time" "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/stretchr/testify/require" "golang.org/x/exp/constraints" ) @@ -173,3 +177,26 @@ func parseCoalesceNumericType[T constraints.Integer](value any) (T, error) { "value '%v' as number", value, value) } } + +// InsertTestdata reads the given file from the testdata directory and inserts +// its content into the given database. +func InsertTestdata(t *testing.T, db *BaseDB, fileName string) { + ctx := context.Background() + var opts AssetStoreTxOptions + tx, err := db.BeginTx(ctx, &opts) + require.NoError(t, err) + + testData := test.ReadTestDataFile(t, fileName) + + // If we're using Postgres, we need to convert the SQLite hex literals + // (X'') to Postgres hex literals ('\x'). + if db.Backend() == sqlc.BackendTypePostgres { + rex := regexp.MustCompile(`X'([0-9a-f]+?)'`) + testData = rex.ReplaceAllString(testData, `'\x$1'`) + t.Logf("Postgres test data: %v", testData) + } + + _, err = tx.Exec(testData) + require.NoError(t, err) + require.NoError(t, tx.Commit()) +} diff --git a/tapdb/test_postgres.go b/tapdb/test_postgres.go index b192f02c5..4a7a4009e 100644 --- a/tapdb/test_postgres.go +++ b/tapdb/test_postgres.go @@ -10,3 +10,9 @@ import ( func NewTestDB(t *testing.T) *PostgresStore { return NewTestPostgresDB(t) } + +// NewTestDBWithVersion is a helper function that creates a Postgres database +// for testing and migrates it to the given version. 
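+// An SQLite counterpart of the same name lives in test_sqlite.go, so the same
+// test code can run against either database backend.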
+func NewTestDBWithVersion(t *testing.T, version uint) *PostgresStore {
+	return NewTestPostgresDBWithVersion(t, version)
+}
diff --git a/tapdb/test_sqlite.go b/tapdb/test_sqlite.go
index eeb5e3ba1..98d4a9e14 100644
--- a/tapdb/test_sqlite.go
+++ b/tapdb/test_sqlite.go
@@ -10,3 +10,9 @@ func NewTestDB(t *testing.T) *SqliteStore {
 	return NewTestSqliteDB(t)
 }
+
+// NewTestDBWithVersion is a helper function that creates an SQLite database
+// for testing and migrates it to the given version.
+func NewTestDBWithVersion(t *testing.T, version uint) *SqliteStore {
+	return NewTestSqliteDBWithVersion(t, version)
+}
diff --git a/tapdb/testdata/migrations_test_00011_dummy_data.sql b/tapdb/testdata/migrations_test_00011_dummy_data.sql
new file mode 100644
index 000000000..86f6a8526
--- /dev/null
+++ b/tapdb/testdata/migrations_test_00011_dummy_data.sql
@@ -0,0 +1,55 @@
+-- This is some sample data to illustrate how we can insert test data during
+-- unit tests to test individual DB migration steps. The file name should always
+-- contain the version number of the DB schema that it will be applied _after_.
+-- The data must be in the format of SQLite (using the X'' syntax for BLOB
+-- values). It will be converted to Postgres ('\x') on the fly by the
+-- InsertTestdata helper function.
+-- The following steps can be used to dump a SQLite file:
+--   sqlite3 test.db
+--   .output /tmp/dump.sql
+--   .dump
+--   .exit
+--
+-- ATTENTION: This will output boolean values as numeric values (0/1) instead
+-- of true/false. But Postgres only understands true/false, so those values
+-- must be manually converted.
+INSERT INTO chain_txns VALUES(1,X'a1594fc379308b2a209f6d0bdb8602e9f87cf71fc232c69032b9a5fed28f9331',1980,X'02000000000101022cd51ca4d850c5f71ceedf7c50a08ff82d66612b22f631eac95e6b52cbbd2d0000000000ffffffff02e80300000000000022512018ac5a65a0d12e7846c89d24705e2697b1da14627978ba8db24bdbce21fc2aa85cd5f5050000000022512030263d67b4275144b2b00921d220a1311b9a4465fa656ba7d5754b421cb4308402483045022100fa32af97cab8a765dc347c3ff57b14f9810b6dbfc4d02727fb099d1ed875660602204cb66f3bbd92925707158b4aa67338c50a9ffddceb023875eb82b78b3967e007012102eb9cd2a22fd11c40823cb7b0f0fba4156138af69cf73c0644be54f4d46ba480700000000',441,X'4295613d85ccbc455159eb4ddd1e266ca10041d3c75726286b7dfeb3132c9c4f',1);
+INSERT INTO chain_txns VALUES(2,X'4bf6c11eca17b2961189b333590d37c91807d4bdfe27d399d5eb73f149c79c57',1980,X'02000000000101066fc9ac5c52372f8a671d863d7c06f882c6be93cfc4d55e0fec3debeea639350000000000ffffffff02e803000000000000225120f9b4ac57dfee8edabd502c13117c8d15b716083e19f2b4c751b7790743f4f1d05cd5f5050000000022512003cd69f8dea6ad2eeecd089711f62086d3ddcd608aff6af3274e2f5be3021bf0024830450221008a7193fc60406b465cc8432112f988264c5ff00974eee1345770e7d561b0a4a6022077fd0c3652f2ee6fa900496a3539fb43d274d5aee2cdb0feddbf9bf265b5ce2301210279d9d7f2377dd133f7db657e8e9829555dabf63a6dbb543879fefda87349fad900000000',442,X'9ab21cbc24c0b52624b91eec456cb4c7c95f28d30ca120092be5d532fffea556',1);
+
+INSERT INTO genesis_points VALUES(1,X'022cd51ca4d850c5f71ceedf7c50a08ff82d66612b22f631eac95e6b52cbbd2d00000000',1);
+INSERT INTO genesis_points VALUES(2,X'066fc9ac5c52372f8a671d863d7c06f882c6be93cfc4d55e0fec3debeea6393500000000',2);
+
+INSERT INTO assets_meta VALUES(1,X'2b990b7adb1faf51ccb9b1c73bc5e73926db39cdec8906d4fd3c6c423a3c9821',X'736f6d65206d65746164617461',0);
+
+INSERT INTO genesis_assets VALUES(1,X'add7d0d7cc37e58a7c0d8ad40b6904050d2baa25a1829f00689c4b27b524dd04','itestbuxx-collectible',1,0,1,1);
+INSERT INTO genesis_assets VALUES(2,X'c333a1aff9b61d231d72bdf446f404de41fcef0a485943b1e3e39d6b362e0223','itestbuxx',1,0,0,1);
+INSERT INTO genesis_assets VALUES(3,X'032d6c47653590d2f5ee85da3868fb1c069254cda2983ef8ac10a551cb234e8f','itestbuxx-money-printer-brrr',1,0,0,2);
+INSERT INTO genesis_assets VALUES(4,X'09ed5c8cc51d5037fef2bff77ea4daa7469e4d1912398ad15788af32e4cd131b','itestbuxx-collectible-brrr',1,0,1,2);
+
+INSERT INTO internal_keys VALUES(1,X'02827d74858d152da1fae12010ad8d3c46b595c2d4480512a6575925424617124f',212,0);
+INSERT INTO internal_keys VALUES(2,X'03efbcf2878876bae81ca9a7f6476764d2da38d565b9fb2b691e7bb22fd99f9e5e',212,2);
+INSERT INTO internal_keys VALUES(3,X'02b1f54a12c7336eb7061de2a48669ce1425b17f1af98fec07e6799e34ffb8952e',212,1);
+INSERT INTO internal_keys VALUES(4,X'0278b6cb348e51a5b422a58f5fb85a00d41dfa5eadbe4b12cc95786c374149ecb1',212,3);
+INSERT INTO internal_keys VALUES(5,X'022dfc94e1dbe0472ccab24af26ac3ffb5972e9070783432370d54148356dcdbcb',212,7);
+INSERT INTO internal_keys VALUES(6,X'03b75960cb31c18433bc23c875507c8031195399e9059c7e9210db806a0e24cdf3',212,6);
+INSERT INTO internal_keys VALUES(7,X'03a28195c3aef7e7cd0871c8ccff7f7e8e9a49cabc0714d3db5917c40362ace5f4',212,5);
+INSERT INTO internal_keys VALUES(8,X'0265d4de6502f8467056e2b766736042c2e95b5557bcf1927df08a7905f64bd195',212,4);
+
+INSERT INTO asset_groups VALUES(1,X'032953169c02b88ec3d7007860a6b645942002af67b7ec0c72c71e91f39d26f729',NULL,5,2);
+INSERT INTO asset_groups VALUES(2,X'033d22afa7dcfd4b31a784e779585a1da742850f37b89dd3d63e3cd4fa2ac282f6',NULL,7,2);
+
+INSERT INTO asset_group_witnesses VALUES(1,X'0140ef218618ef8af8a72ec969c22eec3e281ed14aad0cd0dcd86428fa52d4b17abbe725f025b7ab09236b95b930655626c8a7877e582035fe96640a0f0e95c3caf5',3,1);
+INSERT INTO asset_group_witnesses VALUES(2,X'0140115b9961b2bae244bdcd52c61d43e38a189c6531e73ede464aacda7b66a78456e2e175c9fc9b80d47e2c32e1752ddd45869e26f235ec5e1e82f0d6914b17f07f',4,2);
+
+INSERT INTO managed_utxos VALUES(1,X'a1594fc379308b2a209f6d0bdb8602e9f87cf71fc232c69032b9a5fed28f933100000000',1000,1,X'1dd3e2cf0bbbee32832c4deb57bbae58779fa599be0b8eb1f61e8c624157e2fa',NULL,X'1dd3e2cf0bbbee32832c4deb57bbae58779fa599be0b8eb1f61e8c624157e2fa',1,NULL,NULL);
+INSERT INTO managed_utxos VALUES(2,X'4bf6c11eca17b2961189b333590d37c91807d4bdfe27d399d5eb73f149c79c5700000000',1000,4,X'050d5a59d0ba1d475e4b650d0d8ba03bfd10227205923c79aa1f9c32e335b56c',NULL,X'050d5a59d0ba1d475e4b650d0d8ba03bfd10227205923c79aa1f9c32e335b56c',2,NULL,NULL);
+
+INSERT INTO script_keys VALUES(1,2,X'029c571fffcac1a1a7cd3372bd202ad8562f28e48b90f8a4eb714eca062f576ee6',NULL);
+INSERT INTO script_keys VALUES(2,3,X'02518f72c52f06689e9ba7b128beb9fcf5acf8fdfc22cd6a3c2507e0bc39c87c12',NULL);
+INSERT INTO script_keys VALUES(3,6,X'02e9321fab24e1a8027c43aae6102407ae083f07c1b55dc411605ba2cd2f5d0e90',NULL);
+INSERT INTO script_keys VALUES(4,8,X'023dfedb0b8aec81c440b5b180afe84b70dbc49e8a9d7d94b189f564f74394e1a0',NULL);
+
+INSERT INTO assets VALUES(1,1,1,1,NULL,0,1,0,0,NULL,NULL,1,false);
+INSERT INTO assets VALUES(2,2,0,2,NULL,0,5000,0,0,NULL,NULL,1,false);
+INSERT INTO assets VALUES(3,3,1,3,1,0,5000,0,0,NULL,NULL,2,false);
+INSERT INTO assets VALUES(4,4,0,4,2,0,1,0,0,NULL,NULL,2,false);