Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 5 additions & 8 deletions tapdb/assets_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -3312,14 +3312,11 @@ func (a *AssetStore) LogAnchorTxConfirm(ctx context.Context,
// in the transfer.
for _, b := range burns {
_, err = q.InsertBurn(ctx, sqlc.InsertBurnParams{
TransferID: int32(assetTransfer.ID),
Note: sql.NullString{
String: b.Note,
Valid: b.Note != "",
},
AssetID: b.AssetID,
GroupKey: b.GroupKey,
Amount: int64(b.Amount),
TransferID: assetTransfer.ID,
Note: sqlStr(b.Note),
AssetID: b.AssetID,
GroupKey: b.GroupKey,
Amount: int64(b.Amount),
})
if err != nil {
return fmt.Errorf("failed to insert burn in "+
Expand Down
4 changes: 2 additions & 2 deletions tapdb/assets_store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2865,7 +2865,7 @@ func TestQueryAssetBurns(t *testing.T) {
assetID := inputAsset.ID()

_, err = assetsStore.db.InsertBurn(ctx, sqlc.InsertBurnParams{
TransferID: int32(assetTransfers[0].ID),
TransferID: assetTransfers[0].ID,
Note: sql.NullString{
String: "burn",
Valid: true,
Expand All @@ -2883,7 +2883,7 @@ func TestQueryAssetBurns(t *testing.T) {
require.Len(t, burns, 1)

_, err = assetsStore.db.InsertBurn(ctx, sqlc.InsertBurnParams{
TransferID: int32(assetTransfers[0].ID),
TransferID: assetTransfers[0].ID,
Note: sql.NullString{
String: "burn",
Valid: true,
Expand Down
2 changes: 1 addition & 1 deletion tapdb/migrations.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ const (
// daemon.
//
// NOTE: This MUST be updated when a new migration is added.
LatestMigrationVersion = 36
LatestMigrationVersion = 37
)

// DatabaseBackend is an interface that contains all methods our different
Expand Down
24 changes: 24 additions & 0 deletions tapdb/migrations_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -670,3 +670,27 @@ func TestMigration33(t *testing.T) {
),
)
}

// TestMigration37 tests that the Golang based post-migration check for the
// asset burn insertion works as expected.
func TestMigration37(t *testing.T) {
	ctxb := context.Background()

	// Start from the version just before the migration under test.
	testDB := NewTestDBWithVersion(t, 36)

	// Seed the database with rows that migration number 37 will operate
	// on.
	InsertTestdata(t, testDB.BaseDB, "migrations_test_00037_dummy_data.sql")

	// Now run all remaining migrations, including the Golang based
	// post-step callback that back-fills the burn table.
	require.NoError(t, testDB.ExecuteMigrations(
		TargetLatest, WithPostStepCallbacks(
			makePostStepCallbacks(testDB, postMigrationChecks),
		),
	))

	// The post-migration check should have detected and inserted all five
	// burns from the test data.
	burns, err := testDB.QueryBurns(ctxb, QueryBurnsFilters{})
	require.NoError(t, err)
	require.Len(t, burns, 5)
}
103 changes: 102 additions & 1 deletion tapdb/post_migration_checks.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"time"

"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
"github.com/lightninglabs/taproot-assets/asset"
Expand All @@ -20,6 +21,11 @@ const (
// Migration33ScriptKeyType is the version of the migration that
// introduces the script key type.
Migration33ScriptKeyType = 33

// Migration37InsertAssetBurns is the version of the migration that
// inserts the asset burns into the specific asset burns table by
// querying all assets and detecting burns from their witnesses.
Migration37InsertAssetBurns = 37
)

// postMigrationCheck is a function type for a function that performs a
Expand All @@ -32,7 +38,8 @@ var (
// applied. These functions are used to perform additional checks on the
// database state that are not fully expressible in SQL.
postMigrationChecks = map[uint]postMigrationCheck{
Migration33ScriptKeyType: determineAndAssignScriptKeyType,
Migration33ScriptKeyType: determineAndAssignScriptKeyType,
Migration37InsertAssetBurns: insertAssetBurns,
}
)

Expand Down Expand Up @@ -216,3 +223,97 @@ func determineAndAssignScriptKeyType(ctx context.Context,

return nil
}

// insertAssetBurns queries all assets and detects burns from their witnesses,
// then inserts the asset burns into the specific asset burns table. It is run
// as the post-migration check for migration 37 and is idempotent: burns that
// are already present in the burn table are skipped.
func insertAssetBurns(ctx context.Context, q sqlc.Querier) error {
	defaultClock := clock.NewDefaultClock()

	// NOTE: the previous log line ("Detecting script key types") was
	// copied from the migration 33 check; this check detects asset burns.
	log.Debugf("Detecting asset burns")

	// We start by fetching all assets, even the spent ones. We then collect
	// a list of the burn keys from the assets (because burn keys can only
	// be calculated from the asset's witness).
	assetFilter := QueryAssetFilters{
		Now: sql.NullTime{
			Time:  defaultClock.Now().UTC(),
			Valid: true,
		},
	}
	dbAssets, assetWitnesses, err := fetchAssetsWithWitness(
		ctx, q, assetFilter,
	)
	if err != nil {
		return fmt.Errorf("error fetching assets: %w", err)
	}

	chainAssets, err := dbAssetsToChainAssets(
		dbAssets, assetWitnesses, defaultClock,
	)
	if err != nil {
		return fmt.Errorf("error converting assets: %w", err)
	}

	// Only assets whose witness marks them as burns are of interest here.
	burnAssets := fn.Filter(chainAssets, func(a *asset.ChainAsset) bool {
		return a.IsBurn()
	})

	// Fetch the burns already tracked in the burn table so we don't insert
	// duplicates when this check is re-run.
	burnsInTable, err := q.QueryBurns(ctx, sqlc.QueryBurnsParams{})
	if err != nil {
		return fmt.Errorf("error querying burns: %w", err)
	}

	// burnsMatch reports whether a burn table row refers to the same burn
	// event as the given chain asset: same anchor transaction, asset ID
	// and amount.
	burnsMatch := func(b sqlc.QueryBurnsRow, a *asset.ChainAsset) bool {
		txMatch := (chainhash.Hash(b.AnchorTxid)) == a.AnchorTx.TxHash()
		assetIDMatch := (asset.ID(b.AssetID)) == a.ID()
		amountMatch := uint64(b.Amount) == a.Amount
		return txMatch && assetIDMatch && amountMatch
	}
	burnAssetsNotInTable := fn.Filter(
		burnAssets, func(a *asset.ChainAsset) bool {
			return fn.NotAny(
				burnsInTable, func(b sqlc.QueryBurnsRow) bool {
					return burnsMatch(b, a)
				},
			)
		},
	)

	log.Debugf("Found %d asset burns not in burn table, adding them now",
		len(burnAssetsNotInTable))
	for _, burnAsset := range burnAssetsNotInTable {
		// A burn row references the transfer it was part of, so we
		// look up the transfer by the burn's anchor transaction hash.
		assetTransfers, err := q.QueryAssetTransfers(ctx, TransferQuery{
			AnchorTxHash: fn.ByteSlice(burnAsset.AnchorTx.TxHash()),
		})
		if err != nil {
			return fmt.Errorf("unable to query asset transfers: %w",
				err)
		}
		if len(assetTransfers) != 1 {
			// We can't reliably attribute the burn to a transfer,
			// so we log and skip rather than failing the whole
			// migration.
			log.Warnf("Found %d asset transfers for burn asset "+
				"%s, expected 1: %v", len(assetTransfers),
				burnAsset.ID(), assetTransfers)

			continue
		}
		assetTransfer := assetTransfers[0]

		// The group key is optional; only serialize it if the asset
		// actually belongs to a group.
		var groupKeyBytes []byte
		if burnAsset.GroupKey != nil {
			gk := burnAsset.GroupKey.GroupPubKey
			groupKeyBytes = gk.SerializeCompressed()
		}

		_, err = q.InsertBurn(ctx, sqlc.InsertBurnParams{
			TransferID: assetTransfer.ID,
			AssetID:    fn.ByteSlice(burnAsset.ID()),
			GroupKey:   groupKeyBytes,
			Amount:     int64(burnAsset.Amount),
		})
		if err != nil {
			return fmt.Errorf("error inserting burn: %w", err)
		}
	}

	return nil
}
31 changes: 31 additions & 0 deletions tapdb/sqlc/migrations/000037_insert_asset_burns_migration.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
-- We roll back the transfer ID type change from BIGINT to INTEGER.
-- SQLite cannot ALTER a column's type in place, so we use the standard
-- rebuild pattern: create a corrected copy of the table, copy all rows
-- over, drop the old table, then rename the copy into place.

CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected (
    -- The auto-incrementing integer that identifies this burn transfer.
    burn_id INTEGER PRIMARY KEY,

    -- A reference to the primary key of the transfer that includes this burn.
    transfer_id INTEGER NOT NULL REFERENCES asset_transfers(id),

    -- A note that may contain user defined metadata.
    note TEXT,

    -- The asset id of the burnt asset.
    asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id),

    -- The group key of the group the burnt asset belonged to.
    group_key BLOB REFERENCES asset_groups(tweaked_group_key),

    -- The amount of the asset that was burned.
    amount BIGINT NOT NULL
);

-- Copy every existing burn row into the rebuilt table, preserving the
-- primary key values.
INSERT INTO asset_burn_transfers_corrected (
    burn_id, transfer_id, note, asset_id, group_key, amount
)
SELECT burn_id, transfer_id, note, asset_id, group_key, amount
FROM asset_burn_transfers;

DROP TABLE asset_burn_transfers;

-- Swap the rebuilt table into the original table's name.
ALTER TABLE asset_burn_transfers_corrected RENAME TO asset_burn_transfers;
32 changes: 32 additions & 0 deletions tapdb/sqlc/migrations/000037_insert_asset_burns_migration.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- We need to modify the transfer ID to be a BIGINT instead of INTEGER,
-- otherwise at some point things will break unexpectedly.
-- SQLite cannot ALTER a column's type in place, so we use the standard
-- rebuild pattern: create a corrected copy of the table, copy all rows
-- over, drop the old table, then rename the copy into place.

CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected (
    -- The auto-incrementing integer that identifies this burn transfer.
    burn_id INTEGER PRIMARY KEY,

    -- A reference to the primary key of the transfer that includes this burn.
    transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id),

    -- A note that may contain user defined metadata.
    note TEXT,

    -- The asset id of the burnt asset.
    asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id),

    -- The group key of the group the burnt asset belonged to.
    group_key BLOB REFERENCES asset_groups(tweaked_group_key),

    -- The amount of the asset that was burned.
    amount BIGINT NOT NULL
);

-- Copy every existing burn row into the rebuilt table, preserving the
-- primary key values.
INSERT INTO asset_burn_transfers_corrected (
    burn_id, transfer_id, note, asset_id, group_key, amount
)
SELECT burn_id, transfer_id, note, asset_id, group_key, amount
FROM asset_burn_transfers;

DROP TABLE asset_burn_transfers;

-- Swap the rebuilt table into the original table's name.
ALTER TABLE asset_burn_transfers_corrected RENAME TO asset_burn_transfers;
2 changes: 1 addition & 1 deletion tapdb/sqlc/models.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 4 additions & 4 deletions tapdb/sqlc/schemas/generated_schema.sql
Original file line number Diff line number Diff line change
Expand Up @@ -96,13 +96,13 @@ CREATE TABLE addrs (
proof_courier_addr BLOB NOT NULL
);

CREATE TABLE asset_burn_transfers (
CREATE TABLE "asset_burn_transfers" (
-- The auto-incrementing integer that identifies this burn transfer.
burn_id INTEGER PRIMARY KEY,
burn_id INTEGER PRIMARY KEY,

-- A reference to the primary key of the transfer that includes this burn.
transfer_id INTEGER NOT NULL REFERENCES asset_transfers(id),
transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id),

-- A note that may contain user defined metadata.
note TEXT,

Expand Down
2 changes: 1 addition & 1 deletion tapdb/sqlc/transfers.sql.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading
Loading