
Commit aff882c

Merge pull request #2201 from hirosystems/develop

release to master

2 parents f00f286 + 26c53dc

7 files changed: +210 -134 lines changed
@@ -0,0 +1,87 @@
+/* eslint-disable camelcase */
+
+exports.shorthands = undefined;
+
+exports.up = pgm => {
+  pgm.dropTable('nft_custody_unanchored');
+};
+
+exports.down = pgm => {
+  pgm.createTable('nft_custody_unanchored', {
+    asset_identifier: {
+      type: 'string',
+      notNull: true,
+    },
+    value: {
+      type: 'bytea',
+      notNull: true,
+    },
+    recipient: {
+      type: 'text',
+    },
+    block_height: {
+      type: 'integer',
+      notNull: true,
+    },
+    index_block_hash: {
+      type: 'bytea',
+      notNull: true,
+    },
+    parent_index_block_hash: {
+      type: 'bytea',
+      notNull: true,
+    },
+    microblock_hash: {
+      type: 'bytea',
+      notNull: true,
+    },
+    microblock_sequence: {
+      type: 'integer',
+      notNull: true,
+    },
+    tx_id: {
+      type: 'bytea',
+      notNull: true,
+    },
+    tx_index: {
+      type: 'smallint',
+      notNull: true,
+    },
+    event_index: {
+      type: 'integer',
+      notNull: true,
+    },
+  });
+  pgm.createConstraint('nft_custody_unanchored', 'nft_custody_unanchored_unique', 'UNIQUE(asset_identifier, value)');
+  pgm.createIndex('nft_custody_unanchored', ['recipient', 'asset_identifier']);
+  pgm.createIndex('nft_custody_unanchored', 'value');
+  pgm.createIndex('nft_custody_unanchored', [
+    { name: 'block_height', sort: 'DESC' },
+    { name: 'microblock_sequence', sort: 'DESC' },
+    { name: 'tx_index', sort: 'DESC' },
+    { name: 'event_index', sort: 'DESC' }
+  ]);
+  pgm.sql(`
+    INSERT INTO nft_custody_unanchored (asset_identifier, value, recipient, tx_id, block_height, index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, tx_index, event_index) (
+      SELECT
+        DISTINCT ON(asset_identifier, value) asset_identifier, value, recipient, tx_id, nft.block_height,
+        nft.index_block_hash, nft.parent_index_block_hash, nft.microblock_hash, nft.microblock_sequence, nft.tx_index, nft.event_index
+      FROM
+        nft_events AS nft
+      INNER JOIN
+        txs USING (tx_id)
+      WHERE
+        txs.canonical = true
+        AND txs.microblock_canonical = true
+        AND nft.canonical = true
+        AND nft.microblock_canonical = true
+      ORDER BY
+        asset_identifier,
+        value,
+        txs.block_height DESC,
+        txs.microblock_sequence DESC,
+        txs.tx_index DESC,
+        nft.event_index DESC
+    )
+  `);
+};
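
Taken together, `up` drops the now-redundant nft_custody_unanchored table and `down` restores it, backfilling from canonical nft_events so the migration stays reversible. A minimal sketch of applying it programmatically; the node-pg-migrate runner options and the migrations/ directory are assumptions, neither appears in this diff:

import runner from 'node-pg-migrate';

async function migrate(direction: 'up' | 'down'): Promise<void> {
  await runner({
    // Placeholder connection string.
    databaseUrl: process.env.PG_CONNECTION_URI ?? 'postgres://localhost:5432/stacks_blockchain_api',
    dir: 'migrations',               // assumed location of this migration file
    migrationsTable: 'pgmigrations', // node-pg-migrate's default bookkeeping table
    direction,
    count: Infinity,                 // apply every pending migration
  });
}

migrate('up').catch(err => {
  console.error('migration failed', err);
  process.exit(1);
});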

src/api/routes/tokens.ts (-3 lines)

@@ -49,7 +49,6 @@ export const TokenRoutes: FastifyPluginAsync<
         ),
         limit: LimitParam(ResourceType.Token, 'Limit', 'max number of tokens to fetch'),
         offset: OffsetParam('Offset', 'index of first tokens to fetch'),
-        unanchored: UnanchoredParamSchema,
         tx_metadata: Type.Boolean({
           default: false,
           description:
@@ -95,15 +94,13 @@
 
       const limit = getPagingQueryLimit(ResourceType.Token, req.query.limit);
       const offset = parsePagingQueryInput(req.query.offset ?? 0);
-      const includeUnanchored = req.query.unanchored ?? false;
       const includeTxMetadata = req.query.tx_metadata ?? false;
 
       const { results, total } = await fastify.db.getNftHoldings({
         principal: principal,
         assetIdentifiers: assetIdentifiers,
         offset: offset,
         limit: limit,
-        includeUnanchored: includeUnanchored,
         includeTxMetadata: includeTxMetadata,
       });
       const parsedResults = results.map(result => {
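
With unanchored custody folded into nft_custody, the `unanchored` query parameter disappears from the NFT holdings route. A hedged client-side sketch of the call after this change; the host and path follow the published Stacks Blockchain API (/extended/v1/tokens/nft/holdings) and the principal is illustrative:

// Requires Node 18+ for the built-in fetch.
async function fetchNftHoldings(principal: string): Promise<void> {
  const url = new URL('https://api.hiro.so/extended/v1/tokens/nft/holdings');
  url.searchParams.set('principal', principal);
  url.searchParams.set('limit', '50');
  // Note: no `unanchored` parameter any more; the route schema no longer declares it.

  const res = await fetch(url);
  if (!res.ok) throw new Error(`request failed: ${res.status}`);
  const body = (await res.json()) as { total: number; results: unknown[] };
  console.log(`${principal} holds ${body.total} NFTs`);
}

fetchNftHoldings('SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7').catch(console.error);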

src/datastore/pg-store.ts (+10 -5 lines)

@@ -3356,16 +3356,12 @@ export class PgStore extends BasePgStore {
     assetIdentifiers?: string[];
     limit: number;
     offset: number;
-    includeUnanchored: boolean;
     includeTxMetadata: boolean;
   }): Promise<{ results: NftHoldingInfoWithTxMetadata[]; total: number }> {
     const queryArgs: (string | string[] | number)[] = [args.principal, args.limit, args.offset];
     if (args.assetIdentifiers) {
       queryArgs.push(args.assetIdentifiers);
     }
-    const nftCustody = args.includeUnanchored
-      ? this.sql(`nft_custody_unanchored`)
-      : this.sql(`nft_custody`);
     const assetIdFilter =
       args.assetIdentifiers && args.assetIdentifiers.length > 0
         ? this.sql`AND nft.asset_identifier IN ${this.sql(args.assetIdentifiers)}`
@@ -3375,7 +3371,7 @@
     >`
       WITH nft AS (
         SELECT *, (COUNT(*) OVER())::INTEGER AS count
-        FROM ${nftCustody} AS nft
+        FROM nft_custody AS nft
         WHERE nft.recipient = ${args.principal}
         ${assetIdFilter}
         ORDER BY block_height DESC, microblock_sequence DESC, tx_index DESC, event_index DESC
@@ -4528,4 +4524,13 @@
     `;
     if (result.count) return result[0];
   }
+
+  async getStacksBlockCountAtPreviousBurnBlock(): Promise<number> {
+    const result = await this.sql<{ count: string }[]>`
+      SELECT COUNT(*) AS count
+      FROM blocks
+      WHERE burn_block_height = (SELECT burn_block_height - 1 FROM chain_tip) AND canonical = TRUE
+    `;
+    return parseInt(result[0]?.count ?? '0');
+  }
 }
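
The new getStacksBlockCountAtPreviousBurnBlock feeds the Prometheus gauge added in the event-server changes below: it counts canonical Stacks blocks anchored one burn block below the tracked chain tip. The same query as a standalone sketch using the postgres.js client this datastore wraps; the connection string is a placeholder, the table shapes come from the diff above:

import postgres from 'postgres';

const sql = postgres(process.env.PG_CONNECTION_URI ?? 'postgres://localhost:5432/stacks_blockchain_api');

async function stacksBlocksInPreviousBurnBlock(): Promise<number> {
  const rows = await sql<{ count: string }[]>`
    SELECT COUNT(*) AS count
    FROM blocks
    WHERE burn_block_height = (SELECT burn_block_height - 1 FROM chain_tip)
      AND canonical = TRUE
  `;
  // COUNT(*) comes back as a string; default to 0 on an empty database.
  return parseInt(rows[0]?.count ?? '0', 10);
}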

src/datastore/pg-write-store.ts (+57 -67 lines)

@@ -1416,10 +1416,9 @@ export class PgWriteStore extends PgStore {
         INSERT INTO nft_events ${sql(nftEventInserts)}
       `;
       if (tx.canonical && tx.microblock_canonical) {
-        const table = microblock ? sql`nft_custody_unanchored` : sql`nft_custody`;
         await sql`
-          INSERT INTO ${table} ${sql(Array.from(custodyInsertsMap.values()))}
-          ON CONFLICT ON CONSTRAINT ${table}_unique DO UPDATE SET
+          INSERT INTO nft_custody ${sql(Array.from(custodyInsertsMap.values()))}
+          ON CONFLICT ON CONSTRAINT nft_custody_unique DO UPDATE SET
             tx_id = EXCLUDED.tx_id,
             index_block_hash = EXCLUDED.index_block_hash,
             parent_index_block_hash = EXCLUDED.parent_index_block_hash,
@@ -1431,22 +1430,22 @@
             block_height = EXCLUDED.block_height
           WHERE
             (
-              EXCLUDED.block_height > ${table}.block_height
+              EXCLUDED.block_height > nft_custody.block_height
             )
             OR (
-              EXCLUDED.block_height = ${table}.block_height
-              AND EXCLUDED.microblock_sequence > ${table}.microblock_sequence
+              EXCLUDED.block_height = nft_custody.block_height
+              AND EXCLUDED.microblock_sequence > nft_custody.microblock_sequence
             )
             OR (
-              EXCLUDED.block_height = ${table}.block_height
-              AND EXCLUDED.microblock_sequence = ${table}.microblock_sequence
-              AND EXCLUDED.tx_index > ${table}.tx_index
+              EXCLUDED.block_height = nft_custody.block_height
+              AND EXCLUDED.microblock_sequence = nft_custody.microblock_sequence
+              AND EXCLUDED.tx_index > nft_custody.tx_index
             )
             OR (
-              EXCLUDED.block_height = ${table}.block_height
-              AND EXCLUDED.microblock_sequence = ${table}.microblock_sequence
-              AND EXCLUDED.tx_index = ${table}.tx_index
-              AND EXCLUDED.event_index > ${table}.event_index
+              EXCLUDED.block_height = nft_custody.block_height
+              AND EXCLUDED.microblock_sequence = nft_custody.microblock_sequence
+              AND EXCLUDED.tx_index = nft_custody.tx_index
+              AND EXCLUDED.event_index > nft_custody.event_index
             )
         `;
       }
@@ -1781,6 +1780,12 @@
     });
   }
 
+  async updateBurnChainBlockHeight(args: { blockHeight: number }): Promise<void> {
+    await this.sql`
+      UPDATE chain_tip SET burn_block_height = GREATEST(${args.blockHeight}, burn_block_height)
+    `;
+  }
+
   async insertSlotHoldersBatch(sql: PgSqlClient, slotHolders: DbRewardSlotHolder[]): Promise<void> {
     const slotValues: RewardSlotHolderInsertValues[] = slotHolders.map(slot => ({
       canonical: true,
@@ -2515,10 +2520,6 @@
       AND (index_block_hash = ${args.indexBlockHash} OR index_block_hash = '\\x'::bytea)
       AND tx_id IN ${sql(txIds)}
     `;
-    await this.updateNftCustodyFromReOrg(sql, {
-      index_block_hash: args.indexBlockHash,
-      microblocks: args.microblocks,
-    });
   }
 
   // Update unanchored tx count in `chain_tip` table
@@ -2539,54 +2540,46 @@
     sql: PgSqlClient,
     args: {
       index_block_hash: string;
-      microblocks: string[];
     }
   ): Promise<void> {
-    for (const table of [sql`nft_custody`, sql`nft_custody_unanchored`]) {
-      await sql`
-        INSERT INTO ${table}
-        (asset_identifier, value, tx_id, index_block_hash, parent_index_block_hash, microblock_hash,
-          microblock_sequence, recipient, event_index, tx_index, block_height)
-        (
-          SELECT
-            DISTINCT ON(asset_identifier, value) asset_identifier, value, tx_id, txs.index_block_hash,
-            txs.parent_index_block_hash, txs.microblock_hash, txs.microblock_sequence, recipient,
-            nft.event_index, txs.tx_index, txs.block_height
-          FROM
-            nft_events AS nft
-          INNER JOIN
-            txs USING (tx_id)
-          WHERE
-            txs.canonical = true
-            AND txs.microblock_canonical = true
-            AND nft.canonical = true
-            AND nft.microblock_canonical = true
-            AND nft.index_block_hash = ${args.index_block_hash}
-            ${
-              args.microblocks.length > 0
-                ? sql`AND nft.microblock_hash IN ${sql(args.microblocks)}`
-                : sql``
-            }
-          ORDER BY
-            asset_identifier,
-            value,
-            txs.block_height DESC,
-            txs.microblock_sequence DESC,
-            txs.tx_index DESC,
-            nft.event_index DESC
-        )
-        ON CONFLICT ON CONSTRAINT ${table}_unique DO UPDATE SET
-          tx_id = EXCLUDED.tx_id,
-          index_block_hash = EXCLUDED.index_block_hash,
-          parent_index_block_hash = EXCLUDED.parent_index_block_hash,
-          microblock_hash = EXCLUDED.microblock_hash,
-          microblock_sequence = EXCLUDED.microblock_sequence,
-          recipient = EXCLUDED.recipient,
-          event_index = EXCLUDED.event_index,
-          tx_index = EXCLUDED.tx_index,
-          block_height = EXCLUDED.block_height
-      `;
-    }
+    await sql`
+      INSERT INTO nft_custody
+      (asset_identifier, value, tx_id, index_block_hash, parent_index_block_hash, microblock_hash,
+        microblock_sequence, recipient, event_index, tx_index, block_height)
+      (
+        SELECT
+          DISTINCT ON(asset_identifier, value) asset_identifier, value, tx_id, txs.index_block_hash,
+          txs.parent_index_block_hash, txs.microblock_hash, txs.microblock_sequence, recipient,
+          nft.event_index, txs.tx_index, txs.block_height
+        FROM
+          nft_events AS nft
+        INNER JOIN
+          txs USING (tx_id)
+        WHERE
+          txs.canonical = true
+          AND txs.microblock_canonical = true
+          AND nft.canonical = true
+          AND nft.microblock_canonical = true
+          AND nft.index_block_hash = ${args.index_block_hash}
+        ORDER BY
+          asset_identifier,
+          value,
+          txs.block_height DESC,
+          txs.microblock_sequence DESC,
+          txs.tx_index DESC,
+          nft.event_index DESC
+      )
+      ON CONFLICT ON CONSTRAINT nft_custody_unique DO UPDATE SET
+        tx_id = EXCLUDED.tx_id,
+        index_block_hash = EXCLUDED.index_block_hash,
+        parent_index_block_hash = EXCLUDED.parent_index_block_hash,
+        microblock_hash = EXCLUDED.microblock_hash,
+        microblock_sequence = EXCLUDED.microblock_sequence,
+        recipient = EXCLUDED.recipient,
+        event_index = EXCLUDED.event_index,
+        tx_index = EXCLUDED.tx_index,
+        block_height = EXCLUDED.block_height
+    `;
   }
 
   /**
@@ -3050,10 +3043,7 @@
         updatedEntities.markedNonCanonical.nftEvents += nftResult.count;
       }
       if (nftResult.count)
-        await this.updateNftCustodyFromReOrg(sql, {
-          index_block_hash: indexBlockHash,
-          microblocks: [],
-        });
+        await this.updateNftCustodyFromReOrg(sql, { index_block_hash: indexBlockHash });
     });
     q.enqueue(async () => {
       const pox2Result = await sql`
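
Both the insert path and the re-org path above resolve conflicts with the same rule: the incoming row only replaces the stored one when it is strictly newer under the (block_height, microblock_sequence, tx_index, event_index) ordering. A plain-TypeScript sketch of that rule, using an illustrative record type rather than anything from the codebase:

// Illustrative type; field names mirror the nft_custody columns.
interface CustodyRow {
  block_height: number;
  microblock_sequence: number;
  tx_index: number;
  event_index: number;
}

// Positive result means `incoming` is newer and should win the upsert,
// matching the chained OR conditions in the ON CONFLICT ... WHERE clause.
function recency(incoming: CustodyRow, stored: CustodyRow): number {
  return (
    incoming.block_height - stored.block_height ||
    incoming.microblock_sequence - stored.microblock_sequence ||
    incoming.tx_index - stored.tx_index ||
    incoming.event_index - stored.event_index
  );
}

// Example: same block and microblock, later tx wins.
const stored = { block_height: 10, microblock_sequence: 2, tx_index: 5, event_index: 0 };
const incoming = { block_height: 10, microblock_sequence: 2, tx_index: 7, event_index: 0 };
console.log(recency(incoming, stored) > 0); // true, so the row would be updated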

src/event-stream/event-server.ts (+25 -10 lines)

@@ -130,6 +130,7 @@ async function handleBurnBlockMessage(
     burnchainBlockHeight: burnBlockMsg.burn_block_height,
     slotHolders: slotHolders,
   });
+  await db.updateBurnChainBlockHeight({ blockHeight: burnBlockMsg.burn_block_height });
 }
 
 async function handleMempoolTxsMessage(rawTxs: string[], db: PgWriteStore): Promise<void> {
@@ -631,18 +632,32 @@ interface EventMessageHandler {
   handleNewAttachment(msg: CoreNodeAttachmentMessage[], db: PgWriteStore): Promise<void> | void;
 }
 
-function createMessageProcessorQueue(): EventMessageHandler {
+function createMessageProcessorQueue(db: PgWriteStore): EventMessageHandler {
   // Create a promise queue so that only one message is handled at a time.
   const processorQueue = new PQueue({ concurrency: 1 });
 
-  let eventTimer: prom.Histogram<'event'> | undefined;
+  let metrics:
+    | {
+        eventTimer: prom.Histogram;
+        blocksInPreviousBurnBlock: prom.Gauge;
+      }
+    | undefined;
   if (isProdEnv) {
-    eventTimer = new prom.Histogram({
-      name: 'stacks_event_ingestion_timers',
-      help: 'Event ingestion timers',
-      labelNames: ['event'],
-      buckets: prom.exponentialBuckets(50, 3, 10), // 10 buckets, from 50 ms to 15 minutes
-    });
+    metrics = {
+      eventTimer: new prom.Histogram({
+        name: 'stacks_event_ingestion_timers',
+        help: 'Event ingestion timers',
+        labelNames: ['event'],
+        buckets: prom.exponentialBuckets(50, 3, 10), // 10 buckets, from 50 ms to 15 minutes
+      }),
+      blocksInPreviousBurnBlock: new prom.Gauge({
+        name: 'stacks_blocks_in_previous_burn_block',
+        help: 'Number of Stacks blocks produced in the previous burn block',
+        async collect() {
+          this.set(await db.getStacksBlockCountAtPreviousBurnBlock());
+        },
+      }),
+    };
   }
 
   const observeEvent = async (event: string, fn: () => Promise<void>) => {
@@ -651,7 +666,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
       await fn();
     } finally {
       const elapsedMs = timer.getElapsed();
-      eventTimer?.observe({ event }, elapsedMs);
+      metrics?.eventTimer.observe({ event }, elapsedMs);
     }
   };
 
@@ -738,7 +753,7 @@ export async function startEventServer(opts: {
   serverPort?: number;
 }): Promise<EventStreamServer> {
   const db = opts.datastore;
-  const messageHandler = opts.messageHandler ?? createMessageProcessorQueue();
+  const messageHandler = opts.messageHandler ?? createMessageProcessorQueue(db);
 
   let eventHost = opts.serverHost ?? process.env['STACKS_CORE_EVENT_HOST'];
   const eventPort = opts.serverPort ?? parseInt(process.env['STACKS_CORE_EVENT_PORT'] ?? '', 10);
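
The new gauge relies on prom-client's collect() hook, which runs on every scrape, so the blocks-in-previous-burn-block figure is computed lazily from the database rather than pushed during ingestion. A self-contained sketch of the pattern; the stubbed function stands in for the PgStore call:

import * as prom from 'prom-client';

// Stand-in for db.getStacksBlockCountAtPreviousBurnBlock().
async function fetchBlockCount(): Promise<number> {
  return 3;
}

const gauge = new prom.Gauge({
  name: 'stacks_blocks_in_previous_burn_block',
  help: 'Number of Stacks blocks produced in the previous burn block',
  // collect() may be async; prom-client awaits it before serializing a scrape.
  async collect() {
    this.set(await fetchBlockCount());
  },
});

// register.metrics() resolves to the text exposition format, triggering collect().
prom.register.metrics().then(text => console.log(text));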
