
Commit 6b94028

Merge pull request #6255 from Algo-devops-service/relstable4.0.2
2 parents 63d271c + d94b696

Note: large commits have some content hidden by default; only part of the diff is shown below.

61 files changed: +2456 -1929 lines

.circleci/config.yml

+2 -2

@@ -47,11 +47,11 @@ executors:
     resource_class: arm.large
   mac_arm64_medium:
     macos:
-      xcode: 14.2.0
+      xcode: 14.3.1
     resource_class: macos.m1.medium.gen1
   mac_arm64_large:
     macos:
-      xcode: 14.2.0
+      xcode: 14.3.1
     resource_class: macos.m1.large.gen1

 slack-fail-stop-step: &slack-fail-post-step

.github/workflows/build.yml

+1 -1

@@ -29,7 +29,7 @@ jobs:
         go-version: ${{ env.GO_VERSION }}
     - name: Restore libsodium from cache
       id: cache-libsodium
-      uses: actions/cache@v3.3.1
+      uses: actions/cache@v4
       with:
         path: crypto/libs
         key: libsodium-fork-v2-${{ runner.os }}-${{ hashFiles('crypto/libsodium-fork/**') }}

.github/workflows/container.yml

+1 -1

@@ -10,7 +10,7 @@ on:
 jobs:
   build-and-push:
     name: Build and Push to DockerHub
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - name: Checkout Code
         uses: actions/checkout@v4

.github/workflows/reviewdog.yml

+1 -1

@@ -55,7 +55,7 @@ jobs:
       run: mkdir -p cicdtmp/golangci-lint
     - name: Check if custom golangci-lint is already built
       id: cache-golangci-lint
-      uses: actions/cache@v3.3.1
+      uses: actions/cache@v4
       with:
         path: cicdtmp/golangci-lint/golangci-lint-cgo
         key: cicd-golangci-lint-cgo-v0.0.3-${{ env.GO_VERSION }}-${{ env.GOLANGCI_LINT_VERSION }}

Makefile

+4 -4

@@ -176,16 +176,16 @@ universal:
 ifeq ($(OS_TYPE),darwin)
 	# build amd64 Mac binaries
 	mkdir -p $(GOPATH1)/bin-darwin-amd64
-	CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=12.0" --host=x86_64-apple-darwin' $(MAKE)
+	CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE)

 	# build arm64 Mac binaries
 	mkdir -p $(GOPATH1)/bin-darwin-arm64
-	CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=12.0" --host=aarch64-apple-darwin' $(MAKE)
+	CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE)

 	# same for buildsrc-special
 	cd tools/block-generator && \
-	CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=12.0" --host=x86_64-apple-darwin' $(MAKE)
-	CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=12.0" --host=aarch64-apple-darwin' $(MAKE)
+	CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE)
+	CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE)

 	# lipo together
 	mkdir -p $(GOPATH1)/bin

buildnumber.dat

+1 -1

@@ -1 +1 @@
-1
+2

catchup/catchpointService.go

+3 -1

@@ -317,16 +317,18 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() error {
 			start := time.Now()
 			err0 = lf.downloadLedger(cs.ctx, peer, round)
 			if err0 == nil {
-				cs.log.Infof("ledger downloaded in %d seconds", time.Since(start)/time.Second)
+				cs.log.Infof("ledger downloaded from %s in %d seconds", peerAddress(peer), time.Since(start)/time.Second)
 				start = time.Now()
 				err0 = cs.ledgerAccessor.BuildMerkleTrie(cs.ctx, cs.updateVerifiedCounts)
 				if err0 == nil {
 					cs.log.Infof("built merkle trie in %d seconds", time.Since(start)/time.Second)
 					break
 				}
 				// failed to build the merkle trie for the above catchpoint file.
+				cs.log.Infof("failed to build merkle trie for catchpoint file from %s: %v", peerAddress(peer), err0)
 				cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
 			} else {
+				cs.log.Infof("failed to download catchpoint ledger from peer %s: %v", peerAddress(peer), err0)
 				cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankDownloadFailed)
 			}

cmd/algons/dnsCmd.go

+2 -7

@@ -23,6 +23,7 @@ import (
 	"net"
 	"os"
 	"regexp"
+	"slices"
 	"sort"
 	"strings"

@@ -439,13 +440,7 @@ func checkedDelete(toDelete []cloudflare.DNSRecordResponseEntry, cloudflareDNS *

 func getEntries(getNetwork string, recordType string) ([]cloudflare.DNSRecordResponseEntry, error) {
 	recordTypes := []string{"A", "CNAME", "SRV", "TXT"}
-	isKnown := false
-	for _, known := range append(recordTypes, "") {
-		if recordType == known {
-			isKnown = true
-			break
-		}
-	}
+	isKnown := slices.Contains(recordTypes, recordType) || recordType == ""
 	if !isKnown {
 		return nil, fmt.Errorf("invalid recordType specified %s", recordType)
 	}
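
For reference, the refactor above is behavior-preserving: slices.Contains (standard library, Go 1.21+) reports whether the slice holds the value, and the explicit check for an empty recordType replaces the old trick of appending "" to the candidate list. A minimal standalone sketch, illustrative only and not part of this commit:

package main

import (
	"fmt"
	"slices"
)

func main() {
	recordTypes := []string{"A", "CNAME", "SRV", "TXT"}
	for _, rt := range []string{"SRV", "", "MX"} {
		// An empty recordType means "no filter", so it is accepted as well.
		isKnown := slices.Contains(recordTypes, rt) || rt == ""
		fmt.Printf("recordType %q accepted: %v\n", rt, isKnown)
	}
	// Output:
	// recordType "SRV" accepted: true
	// recordType "" accepted: true
	// recordType "MX" accepted: false
}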

cmd/catchpointdump/commands.go

+1

@@ -44,6 +44,7 @@ func init() {
 	rootCmd.AddCommand(fileCmd)
 	rootCmd.AddCommand(netCmd)
 	rootCmd.AddCommand(databaseCmd)
+	rootCmd.AddCommand(infoCmd)
 }

 var rootCmd = &cobra.Command{

cmd/catchpointdump/file.go

+1 -1

@@ -591,7 +591,7 @@ func printOnlineAccounts(databaseName string, stagingTables bool, outFile *os.Fi
 	}

 	return dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
-		rows, err := sqlitedriver.MakeOnlineAccountsIter(ctx, tx, stagingTables, 0)
+		rows, err := sqlitedriver.MakeOrderedOnlineAccountsIter(ctx, tx, stagingTables, 0)
 		if err != nil {
 			return err
 		}

cmd/catchpointdump/info.go

+274 (new file)

@@ -0,0 +1,274 @@
// Copyright (C) 2019-2025 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.

package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"

	"github.com/algorand/go-algorand/ledger"
	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/protocol"
	"github.com/algorand/go-algorand/util"
)

var infoFile string

func init() {
	infoCmd.Flags().StringVarP(&infoFile, "tar", "t", "", "Specify the catchpoint file (.tar or .tar.gz) to read")
	infoCmd.Flags().StringVarP(&networkName, "net", "n", "", "Specify the network name (e.g. mainnet.algorand.network)")
	infoCmd.Flags().IntVarP(&round, "round", "r", 0, "Specify the round number (e.g. 7700000). Only used if --relay/-p is given.")
	infoCmd.Flags().StringVarP(&relayAddress, "relay", "p", "", "Relay address to download from (e.g. r-ru.algorand-mainnet.network:4160). If specified, fetch instead of reading local --tar.")
}

// infoCmd defines a new cobra command that only loads and prints the CatchpointFileHeader.
var infoCmd = &cobra.Command{
	Use:   "info",
	Short: "Show header info from a catchpoint tar file",
	Long:  "Reads the specified catchpoint tar (or tar.gz) file, locates the content.json block, and prints the CatchpointFileHeader fields without loading the entire ledger.",
	Args:  validateNoPosArgsFn,
	Run: func(cmd *cobra.Command, args []string) {
		// If user gave us a relay, stream from the network:
		if relayAddress != "" {
			// If they gave a relay, they must also give us a valid network and round
			if networkName == "" || round == 0 {
				cmd.HelpFunc()(cmd, args)
				reportErrorf("Must specify --net and --round when using --relay")
			}
			// Attempt to read the CatchpointFileHeader from the network stream
			fileHeader, err := loadCatchpointFileHeaderFromRelay(relayAddress, networkName, round)
			if err != nil {
				reportErrorf("Error streaming CatchpointFileHeader from relay %s: %v", relayAddress, err)
			}
			if fileHeader.Version == 0 {
				fmt.Printf("No valid header was found streaming from relay '%s'.\n", relayAddress)
				return
			}
			fmt.Printf("Relay: %s\n", relayAddress)
			printHeaderFields(fileHeader)
			return
		}

		// Otherwise, fallback to local file usage:
		if infoFile == "" {
			cmd.HelpFunc()(cmd, args)
			return
		}
		fi, err := os.Stat(infoFile)
		if err != nil {
			reportErrorf("Unable to stat file '%s': %v", infoFile, err)
		}
		if fi.Size() == 0 {
			reportErrorf("File '%s' is empty.", infoFile)
		}

		// Open the catchpoint file
		f, err := os.Open(infoFile)
		if err != nil {
			reportErrorf("Unable to open file '%s': %v", infoFile, err)
		}
		defer f.Close()

		// Extract just the file header
		fileHeader, err := loadCatchpointFileHeader(f, fi.Size())
		if err != nil {
			reportErrorf("Error reading CatchpointFileHeader from '%s': %v", infoFile, err)
		}

		// Print out the fields (mimicking the logic in printAccountsDatabase, but simpler)
		if fileHeader.Version == 0 {
			fmt.Printf("No valid header was found.\n")
			return
		}

		printHeaderFields(fileHeader)
	},
}

func printHeaderFields(fileHeader ledger.CatchpointFileHeader) {
	fmt.Printf("Version: %d\n", fileHeader.Version)
	fmt.Printf("Balances Round: %d\n", fileHeader.BalancesRound)
	fmt.Printf("Block Round: %d\n", fileHeader.BlocksRound)
	fmt.Printf("Block Header Digest: %s\n", fileHeader.BlockHeaderDigest.String())
	fmt.Printf("Catchpoint: %s\n", fileHeader.Catchpoint)
	fmt.Printf("Total Accounts: %d\n", fileHeader.TotalAccounts)
	fmt.Printf("Total KVs: %d\n", fileHeader.TotalKVs)
	fmt.Printf("Total Online Accounts: %d\n", fileHeader.TotalOnlineAccounts)
	fmt.Printf("Total Online Round Params: %d\n", fileHeader.TotalOnlineRoundParams)
	fmt.Printf("Total Chunks: %d\n", fileHeader.TotalChunks)

	totals := fileHeader.Totals
	fmt.Printf("AccountTotals - Online Money: %d\n", totals.Online.Money.Raw)
	fmt.Printf("AccountTotals - Online RewardUnits: %d\n", totals.Online.RewardUnits)
	fmt.Printf("AccountTotals - Offline Money: %d\n", totals.Offline.Money.Raw)
	fmt.Printf("AccountTotals - Offline RewardUnits: %d\n", totals.Offline.RewardUnits)
	fmt.Printf("AccountTotals - Not Participating Money: %d\n", totals.NotParticipating.Money.Raw)
	fmt.Printf("AccountTotals - Not Participating RewardUnits: %d\n", totals.NotParticipating.RewardUnits)
	fmt.Printf("AccountTotals - Rewards Level: %d\n", totals.RewardsLevel)
}

// loadCatchpointFileHeader reads only enough of the tar (or tar.gz) to
// decode the ledger.CatchpointFileHeader from the "content.json" chunk.
func loadCatchpointFileHeader(catchpointFile io.Reader, catchpointFileSize int64) (ledger.CatchpointFileHeader, error) {
	var fileHeader ledger.CatchpointFileHeader
	fmt.Printf("Scanning for CatchpointFileHeader in tar...\n\n")

	catchpointReader := bufio.NewReader(catchpointFile)
	tarReader, _, err := getCatchpointTarReader(catchpointReader, catchpointFileSize)
	if err != nil {
		return fileHeader, err
	}

	for {
		hdr, err := tarReader.Next()
		if err != nil {
			if err == io.EOF {
				// We reached the end without finding content.json
				break
			}
			return fileHeader, err
		}

		// We only need the "content.json" file
		if hdr.Name == ledger.CatchpointContentFileName {
			// Read exactly hdr.Size bytes
			buf := make([]byte, hdr.Size)
			_, readErr := io.ReadFull(tarReader, buf)
			if readErr != nil && readErr != io.EOF {
				return fileHeader, readErr
			}

			// Decode into fileHeader
			readErr = protocol.Decode(buf, &fileHeader)
			if readErr != nil {
				return fileHeader, readErr
			}
			// Once we have the fileHeader, we can break out.
			// If you wanted to keep scanning, you could keep going,
			// but it's not needed just for the header.
			return fileHeader, nil
		}

		// Otherwise skip this chunk
		skipBytes := hdr.Size
		n, err := io.Copy(io.Discard, tarReader)
		if err != nil {
			return fileHeader, err
		}

		// skip any leftover in case we didn't read the entire chunk
		if skipBytes > n {
			// keep discarding until we've skipped skipBytes total
			_, err := io.CopyN(io.Discard, tarReader, skipBytes-n)
			if err != nil {
				return fileHeader, err
			}
		}
	}
	// If we get here, we never found the content.json entry
	return fileHeader, nil
}

// loadCatchpointFileHeaderFromRelay opens a streaming HTTP connection to the
// given relay for the given round, then scans the (possibly gzip) tar stream
// until it finds `content.json`, decodes the ledger.CatchpointFileHeader, and
// immediately closes the network connection (so we don't download the entire file).
func loadCatchpointFileHeaderFromRelay(relay string, netName string, round int) (ledger.CatchpointFileHeader, error) {
	var fileHeader ledger.CatchpointFileHeader

	// Create an HTTP GET to the relay
	genesisID := strings.Split(netName, ".")[0] + "-v1.0"
	urlTemplate := "http://" + relay + "/v1/" + genesisID + "/%s/" + strconv.FormatUint(uint64(round), 36)
	catchpointURL := fmt.Sprintf(urlTemplate, "ledger")

	req, err := http.NewRequest(http.MethodGet, catchpointURL, nil)
	if err != nil {
		return fileHeader, err
	}
	// Add a short-ish timeout or rely on default
	ctx, cancelFn := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancelFn()
	req = req.WithContext(ctx)
	network.SetUserAgentHeader(req.Header)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fileHeader, err
	}
	if resp.StatusCode != http.StatusOK {
		// e.g. 404 if not found
		return fileHeader, fmt.Errorf("HTTP status code %d from relay", resp.StatusCode)
	}
	defer resp.Body.Close()

	// Wrap with a small "watchdog" so we don't hang if data stops flowing
	wdReader := util.MakeWatchdogStreamReader(resp.Body, 4096, 4096, 5*time.Second)
	defer wdReader.Close()

	// Use isGzip logic from file.go
	// We have to peek the first 2 bytes to see if it's gz
	peekReader := bufio.NewReader(wdReader)
	// We'll fake a size of "unknown" since we don't truly know the length
	tarReader, _, err := getCatchpointTarReader(peekReader, -1 /* unknown size */)
	if err != nil {
		return fileHeader, err
	}

	// Now read each tar entry, ignoring everything except "content.json"
	for {
		hdr, err := tarReader.Next()
		if err != nil {
			if err == io.EOF {
				// finished the entire tar stream
				break
			}
			return fileHeader, err
		}
		if hdr.Name == ledger.CatchpointContentFileName {
			// We only need "content.json"
			buf := make([]byte, hdr.Size)
			_, readErr := io.ReadFull(tarReader, buf)
			if readErr != nil && readErr != io.EOF {
				return fileHeader, readErr
			}

			// decode
			decodeErr := protocol.Decode(buf, &fileHeader)
			if decodeErr != nil {
				return fileHeader, decodeErr
			}
			// Done! We can return immediately.
			return fileHeader, nil
		}
		// If not content.json, skip over this tar chunk
		_, err = io.Copy(io.Discard, tarReader)
		if err != nil {
			return fileHeader, err
		}
	}
	// If we exit the loop, we never found content.json
	return fileHeader, nil
}
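
Based on the flags registered in init() above, the new subcommand would be invoked roughly as follows; the binary name, file path, relay host, and round are illustrative placeholders (the relay and round values come from the flag help text), not commands taken from this commit:

# Read the header from a local catchpoint archive
catchpointdump info --tar ./mainnet_catchpoint.tar.gz

# Stream only the header from a relay, without downloading the whole file
catchpointdump info --relay r-ru.algorand-mainnet.network:4160 --net mainnet.algorand.network --round 7700000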
