Skip to content

Commit

Permalink
remove Duration completely
Browse files Browse the repository at this point in the history
  • Loading branch information
LexLuthr committed Mar 3, 2025
1 parent b686203 commit 2ec0435
Show file tree
Hide file tree
Showing 10 changed files with 74 additions and 90 deletions.
6 changes: 3 additions & 3 deletions cmd/sptool/toolbox_deal_tools.go
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ var generateRandCar = &cli.Command{
Usage: "creates a randomly generated dense car",
ArgsUsage: "<outputPath>",
Flags: []cli.Flag{
&cli.IntFlag{
&cli.Int64Flag{
Name: "size",
Aliases: []string{"s"},
Usage: "The size of the data to turn into a car",
Expand All @@ -282,11 +282,11 @@ var generateRandCar = &cli.Command{
}

outPath := cctx.Args().Get(0)
size := cctx.Int("size")
size := cctx.Int64("size")
cs := cctx.Int64("chunksize")
ml := cctx.Int("maxlinks")

rf, err := testutils.CreateRandomFile(outPath, int(time.Now().Unix()), size)
rf, err := testutils.CreateRandomFile(outPath, time.Now().Unix(), size)
if err != nil {
return err
}
Expand Down
8 changes: 6 additions & 2 deletions deps/config/common.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
package config

import (
"time"
)

type Common struct {
API API
Backup Backup
Expand All @@ -13,7 +17,7 @@ type API struct {
// Binding address for the Binary's API
ListenAddress string
RemoteListenAddress string
Timeout Duration
Timeout time.Duration
}

// // Common
Expand Down Expand Up @@ -63,7 +67,7 @@ type Libp2p struct {
ConnMgrHigh uint
// ConnMgrGrace is a time duration that new connections are immune from being
// closed by the connection manager.
ConnMgrGrace Duration
ConnMgrGrace time.Duration
}

type Pubsub struct {
Expand Down
20 changes: 0 additions & 20 deletions deps/config/doc_gen.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

56 changes: 28 additions & 28 deletions deps/config/old_lotus_miner.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ type DAGStoreConfig struct {
// The time between calls to periodic dagstore GC, in time.Duration string
// representation, e.g. 1m, 5m, 1h.
// Default value: 1 minute.
GCInterval Duration
GCInterval time.Duration
}

type MinerAddressConfig struct {
Expand Down Expand Up @@ -203,11 +203,11 @@ type SealingConfig struct {
// CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
// live before it must be extended or converted into sector containing deals before it is
// terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21).
CommittedCapacitySectorLifetime Duration
CommittedCapacitySectorLifetime time.Duration

// Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal.
// Sectors which are fully filled will start sealing immediately
WaitDealsDelay Duration
WaitDealsDelay time.Duration

// Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
// avoid the relatively high cost of unsealing the data later, at the cost of more storage space
Expand All @@ -234,9 +234,9 @@ type SealingConfig struct {
// maximum precommit batch size - batches will be sent immediately above this size
MaxPreCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
PreCommitBatchWait Duration
PreCommitBatchWait time.Duration
// time buffer for forceful batch submission before sectors/deal in batch would start expiring
PreCommitBatchSlack Duration
PreCommitBatchSlack time.Duration

// enable / disable commit aggregation (takes effect after nv13)
AggregateCommits bool
Expand All @@ -245,9 +245,9 @@ type SealingConfig struct {
// maximum batched commit size - batches will be sent immediately above this size
MaxCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
CommitBatchWait Duration
CommitBatchWait time.Duration
// time buffer for forceful batch submission before sectors/deals in batch would start expiring
CommitBatchSlack Duration
CommitBatchSlack time.Duration

// network BaseFee below which to stop doing precommit batching, instead
// sending precommit messages to the chain individually. When the basefee is
Expand All @@ -267,7 +267,7 @@ type SealingConfig struct {

TerminateBatchMax uint64
TerminateBatchMin uint64
TerminateBatchWait Duration
TerminateBatchWait time.Duration

// Keep this many sectors in sealing pipeline, start CC if needed
// todo TargetSealingSectors uint64
Expand Down Expand Up @@ -304,7 +304,7 @@ type ProvingConfig struct {
// test challenge took longer than this timeout
// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
// blocked (e.g. in case of disconnected NFS mount)
SingleCheckTimeout Duration
SingleCheckTimeout time.Duration

// Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
// the partition which didn't get checked on time will be skipped
Expand All @@ -313,7 +313,7 @@ type ProvingConfig struct {
// test challenge took longer than this timeout
// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
// blocked or slow
PartitionCheckTimeout Duration
PartitionCheckTimeout time.Duration

// Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
//
Expand Down Expand Up @@ -434,12 +434,12 @@ type DealmakingConfig struct {
// Maximum expected amount of time getting the deal into a sealed sector will take
// This includes the time the deal will need to get transferred and published
// before being assigned to a sector
ExpectedSealDuration Duration
ExpectedSealDuration time.Duration
// Maximum amount of time proposed deal StartEpoch can be in future
MaxDealStartDelay Duration
MaxDealStartDelay time.Duration
// When a deal is ready to publish, the amount of time to wait for more
// deals to be ready to publish before publishing them all as a batch
PublishMsgPeriod Duration
PublishMsgPeriod time.Duration
// The maximum number of deals to include in a single PublishStorageDeals
// message
MaxDealsPerPublishMsg uint64
Expand Down Expand Up @@ -537,7 +537,7 @@ func DefaultStorageMiner() *StorageMiner {
MaxWaitDealsSectors: 2, // 64G with 32G sectors
MaxSealingSectors: 0,
MaxSealingSectorsForDeals: 0,
WaitDealsDelay: Duration(time.Hour * 6),
WaitDealsDelay: time.Hour * 6,
AlwaysKeepUnsealedCopy: true,
FinalizeEarly: false,
MakeNewSectorForDeals: true,
Expand All @@ -547,32 +547,32 @@ func DefaultStorageMiner() *StorageMiner {
DisableCollateralFallback: false,

MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
PreCommitBatchWait: 24 * time.Hour, // this should be less than 31.5 hours, which is the expiration of a precommit ticket
// XXX snap deals wait deals slack if first
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
PreCommitBatchSlack: 3 * time.Hour, // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration

CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(maxSectorExtentsion) * uint64(time.Second)),
CommittedCapacitySectorLifetime: time.Duration(builtin.EpochDurationSeconds * uint64(maxSectorExtentsion) * uint64(time.Second)),

AggregateCommits: true,
MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
CommitBatchWait: 24 * time.Hour, // this can be up to 30 days
CommitBatchSlack: 1 * time.Hour, // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration

BatchPreCommitAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
AggregateAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL

TerminateBatchMin: 1,
TerminateBatchMax: 100,
TerminateBatchWait: Duration(5 * time.Minute),
TerminateBatchWait: 5 * time.Minute,
MaxSectorProveCommitsSubmittedPerEpoch: 20,
UseSyntheticPoRep: false,
},

Proving: ProvingConfig{
ParallelCheckLimit: 32,
PartitionCheckTimeout: Duration(20 * time.Minute),
SingleCheckTimeout: Duration(10 * time.Minute),
PartitionCheckTimeout: 20 * time.Minute,
SingleCheckTimeout: 10 * time.Minute,
},

Storage: SealerConfig{
Expand Down Expand Up @@ -605,9 +605,9 @@ func DefaultStorageMiner() *StorageMiner {
ConsiderUnverifiedStorageDeals: true,
PieceCidBlocklist: []cid.Cid{},
// TODO: It'd be nice to set this based on sector size
MaxDealStartDelay: Duration(time.Hour * 24 * 14),
ExpectedSealDuration: Duration(time.Hour * 24),
PublishMsgPeriod: Duration(time.Hour),
MaxDealStartDelay: time.Hour * 24 * 14,
ExpectedSealDuration: time.Hour * 24,
PublishMsgPeriod: time.Hour,
MaxDealsPerPublishMsg: 8,
MaxProviderCollateralMultiplier: 2,

Expand Down Expand Up @@ -678,7 +678,7 @@ func DefaultStorageMiner() *StorageMiner {
MaxConcurrentIndex: 5,
MaxConcurrencyStorageCalls: 100,
MaxConcurrentUnseals: 5,
GCInterval: Duration(1 * time.Minute),
GCInterval: time.Minute,
},
HarmonyDB: HarmonyDB{
Hosts: []string{"127.0.0.1"},
Expand All @@ -698,7 +698,7 @@ func defCommon() Common {
return Common{
API: API{
ListenAddress: "/ip4/127.0.0.1/tcp/1234/http",
Timeout: Duration(30 * time.Second),
Timeout: 30 * time.Second,
},
Logging: Logging{
SubsystemLevels: map[string]string{
Expand All @@ -722,7 +722,7 @@ func defCommon() Common {

ConnMgrLow: 150,
ConnMgrHigh: 180,
ConnMgrGrace: Duration(20 * time.Second),
ConnMgrGrace: 20 * time.Second,
},
Pubsub: Pubsub{
Bootstrapper: false,
Expand Down
19 changes: 0 additions & 19 deletions deps/config/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -447,25 +447,6 @@ type CurioProvingConfig struct {
PartitionCheckTimeout time.Duration
}

// Duration is a wrapper type for time.Duration
// for decoding and encoding from/to TOML
type Duration time.Duration

// MarshalText implements the encoding.TextMarshaler interface for TOML
// encoding, rendering the duration in time.Duration's string form (e.g. "1h30m0s").
func (d Duration) MarshalText() ([]byte, error) {
	return []byte(time.Duration(d).String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface for TOML
// decoding, accepting any string time.ParseDuration understands (e.g. "1m", "5h").
func (d *Duration) UnmarshalText(text []byte) error {
	parsed, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*d = Duration(parsed)
	return nil
}

type CurioIngestConfig struct {
// MaxMarketRunningPipelines is the maximum number of market pipelines that can be actively running tasks.
// A "running" pipeline is one that has at least one task currently assigned to a machine (owner_id is not null).
Expand Down
12 changes: 9 additions & 3 deletions lib/testutils/testutils.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,19 +27,25 @@ import (

const defaultHashFunction = uint64(multihash.BLAKE2B_MIN + 31)

func CreateRandomFile(dir string, rseed, size int) (string, error) {
source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
func CreateRandomFile(dir string, rseed int64, size int64) (string, error) {
source := io.LimitReader(rand.New(rand.NewSource(rseed)), size)

file, err := os.CreateTemp(dir, "sourcefile.dat")
if err != nil {
return "", err
}

_, err = io.Copy(file, source)
buff := make([]byte, 4<<20)

n, err := io.CopyBuffer(file, source, buff)
if err != nil {
return "", err
}

if n != size {
return "", fmt.Errorf("incorrect file size: written %d != expected %d", n, size)
}

//
_, err = file.Seek(0, io.SeekStart)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion market/indexstore/indexstore_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ func TestNewIndexStore(t *testing.T) {
_ = os.RemoveAll(dir)
}()

rf, err := testutils.CreateRandomFile(dir, int(time.Now().Unix()), 8000000)
rf, err := testutils.CreateRandomFile(dir, time.Now().Unix(), 8000000)
require.NoError(t, err)

caropts := []carv2.Option{
Expand Down
5 changes: 2 additions & 3 deletions web/api/apihelper/apihelper.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,11 @@ import (
logging "github.com/ipfs/go-log/v2"
)

var log = logging.Logger("cu/web/apihelper")
var log = logging.Logger("apihelper")

func OrHTTPFail(w http.ResponseWriter, err error) {
if err != nil {
w.WriteHeader(500)
_, _ = w.Write([]byte(err.Error()))
http.Error(w, err.Error(), 500)
log.Errorw("http fail", "err", err, "stack", string(debug.Stack()))
panic(err)
}
Expand Down
29 changes: 19 additions & 10 deletions web/api/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,11 @@ import (
"github.com/filecoin-project/curio/deps"
"github.com/filecoin-project/curio/deps/config"
"github.com/filecoin-project/curio/web/api/apihelper"

"github.com/filecoin-project/lotus/chain/types"
)

var log = logging.Logger("curio/web/config")
var log = logging.Logger("config-ui")

type cfg struct {
*deps.Deps
Expand Down Expand Up @@ -59,10 +61,16 @@ func (c *cfg) addLayer(w http.ResponseWriter, r *http.Request) {
func getSch(w http.ResponseWriter, r *http.Request) {
ref := jsonschema.Reflector{
Mapper: func(i reflect.Type) *jsonschema.Schema {
if i == reflect.TypeOf(config.Duration(time.Second)) {
if i == reflect.TypeOf(types.MustParseFIL("1 Fil")) { // Override the Pattern for types.FIL
return &jsonschema.Schema{
Type: "string",
Pattern: "1 fil/0.03 fil/0.31/1 attofil",
}
}
if i == reflect.TypeOf(time.Second) { // Override the Pattern for time.Duration
return &jsonschema.Schema{
Type: "string",
Format: "duration",
Type: "string",
Pattern: "0h0m0s",
}
}
return nil
Expand Down Expand Up @@ -147,14 +155,15 @@ func (c *cfg) setLayer(w http.ResponseWriter, r *http.Request) {

configStr := tomlData.String()

curioCfg := config.DefaultCurioConfig()
_, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg)
apihelper.OrHTTPFail(w, err)

cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
apihelper.OrHTTPFail(w, err)

// Generate a full commented string if this is base layer
if layer == "base" {
// Parse the into CurioConfig TOML
curioCfg := config.DefaultCurioConfig()
_, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg)
apihelper.OrHTTPFail(w, err)
cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
apihelper.OrHTTPFail(w, err)
configStr = string(cb)
}

Expand Down
Loading

0 comments on commit 2ec0435

Please sign in to comment.