diff --git a/cmd/sptool/toolbox_deal_tools.go b/cmd/sptool/toolbox_deal_tools.go
index 535bb0534..d90f9cc4b 100644
--- a/cmd/sptool/toolbox_deal_tools.go
+++ b/cmd/sptool/toolbox_deal_tools.go
@@ -257,7 +257,7 @@ var generateRandCar = &cli.Command{
 	Usage:     "creates a randomly generated dense car",
 	ArgsUsage: "",
 	Flags: []cli.Flag{
-		&cli.IntFlag{
+		&cli.Int64Flag{
 			Name:    "size",
 			Aliases: []string{"s"},
 			Usage:   "The size of the data to turn into a car",
@@ -282,11 +282,11 @@ var generateRandCar = &cli.Command{
 		}
 		outPath := cctx.Args().Get(0)
-		size := cctx.Int("size")
+		size := cctx.Int64("size")
 		cs := cctx.Int64("chunksize")
 		ml := cctx.Int("maxlinks")
 
-		rf, err := testutils.CreateRandomFile(outPath, int(time.Now().Unix()), size)
+		rf, err := testutils.CreateRandomFile(outPath, time.Now().Unix(), size)
 		if err != nil {
 			return err
 		}
diff --git a/deps/config/common.go b/deps/config/common.go
index 9e3b45be9..e911e3c48 100644
--- a/deps/config/common.go
+++ b/deps/config/common.go
@@ -1,5 +1,9 @@
 package config
 
+import (
+	"time"
+)
+
 type Common struct {
 	API    API
 	Backup Backup
@@ -13,7 +17,7 @@ type API struct {
 	// Binding address for the Binary's API
 	ListenAddress       string
 	RemoteListenAddress string
-	Timeout             Duration
+	Timeout             time.Duration
 }
 
 // // Common
@@ -63,7 +67,7 @@ type Libp2p struct {
 	ConnMgrHigh uint
 	// ConnMgrGrace is a time duration that new connections are immune from being
 	// closed by the connection manager.
-	ConnMgrGrace Duration
+	ConnMgrGrace time.Duration
 }
 
 type Pubsub struct {
diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go
index 5fdef4d7c..41701ffd5 100644
--- a/deps/config/doc_gen.go
+++ b/deps/config/doc_gen.go
@@ -766,26 +766,6 @@ also be bounded by resources available on the machine. (Default: 0 - unlimited)`
 also be bounded by resources available on the machine. (Default: 8 - unlimited)`,
 		},
 	},
-	"Duration time.Duration": {
-		{
-			Name: "func",
-			Type: "(dur",
-
-			Comment: ``,
-		},
-		{
-			Name: "d",
-			Type: ":=",
-
-			Comment: ``,
-		},
-		{
-			Name: "return",
-			Type: "[]byte(d.String()),",
-
-			Comment: ``,
-		},
-	},
 	"HTTPConfig": {
 		{
 			Name: "Enable",
diff --git a/deps/config/old_lotus_miner.go b/deps/config/old_lotus_miner.go
index 1b01edca0..296ee0516 100644
--- a/deps/config/old_lotus_miner.go
+++ b/deps/config/old_lotus_miner.go
@@ -85,7 +85,7 @@ type DAGStoreConfig struct {
 	// The time between calls to periodic dagstore GC, in time.Duration string
 	// representation, e.g. 1m, 5m, 1h.
 	// Default value: 1 minute.
-	GCInterval Duration
+	GCInterval time.Duration
 }
 
 type MinerAddressConfig struct {
@@ -203,11 +203,11 @@ type SealingConfig struct {
 	// CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
 	// live before it must be extended or converted into sector containing deals before it is
 	// terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21).
-	CommittedCapacitySectorLifetime Duration
+	CommittedCapacitySectorLifetime time.Duration
 
 	// Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal.
 	// Sectors which are fully filled will start sealing immediately
-	WaitDealsDelay Duration
+	WaitDealsDelay time.Duration
 
 	// Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
 	// avoid the relatively high cost of unsealing the data later, at the cost of more storage space
@@ -234,9 +234,9 @@ type SealingConfig struct {
 	// maximum precommit batch size - batches will be sent immediately above this size
 	MaxPreCommitBatch int
 	// how long to wait before submitting a batch after crossing the minimum batch size
-	PreCommitBatchWait Duration
+	PreCommitBatchWait time.Duration
 	// time buffer for forceful batch submission before sectors/deal in batch would start expiring
-	PreCommitBatchSlack Duration
+	PreCommitBatchSlack time.Duration
 
 	// enable / disable commit aggregation (takes effect after nv13)
 	AggregateCommits bool
@@ -245,9 +245,9 @@ type SealingConfig struct {
 	// maximum batched commit size - batches will be sent immediately above this size
 	MaxCommitBatch int
 	// how long to wait before submitting a batch after crossing the minimum batch size
-	CommitBatchWait Duration
+	CommitBatchWait time.Duration
 	// time buffer for forceful batch submission before sectors/deals in batch would start expiring
-	CommitBatchSlack Duration
+	CommitBatchSlack time.Duration
 
 	// network BaseFee below which to stop doing precommit batching, instead
 	// sending precommit messages to the chain individually. When the basefee is
@@ -267,7 +267,7 @@ type SealingConfig struct {
 	TerminateBatchMax uint64
 	TerminateBatchMin uint64
-	TerminateBatchWait Duration
+	TerminateBatchWait time.Duration
 
 	// Keep this many sectors in sealing pipeline, start CC if needed
 	// todo TargetSealingSectors uint64
@@ -304,7 +304,7 @@ type ProvingConfig struct {
 	// test challenge took longer than this timeout
 	// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
 	// blocked (e.g. in case of disconnected NFS mount)
-	SingleCheckTimeout Duration
+	SingleCheckTimeout time.Duration
 
 	// Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
 	// the partition which didn't get checked on time will be skipped
@@ -313,7 +313,7 @@ type ProvingConfig struct {
 	// test challenge took longer than this timeout
 	// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
 	// blocked or slow
-	PartitionCheckTimeout Duration
+	PartitionCheckTimeout time.Duration
 
 	// Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
 	//
@@ -434,12 +434,12 @@ type DealmakingConfig struct {
 	// Maximum expected amount of time getting the deal into a sealed sector will take
 	// This includes the time the deal will need to get transferred and published
 	// before being assigned to a sector
-	ExpectedSealDuration Duration
+	ExpectedSealDuration time.Duration
 	// Maximum amount of time proposed deal StartEpoch can be in future
-	MaxDealStartDelay Duration
+	MaxDealStartDelay time.Duration
 	// When a deal is ready to publish, the amount of time to wait for more
 	// deals to be ready to publish before publishing them all as a batch
-	PublishMsgPeriod Duration
+	PublishMsgPeriod time.Duration
 	// The maximum number of deals to include in a single PublishStorageDeals
 	// message
 	MaxDealsPerPublishMsg uint64
@@ -537,7 +537,7 @@ func DefaultStorageMiner() *StorageMiner {
 			MaxWaitDealsSectors:       2, // 64G with 32G sectors
 			MaxSealingSectors:         0,
 			MaxSealingSectorsForDeals: 0,
-			WaitDealsDelay:            Duration(time.Hour * 6),
+			WaitDealsDelay:            time.Hour * 6,
 			AlwaysKeepUnsealedCopy:    true,
 			FinalizeEarly:             false,
 			MakeNewSectorForDeals:     true,
@@ -547,32 +547,32 @@ func DefaultStorageMiner() *StorageMiner {
 			DisableCollateralFallback: false,
 			MaxPreCommitBatch:         miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
-			PreCommitBatchWait:        Duration(24 * time.Hour),           // this should be less than 31.5 hours, which is the expiration of a precommit ticket
+			PreCommitBatchWait:        24 * time.Hour,                     // this should be less than 31.5 hours, which is the expiration of a precommit ticket
 			// XXX snap deals wait deals slack if first
-			PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
+			PreCommitBatchSlack: 3 * time.Hour, // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
 
-			CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(maxSectorExtentsion) * uint64(time.Second)),
+			CommittedCapacitySectorLifetime: time.Duration(builtin.EpochDurationSeconds * uint64(maxSectorExtentsion) * uint64(time.Second)),
 
 			AggregateCommits: true,
 			MinCommitBatch:   miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
 			MaxCommitBatch:   miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
-			CommitBatchWait:  Duration(24 * time.Hour),    // this can be up to 30 days
-			CommitBatchSlack: Duration(1 * time.Hour),     // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
+			CommitBatchWait:  24 * time.Hour, // this can be up to 30 days
+			CommitBatchSlack: 1 * time.Hour,  // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
 
 			BatchPreCommitAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
 			AggregateAboveBaseFee:      types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
 
 			TerminateBatchMin:  1,
 			TerminateBatchMax:  100,
-			TerminateBatchWait: Duration(5 * time.Minute),
+			TerminateBatchWait: 5 * time.Minute,
 
 			MaxSectorProveCommitsSubmittedPerEpoch: 20,
 			UseSyntheticPoRep:                      false,
 		},
 
 		Proving: ProvingConfig{
 			ParallelCheckLimit:    32,
-			PartitionCheckTimeout: Duration(20 * time.Minute),
-			SingleCheckTimeout:    Duration(10 * time.Minute),
+			PartitionCheckTimeout: 20 * time.Minute,
+			SingleCheckTimeout:    10 * time.Minute,
 		},
 
 		Storage: SealerConfig{
@@ -605,9 +605,9 @@ func DefaultStorageMiner() *StorageMiner {
 			ConsiderUnverifiedStorageDeals: true,
 			PieceCidBlocklist:              []cid.Cid{},
 			// TODO: It'd be nice to set this based on sector size
-			MaxDealStartDelay:    Duration(time.Hour * 24 * 14),
-			ExpectedSealDuration: Duration(time.Hour * 24),
-			PublishMsgPeriod:     Duration(time.Hour),
+			MaxDealStartDelay:    time.Hour * 24 * 14,
+			ExpectedSealDuration: time.Hour * 24,
+			PublishMsgPeriod:     time.Hour,
 
 			MaxDealsPerPublishMsg:           8,
 			MaxProviderCollateralMultiplier: 2,
@@ -678,7 +678,7 @@ func DefaultStorageMiner() *StorageMiner {
 			MaxConcurrentIndex:         5,
 			MaxConcurrencyStorageCalls: 100,
 			MaxConcurrentUnseals:       5,
-			GCInterval:                 Duration(1 * time.Minute),
+			GCInterval:                 time.Minute,
 		},
 		HarmonyDB: HarmonyDB{
 			Hosts: []string{"127.0.0.1"},
@@ -698,7 +698,7 @@ func defCommon() Common {
 	return Common{
 		API: API{
 			ListenAddress: "/ip4/127.0.0.1/tcp/1234/http",
-			Timeout:       Duration(30 * time.Second),
+			Timeout:       30 * time.Second,
 		},
 		Logging: Logging{
 			SubsystemLevels: map[string]string{
@@ -722,7 +722,7 @@ func defCommon() Common {
 
 			ConnMgrLow:   150,
 			ConnMgrHigh:  180,
-			ConnMgrGrace: Duration(20 * time.Second),
+			ConnMgrGrace: 20 * time.Second,
 		},
 		Pubsub: Pubsub{
 			Bootstrapper: false,
diff --git a/deps/config/types.go b/deps/config/types.go
index 8551d323e..b3eb8958e 100644
--- a/deps/config/types.go
+++ b/deps/config/types.go
@@ -447,25 +447,6 @@ type CurioProvingConfig struct {
 	PartitionCheckTimeout time.Duration
 }
 
-// Duration is a wrapper type for time.Duration
-// for decoding and encoding from/to TOML
-type Duration time.Duration
-
-func (dur Duration) MarshalText() ([]byte, error) {
-	d := time.Duration(dur)
-	return []byte(d.String()), nil
-}
-
-// UnmarshalText implements interface for TOML decoding
-func (dur *Duration) UnmarshalText(text []byte) error {
-	d, err := time.ParseDuration(string(text))
-	if err != nil {
-		return err
-	}
-	*dur = Duration(d)
-	return err
-}
-
 type CurioIngestConfig struct {
 	// MaxMarketRunningPipelines is the maximum number of market pipelines that can be actively running tasks.
 	// A "running" pipeline is one that has at least one task currently assigned to a machine (owner_id is not null).
diff --git a/lib/testutils/testutils.go b/lib/testutils/testutils.go
index 8009ca867..a8682f27b 100644
--- a/lib/testutils/testutils.go
+++ b/lib/testutils/testutils.go
@@ -27,19 +27,25 @@ import (
 
 const defaultHashFunction = uint64(multihash.BLAKE2B_MIN + 31)
 
-func CreateRandomFile(dir string, rseed, size int) (string, error) {
-	source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
+func CreateRandomFile(dir string, rseed int64, size int64) (string, error) {
+	source := io.LimitReader(rand.New(rand.NewSource(rseed)), size)
 
 	file, err := os.CreateTemp(dir, "sourcefile.dat")
 	if err != nil {
 		return "", err
 	}
 
-	_, err = io.Copy(file, source)
+	buff := make([]byte, 4<<20)
+
+	n, err := io.CopyBuffer(file, source, buff)
 	if err != nil {
 		return "", err
 	}
 
+	if n != size {
+		return "", fmt.Errorf("incorrect file size: written %d != expected %d", n, size)
+	}
+
 	// _, err = file.Seek(0, io.SeekStart)
 	if err != nil {
diff --git a/market/indexstore/indexstore_test.go b/market/indexstore/indexstore_test.go
index a84e6bc74..a2cf37a97 100644
--- a/market/indexstore/indexstore_test.go
+++ b/market/indexstore/indexstore_test.go
@@ -42,7 +42,7 @@ func TestNewIndexStore(t *testing.T) {
 		_ = os.RemoveAll(dir)
 	}()
 
-	rf, err := testutils.CreateRandomFile(dir, int(time.Now().Unix()), 8000000)
+	rf, err := testutils.CreateRandomFile(dir, time.Now().Unix(), 8000000)
 	require.NoError(t, err)
 
 	caropts := []carv2.Option{
diff --git a/web/api/apihelper/apihelper.go b/web/api/apihelper/apihelper.go
index 4729d66a4..c2464096f 100644
--- a/web/api/apihelper/apihelper.go
+++ b/web/api/apihelper/apihelper.go
@@ -7,12 +7,11 @@ import (
 	logging "github.com/ipfs/go-log/v2"
 )
 
-var log = logging.Logger("cu/web/apihelper")
+var log = logging.Logger("apihelper")
 
 func OrHTTPFail(w http.ResponseWriter, err error) {
 	if err != nil {
-		w.WriteHeader(500)
-		_, _ = w.Write([]byte(err.Error()))
+		http.Error(w, err.Error(), 500)
 		log.Errorw("http fail", "err", err, "stack", string(debug.Stack()))
 		panic(err)
 	}
diff --git a/web/api/config/config.go b/web/api/config/config.go
index f42698050..20f68c6df 100644
--- a/web/api/config/config.go
+++ b/web/api/config/config.go
@@ -16,9 +16,11 @@ import (
 	"github.com/filecoin-project/curio/deps"
 	"github.com/filecoin-project/curio/deps/config"
 	"github.com/filecoin-project/curio/web/api/apihelper"
+
+	"github.com/filecoin-project/lotus/chain/types"
 )
 
-var log = logging.Logger("curio/web/config")
+var log = logging.Logger("config-ui")
 
 type cfg struct {
 	*deps.Deps
@@ -59,10 +61,16 @@ func (c *cfg) addLayer(w http.ResponseWriter, r *http.Request) {
 func getSch(w http.ResponseWriter, r *http.Request) {
 	ref := jsonschema.Reflector{
 		Mapper: func(i reflect.Type) *jsonschema.Schema {
-			if i == reflect.TypeOf(config.Duration(time.Second)) {
+			if i == reflect.TypeOf(types.MustParseFIL("1 Fil")) { // Override the Pattern for types.FIL
+				return &jsonschema.Schema{
+					Type:    "string",
+					Pattern: "1 fil/0.03 fil/0.31/1 attofil",
+				}
+			}
+			if i == reflect.TypeOf(time.Second) { // Override the Pattern for time.Duration
 				return &jsonschema.Schema{
-					Type:   "string",
-					Format: "duration",
+					Type:    "string",
+					Pattern: "0h0m0s",
 				}
 			}
 			return nil
@@ -147,14 +155,15 @@ func (c *cfg) setLayer(w http.ResponseWriter, r *http.Request) {
 
 	configStr := tomlData.String()
 
+	curioCfg := config.DefaultCurioConfig()
+	_, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg)
+	apihelper.OrHTTPFail(w, err)
+
+	cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
+	apihelper.OrHTTPFail(w, err)
+
 	// Generate a full commented string if this is base layer
 	if layer == "base" {
-		// Parse the into CurioConfig TOML
-		curioCfg := config.DefaultCurioConfig()
-		_, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg)
-		apihelper.OrHTTPFail(w, err)
-		cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
-		apihelper.OrHTTPFail(w, err)
 		configStr = string(cb)
 	}
diff --git a/web/static/config/edit.html b/web/static/config/edit.html
index 30fccaf22..4bfb41476 100644
--- a/web/static/config/edit.html
+++ b/web/static/config/edit.html
@@ -147,7 +147,12 @@
                 window.location.href = '/pages/config/list/';
             })
             .catch(error => {
-                alert('Error saving data:', error);
+                console.error('Error saving data:', error);
+                if (error.response && error.response.data) {
+                    alert('Error saving data: ' + error.response.data.message);
+                } else {
+                    alert('An unexpected error occurred. Please try again.');
+                }
             });
         }
     });