diff --git a/internal/backend/backendrun/operation.go b/internal/backend/backendrun/operation.go index 164f9ef25c87..0db70e713e89 100644 --- a/internal/backend/backendrun/operation.go +++ b/internal/backend/backendrun/operation.go @@ -75,6 +75,7 @@ type Operation struct { // plan for an apply operation. PlanId string PlanRefresh bool // PlanRefresh will do a refresh before a plan + PlanLight bool // PlanLight enables light plan mode, skipping refresh for unchanged resources PlanOutPath string // PlanOutPath is the path to save the plan // PlanOutBackend is the backend to store with the plan. This is the diff --git a/internal/backend/local/backend_local.go b/internal/backend/local/backend_local.go index 4e900de9efd7..2add2431cc05 100644 --- a/internal/backend/local/backend_local.go +++ b/internal/backend/local/backend_local.go @@ -96,6 +96,7 @@ func (b *Local) localRun(op *backendrun.Operation) (*backendrun.LocalRun, *confi stateMeta = &m } log.Printf("[TRACE] backend/local: populating backendrun.LocalRun from plan file") + // TODO: write light option to plan file ret, configSnap, ctxDiags = b.localRunForPlanFile(op, lp, ret, &coreOpts, stateMeta) if ctxDiags.HasErrors() { diags = diags.Append(ctxDiags) @@ -207,6 +208,7 @@ func (b *Local) localRunDirect(op *backendrun.Operation, run *backendrun.LocalRu ForceReplace: op.ForceReplace, SetVariables: variables, SkipRefresh: op.Type != backendrun.OperationTypeRefresh && !op.PlanRefresh, + LightMode: op.PlanLight, GenerateConfigPath: op.GenerateConfigOut, DeferralAllowed: op.DeferralAllowed, Query: op.Query, diff --git a/internal/cloud/backend_plan.go b/internal/cloud/backend_plan.go index 325fefd6a7f3..bc745328c85a 100644 --- a/internal/cloud/backend_plan.go +++ b/internal/cloud/backend_plan.go @@ -65,6 +65,15 @@ func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backendrun.Operat )) } + if op.PlanLight { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Light plan mode is not supported with 
cloud backends", + fmt.Sprintf("%s does not support the -light flag. ", b.appName)+ + "Light plan mode is only available for local operations.", + )) + } + if op.PlanFile != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, diff --git a/internal/command/arguments/extended.go b/internal/command/arguments/extended.go index 0c05e8f0e49f..0ca916427935 100644 --- a/internal/command/arguments/extended.go +++ b/internal/command/arguments/extended.go @@ -63,6 +63,9 @@ type Operation struct { // state before proceeding. Default is true. Refresh bool + // Light + Light bool + // Targets allow limiting an operation to a set of resource addresses and // their dependencies. Targets []addrs.Targetable @@ -287,6 +290,7 @@ func extendedFlagSet(name string, state *State, operation *Operation, vars *Vars f.IntVar(&operation.Parallelism, "parallelism", DefaultParallelism, "parallelism") f.BoolVar(&operation.DeferralAllowed, "allow-deferral", false, "allow-deferral") f.BoolVar(&operation.Refresh, "refresh", true, "refresh") + f.BoolVar(&operation.Light, "light", false, "light") f.BoolVar(&operation.destroyRaw, "destroy", false, "destroy") f.BoolVar(&operation.refreshOnlyRaw, "refresh-only", false, "refresh-only") f.Var((*FlagStringSlice)(&operation.targetsRaw), "target", "target") diff --git a/internal/command/arguments/plan.go b/internal/command/arguments/plan.go index d4d2a746a46c..982bafa05ad3 100644 --- a/internal/command/arguments/plan.go +++ b/internal/command/arguments/plan.go @@ -4,6 +4,7 @@ package arguments import ( + "github.com/hashicorp/terraform/internal/plans" "github.com/hashicorp/terraform/internal/tfdiags" ) @@ -30,6 +31,12 @@ type Plan struct { // be written to. GenerateConfigPath string + // Light enables "light plan" mode, where Terraform skips reading remote + // state for resources that have not changed in the local configuration + // or local state. 
The user is telling Terraform to trust that nothing + // has been modified outside of the local configuration. + Light bool + // ViewType specifies which output format to use ViewType ViewType } @@ -82,6 +89,24 @@ func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { diags = diags.Append(plan.Operation.Parse()) + // Validate -light flag compatibility + if plan.Light { + if plan.Operation.PlanMode == plans.RefreshOnlyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "The -light and -refresh-only options are mutually exclusive. Light mode skips refreshing unchanged resources, while refresh-only mode requires refreshing all resources.", + )) + } + if plan.Operation.PlanMode == plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "The -light and -destroy options are mutually exclusive. A destroy plan requires reading the current state of all resources.", + )) + } + } + // JSON view currently does not support input, so we disable it here if json { plan.InputEnabled = false diff --git a/internal/command/arguments/plan_test.go b/internal/command/arguments/plan_test.go index e7fe6f401fdd..9664ead6fd0f 100644 --- a/internal/command/arguments/plan_test.go +++ b/internal/command/arguments/plan_test.go @@ -50,6 +50,23 @@ func TestParsePlan_basicValid(t *testing.T) { }, }, }, + "light mode": { + []string{"-light"}, + &Plan{ + DetailedExitCode: false, + InputEnabled: true, + Light: true, + OutPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, "JSON view disables input": { []string{"-json"}, &Plan{ @@ -96,6 +113,26 @@ func TestParsePlan_invalid(t *testing.T) { } } +func TestParsePlan_lightWithDestroy(t *testing.T) { + _, diags := ParsePlan([]string{"-light", "-destroy"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") 
+ } + if got, want := diags.Err().Error(), "mutually exclusive"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } +} + +func TestParsePlan_lightWithRefreshOnly(t *testing.T) { + _, diags := ParsePlan([]string{"-light", "-refresh-only"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "mutually exclusive"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } +} + func TestParsePlan_tooManyArguments(t *testing.T) { got, diags := ParsePlan([]string{"saved.tfplan"}) if len(diags) == 0 { diff --git a/internal/command/plan.go b/internal/command/plan.go index 4172ff884872..14bc51cf3511 100644 --- a/internal/command/plan.go +++ b/internal/command/plan.go @@ -86,6 +86,9 @@ func (c *PlanCommand) Run(rawArgs []string) int { return 1 } + // Light plan mode: skip refreshing unchanged resources + opReq.PlanLight = args.Light + // Collect variable value and add them to the operation request diags = diags.Append(c.GatherVariables(opReq, args.Vars)) if diags.HasErrors() { @@ -150,6 +153,7 @@ func (c *PlanCommand) OperationRequest( opReq.Hooks = view.Hooks() opReq.PlanRefresh = args.Refresh opReq.PlanOutPath = planOutPath + opReq.PlanLight = args.Light opReq.GenerateConfigOut = generateConfigOut opReq.Targets = args.Targets opReq.ForceReplace = args.ForceReplace @@ -234,6 +238,14 @@ Plan Customization Options: planning faster, but at the expense of possibly planning against a stale record of the remote system state. + -light Enable light plan mode. In this mode, Terraform skips + reading remote state for resources that have not changed + in the local configuration or local state. This is useful + when you trust that nothing has been modified outside of + the local Terraform configuration, and can significantly + speed up planning for large configurations. Incompatible + with -refresh-only and -destroy. 
+ -replace=resource Force replacement of a particular resource instance using its resource address. If the plan would've normally produced an update or no-op action for this instance, diff --git a/internal/plans/plan.go b/internal/plans/plan.go index 0995c0f1caf3..d18e80b75eda 100644 --- a/internal/plans/plan.go +++ b/internal/plans/plan.go @@ -169,6 +169,11 @@ type Plan struct { // and builtin calls which may access external state so that calls during // apply can be checked for consistency. FunctionResults []lang.FunctionResultHash + + // Light is true if this plan was created in "light" mode, where + // Terraform skipped reading remote state for resources that have not + // changed in the local configuration and the state dependents of those resources. + Light bool } // ProviderAddrs returns a list of all of the provider configuration addresses diff --git a/internal/terraform/context_apply_test.go b/internal/terraform/context_apply_test.go index b1099dfa1b25..7732c6edaf03 100644 --- a/internal/terraform/context_apply_test.go +++ b/internal/terraform/context_apply_test.go @@ -10426,6 +10426,7 @@ func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), }, + Parallelism: 1, }) plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go index d6861f8fc2fd..17bb0c5206ca 100644 --- a/internal/terraform/context_plan.go +++ b/internal/terraform/context_plan.go @@ -27,6 +27,38 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) +// nodePlanContext holds contextual flags that influence how individual resource +// nodes behave during the plan walk. It is derived from PlanOpts and +// copied into every resource node. Each node may further modify its own copy of this +// struct, or/and pass it to child nodes. 
+type nodePlanContext struct { + lightMode bool + skipPlanChanges bool + + // skipRefresh indicates that we should skip refreshing managed resources + skipRefresh bool + + // preDestroyRefresh indicates that we are executing the refresh which + // happens immediately before a destroy plan, which happens to use the + // normal planing mode so skipPlanChanges cannot be set. + preDestroyRefresh bool +} + +func (pc nodePlanContext) withSkipPlanChanges(v bool) nodePlanContext { + pc.skipPlanChanges = v + return pc +} + +func (pc nodePlanContext) withSkipRefresh(v bool) nodePlanContext { + pc.skipRefresh = v + return pc +} + +func (pc nodePlanContext) withPreDestroyRefresh(v bool) nodePlanContext { + pc.preDestroyRefresh = v + return pc +} + // PlanOpts are the various options that affect the details of how Terraform // will build a plan. type PlanOpts struct { @@ -41,6 +73,12 @@ type PlanOpts struct { // instance using its corresponding provider. SkipRefresh bool + // LightMode enables terraform to plan each resource against local state first, + // if the result is a NoOp the expensive remote-state refresh is skipped entirely. + // Resources whose local plan shows changes are still refreshed and + // re-planned so the final diff is accurate. + LightMode bool + // PreDestroyRefresh indicated that this is being passed to a plan used to // refresh the state immediately before a destroy plan. // FIXME: This is a temporary fix to allow the pre-destroy refresh to @@ -253,6 +291,22 @@ func (c *Context) PlanAndEval(config *configs.Config, prevRunState *states.State )) return nil, nil, diags } + if opts.LightMode && opts.Mode != plans.NormalMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "Light plan mode is only compatible with normal planning mode. 
It cannot be used with -refresh-only or -destroy.", + )) + return nil, nil, diags + } + if opts.LightMode { + log.Printf("[INFO] Light plan mode enabled: skipping refresh for all resources") + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Light plan mode is in effect", + "You are creating a plan with the -light option, which skips reading the current state of remote resources. The resulting plan may not detect changes made outside of Terraform (drift). Use a normal plan to get a complete view of all changes.", + )) + } if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode { // The other modes don't generate no-op or update actions that we might // upgrade to be "replace", so doesn't make sense to combine those. @@ -335,6 +389,14 @@ The -target option is not for routine use, and is provided only for exceptional } } + if opts.LightMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Light plan mode is in effect", + `You are creating a plan with the light mode, which means that the result of this plan may not represent all of the changes that may have occurred outside of Terraform.`, + )) + } + var plan *plans.Plan var planDiags tfdiags.Diagnostics var evalScope *lang.Scope @@ -459,6 +521,17 @@ func (c *Context) checkApplyGraph(plan *plans.Plan, config *configs.Config, opts return diags } +func (opts *PlanOpts) nodeContext() nodePlanContext { + if opts == nil { + return nodePlanContext{} + } + return nodePlanContext{ + lightMode: opts.LightMode, + skipRefresh: opts.SkipRefresh, + preDestroyRefresh: opts.PreDestroyRefresh, + } +} + var DefaultPlanOpts = &PlanOpts{ Mode: plans.NormalMode, } @@ -880,6 +953,7 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o Checks: states.NewCheckResults(walker.Checks), Timestamp: timestamp, FunctionResults: funcResults.GetHashes(), + Light: opts.LightMode, // Other fields get populated by Context.Plan after we return } @@ -1006,8 +1080,7 @@ func (c *Context) 
planGraph(config *configs.Config, prevRunState *states.State, Plugins: c.plugins, Targets: opts.Targets, ForceReplace: opts.ForceReplace, - skipRefresh: opts.SkipRefresh, - preDestroyRefresh: opts.PreDestroyRefresh, + planCtx: opts.nodeContext(), Operation: walkPlan, ExternalReferences: opts.ExternalReferences, Overrides: opts.Overrides, @@ -1022,6 +1095,8 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, }).Build(addrs.RootModuleInstance) return graph, walkPlan, diags case plans.RefreshOnlyMode: + nodeCtx := opts.nodeContext(). + withSkipPlanChanges(true) // this activates "refresh only" mode. graph, diags := (&PlanGraphBuilder{ Config: config, State: prevRunState, @@ -1030,8 +1105,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, Plugins: c.plugins, Targets: append(opts.Targets, opts.ActionTargets...), ActionTargets: opts.ActionTargets, - skipRefresh: opts.SkipRefresh, - skipPlanChanges: true, // this activates "refresh only" mode. 
+ planCtx: nodeCtx, Operation: walkPlan, ExternalReferences: opts.ExternalReferences, Overrides: opts.Overrides, @@ -1047,7 +1121,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, ExternalProviderConfigs: externalProviderConfigs, Plugins: c.plugins, Targets: opts.Targets, - skipRefresh: opts.SkipRefresh, + planCtx: opts.nodeContext(), Operation: walkPlanDestroy, Overrides: opts.Overrides, SkipGraphValidation: c.graphOpts.SkipGraphValidation, diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go index d7f5eaae5ce2..4cb047f1fb37 100644 --- a/internal/terraform/context_plan2_test.go +++ b/internal/terraform/context_plan2_test.go @@ -5,9 +5,11 @@ package terraform import ( "bytes" + "encoding/json" "errors" "fmt" "path/filepath" + "slices" "sort" "strings" "sync" @@ -993,8 +995,7 @@ resource "test_object" "a" { }) plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - SkipRefresh: false, // the default + Mode: plans.DestroyMode, }) tfdiags.AssertNoErrors(t, diags) @@ -7891,3 +7892,252 @@ locals { }, })) } + +func TestContext2Plan_lightModePartialUpdate(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "unchanged" { + value = "original1" + } + + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + if req.PriorState.GetAttr("value").AsString() == "original1" { + t.Errorf("unexpected read resource request for unchanged resource") + } + value := 
req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.unchanged"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original1"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original2"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + LightMode: true, + }) + + tfdiags.AssertNoErrors(t, diags) + + // Verify the plan changes + change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed")) + if change.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action) + } + + unchanged := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.unchanged")) + if unchanged.Action != plans.NoOp { + t.Fatalf("expected no-op action for 'test_object.unchanged', got: %v", unchanged.Action) + } + + // Verify the read resource request values + if cmp.Diff(reqs, []string{"original2"}) != "" { + t.Fatalf("unexpected read resource request values: %v", reqs) + } +} + +func TestContext2Plan_lightModePartialUpdate2(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + 
p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + value := req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original2"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + LightMode: true, + }) + + tfdiags.AssertNoErrors(t, diags) + + // Verify the plan changes + change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed")) + if change.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action) + } +} + +func TestContext2Plan_lightModeUpgradedSchema(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "unchanged" { + value = "original1" + } + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: 
map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + ResourceTypeSchemaVersions: map[string]uint64{ + "test_object": 2, // indicates that the schema has been upgraded + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + value := req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + // We should've been given the prior state JSON as our input to upgrade. + if !bytes.Contains(req.RawStateJSON, []byte("original")) { + t.Fatalf("UpgradeResourceState request doesn't contain the original state JSON") + } + mp := make(map[string]any) + err := json.Unmarshal(req.RawStateJSON, &mp) + if err != nil { + t.Fatalf("failed to unmarshal state JSON: %s", err) + } + val := cty.StringVal(mp["value"].(string)) + + // We'll put something different in "value" as part of upgrading, just + // so that we can verify that a full plan is forced when a state upgrade is done. 
+		if bytes.Contains(req.RawStateJSON, []byte("original1")) {
+			val = cty.StringVal("upgraded")
+		}
+		resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{"value": val})
+		return resp
+	}
+
+	state := states.BuildState(func(s *states.SyncState) {
+		s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.unchanged"), &states.ResourceInstanceObjectSrc{
+			AttrsJSON: []byte(`{"value":"original1"}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
+
+		s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{
+			AttrsJSON: []byte(`{"value":"original2"}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
+	})
+
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	plan, diags := ctx.Plan(m, state, &PlanOpts{
+		Mode:      plans.NormalMode,
+		LightMode: true,
+	})
+
+	tfdiags.AssertNoErrors(t, diags)
+
+	// Verify the plan changes
+	change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed"))
+	if change.Action != plans.Update {
+		t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action)
+	}
+
+	unchanged := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.unchanged"))
+	if unchanged.Action != plans.Update {
+		t.Fatalf("expected update action for 'test_object.unchanged', got: %v", unchanged.Action)
+	}
+
+	// Verify the read resource request values. The unchanged resource should
+	// have been upgraded to "upgraded", causing the full plan cycle to
+	// be triggered.
+	slices.Sort(reqs)
+	if cmp.Diff(reqs, []string{"original2", "upgraded"}) != "" {
+		t.Fatalf("unexpected read resource request values: %v", reqs)
+	}
+}
diff --git a/internal/terraform/graph_builder_plan.go b/internal/terraform/graph_builder_plan.go
index 3ef6ae3e186a..910c75962a3c 100644
--- a/internal/terraform/graph_builder_plan.go
+++ b/internal/terraform/graph_builder_plan.go
@@ -60,19 +60,9 @@ type PlanGraphBuilder struct {
 	// action instead. Create and Delete actions are not affected.
 	ForceReplace []addrs.AbsResourceInstance
 
-	// skipRefresh indicates that we should skip refreshing managed resources
-	skipRefresh bool
-
-	// preDestroyRefresh indicates that we are executing the refresh which
-	// happens immediately before a destroy plan, which happens to use the
-	// normal planing mode so skipPlanChanges cannot be set.
-	preDestroyRefresh bool
-
-	// skipPlanChanges indicates that we should skip the step of comparing
-	// prior state with configuration and generating planned changes to
-	// resource instances. (This is for the "refresh only" planning mode,
-	// where we _only_ do the refresh step.)
-	skipPlanChanges bool
+	// planCtx carries per-node planning context flags that are
+	// propagated to individual resource nodes during graph building.
+ planCtx nodePlanContext ConcreteProvider ConcreteProviderNodeFunc ConcreteResource ConcreteResourceNodeFunc @@ -210,7 +200,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { &LocalTransformer{Config: b.Config}, &OutputTransformer{ Config: b.Config, - RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh, + RefreshOnly: b.planCtx.skipPlanChanges || b.planCtx.preDestroyRefresh, Destroying: b.Operation == walkPlanDestroy, Overrides: b.Overrides, AllowRootEphemeralOutputs: b.AllowRootEphemeralOutputs, @@ -338,10 +328,8 @@ func (b *PlanGraphBuilder) initPlan() { a.overridePreventDestroy = b.overridePreventDestroy return &nodeExpandPlannableResource{ NodeAbstractResource: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, - preDestroyRefresh: b.preDestroyRefresh, forceReplace: b.ForceReplace, + planCtx: b.planCtx, } } @@ -349,10 +337,9 @@ func (b *PlanGraphBuilder) initPlan() { a.overridePreventDestroy = b.overridePreventDestroy return &NodePlannableResourceInstanceOrphan{ NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, forgetResources: b.forgetResources, forgetModules: b.forgetModules, + planCtx: b.planCtx, } } @@ -362,10 +349,9 @@ func (b *PlanGraphBuilder) initPlan() { NodeAbstractResourceInstance: a, DeposedKey: key, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, forgetResources: b.forgetResources, forgetModules: b.forgetModules, + planCtx: b.planCtx, } } } @@ -377,7 +363,7 @@ func (b *PlanGraphBuilder) initDestroy() { a.overridePreventDestroy = b.overridePreventDestroy return &NodePlanDestroyableResourceInstance{ NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, + planCtx: b.planCtx, } } } @@ -424,12 +410,13 @@ func (b *PlanGraphBuilder) initImport() { // not going to combine importing with other changes. This is // temporary to try and maintain existing import behaviors, but // planning will need to be allowed for more complex configurations. 
-			skipPlanChanges: true,
-
+			//
 			// We also skip refresh for now, since the plan output is written
 			// as the new state, and users are not expecting the import process
 			// to update any other instances in state.
-			skipRefresh: true,
+			planCtx: b.planCtx.
+				withSkipPlanChanges(true).
+				withSkipRefresh(true),
 		}
 	}
 }
diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go
index 332e61f0eb4b..f0d35093d64d 100644
--- a/internal/terraform/node_resource_abstract_instance.go
+++ b/internal/terraform/node_resource_abstract_instance.go
@@ -44,8 +44,6 @@ type NodeAbstractResourceInstance struct {
 
 	Dependencies []addrs.ConfigResource
 
-	preDestroyRefresh bool
-
 	// During import (or query) we may generate configuration for a resource, which needs
 	// to be stored in the final change.
 	generatedConfigHCL string
@@ -791,6 +789,7 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey state
 
 func (n *NodeAbstractResourceInstance) plan(
 	ctx EvalContext,
+	planCtx nodePlanContext,
 	plannedChange *plans.ResourceInstanceChange,
 	currentState *states.ResourceInstanceObject,
 	createBeforeDestroy bool,
@@ -830,11 +829,7 @@ func (n *NodeAbstractResourceInstance) plan(
 	}
 
 	config := *n.Config
-
-	checkRuleSeverity := tfdiags.Error
-	if n.preDestroyRefresh {
-		checkRuleSeverity = tfdiags.Warning
-	}
+	checkRuleSeverity := getCheckRuleSeverity(planCtx)
 
 	if plannedChange != nil {
 		// If we already planned the action, we stick to that plan
@@ -843,7 +838,6 @@ func (n *NodeAbstractResourceInstance) plan(
 
 	// Evaluate the configuration
 	forEach, _, _ := evaluateForEachExpression(n.Config.ForEach, ctx, false)
-
 	keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
 
 	checkDiags := evalCheckRules(
@@ -3034,6 +3028,7 @@ func getAction(addr addrs.AbsResourceInstance, priorVal, plannedNewVal cty.Value
 			actionReason = plans.ResourceInstanceReplaceBecauseCannotUpdate
 		}
 	case eq && !forceReplace:
+		// TODO: What if the resource has force replace?
 		action = plans.NoOp
 	default:
 		action = plans.Update
diff --git a/internal/terraform/node_resource_apply_instance.go b/internal/terraform/node_resource_apply_instance.go
index ee551a1e49a6..2d3beb811fd3 100644
--- a/internal/terraform/node_resource_apply_instance.go
+++ b/internal/terraform/node_resource_apply_instance.go
@@ -34,6 +34,9 @@ type NodeApplyableResourceInstance struct {
 	// forceReplace indicates that this resource is being replaced for external
 	// reasons, like a -replace flag or via replace_triggered_by.
 	forceReplace bool
+
+	// planCtx is the plan context for this resource instance.
+	planCtx nodePlanContext
 }
 
 var (
@@ -265,7 +268,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
 
 	// Make a new diff, in case we've learned new values in the state
 	// during apply which we can now incorporate.
-	diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace)
+	diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, n.planCtx, diff, state, false, n.forceReplace)
 	diags = diags.Append(planDiags)
 	if diags.HasErrors() {
 		return diags
diff --git a/internal/terraform/node_resource_destroy_deposed.go b/internal/terraform/node_resource_destroy_deposed.go
index 2fdf6fccfa04..ad179a6906be 100644
--- a/internal/terraform/node_resource_destroy_deposed.go
+++ b/internal/terraform/node_resource_destroy_deposed.go
@@ -36,12 +36,10 @@ type NodePlanDeposedResourceInstanceObject struct {
 	*NodeAbstractResourceInstance
 
 	DeposedKey states.DeposedKey
 
-	// skipRefresh indicates that we should skip refreshing individual instances
-	skipRefresh bool
-
-	// skipPlanChanges indicates we should skip trying to plan change actions
-	// for any instances.
-	skipPlanChanges bool
+	// planCtx carries per-node planning context flags (e.g. light-mode,
+	// skip-refresh, pre-destroy-refresh, skip-plan-changes).
+	// See the nodePlanContext type for details on the individual fields.
+ planCtx nodePlanContext // forgetResources lists resources that should not be destroyed, only removed // from state. @@ -134,7 +132,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk // logic here is a bit overloaded. // // We also don't refresh when forgetting instances, as it is unnecessary. - if !n.skipRefresh && op != walkPlanDestroy && !forget { + if !n.planCtx.skipRefresh && !n.planCtx.lightMode && op != walkPlanDestroy && !forget { // Refresh this object even though it may be destroyed, in // case it's already been deleted outside of Terraform. If this is a // normal plan, providers expect a Read request to remove missing @@ -162,7 +160,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk } } - if !n.skipPlanChanges { + if !n.planCtx.skipPlanChanges { var change *plans.ResourceInstanceChange var pDiags tfdiags.Diagnostics var planDeferred *providers.Deferred diff --git a/internal/terraform/node_resource_partial_plan.go b/internal/terraform/node_resource_partial_plan.go index 9c4c4d86fa08..a51efd733521 100644 --- a/internal/terraform/node_resource_partial_plan.go +++ b/internal/terraform/node_resource_partial_plan.go @@ -52,11 +52,10 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know // And add a node to the graph for this resource. 
g.Add(&nodePlannablePartialExpandedResource{ - addr: resourceAddr, - config: n.Config, - resolvedProvider: n.ResolvedProvider, - skipPlanChanges: n.skipPlanChanges, - preDestroyRefresh: n.preDestroyRefresh, + addr: resourceAddr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + planCtx: n.planCtx, }) } @@ -69,6 +68,8 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know state := ss.Lock() defer ss.Unlock() + // TODO(sams): Comment why we need to skip plan changes + planCtx := n.planCtx.withSkipPlanChanges(true) Resources: for _, res := range state.Resources(n.Addr) { @@ -87,7 +88,7 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know // Then each of the instances is a "maybe orphan" // instance, and we need to add a node for that. maybeOrphanResources.Add(res.Addr.Instance(key)) - g.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), true)(NewNodeAbstractResourceInstance(res.Addr.Instance(key)))) + g.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), planCtx)(NewNodeAbstractResourceInstance(res.Addr.Instance(key)))) } // Move onto the next resource. @@ -284,7 +285,7 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr DynamicTransformer(func(graph *Graph) error { // We'll add a node for all the known instance keys. 
for _, key := range knownInstKeys { - graph.Add(n.concreteResource(ctx, knownImports, unknownImports, n.skipPlanChanges)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + graph.Add(n.concreteResource(ctx, knownImports, unknownImports, n.planCtx)(NewNodeAbstractResourceInstance(addr.Instance(key)))) } return nil }), @@ -295,11 +296,10 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr addr := addr.Module.UnexpandedResource(addr.Resource) graph.Add(&nodePlannablePartialExpandedResource{ - addr: addr, - config: n.Config, - resolvedProvider: n.ResolvedProvider, - skipPlanChanges: n.skipPlanChanges, - preDestroyRefresh: n.preDestroyRefresh, + addr: addr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + planCtx: n.planCtx, }) } return nil @@ -336,7 +336,7 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr // to a known instance but we have unknown keys so we don't // know for sure that it's been deleted. maybeOrphans.Add(addr.Instance(key)) - graph.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), true)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + graph.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.planCtx.withSkipPlanChanges(true))(NewNodeAbstractResourceInstance(addr.Instance(key)))) continue } diff --git a/internal/terraform/node_resource_plan.go b/internal/terraform/node_resource_plan.go index 1b8763c2af35..baa3babf021c 100644 --- a/internal/terraform/node_resource_plan.go +++ b/internal/terraform/node_resource_plan.go @@ -29,14 +29,10 @@ type nodeExpandPlannableResource struct { // on regardless of what the configuration says. 
ForceCreateBeforeDestroy *bool - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - preDestroyRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes) that are + // propagated to concrete resource instance nodes. + planCtx nodePlanContext // forceReplace are resource instance addresses where the user wants to // force generating a replace action. This set isn't pre-filtered, so @@ -507,7 +503,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, steps := []GraphTransformer{ // Expand the count or for_each (if present) &ResourceCountTransformer{ - Concrete: n.concreteResource(ctx, imports, addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.skipPlanChanges), + Concrete: n.concreteResource(ctx, imports, addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.planCtx), Schema: n.Schema, Addr: n.ResourceAddr(), InstanceAddrs: instanceAddrs, @@ -545,7 +541,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, return graph, diags } -func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImports addrs.Map[addrs.AbsResourceInstance, cty.Value], unknownImports addrs.Map[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]], skipPlanChanges bool) func(*NodeAbstractResourceInstance) dag.Vertex { +func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImports addrs.Map[addrs.AbsResourceInstance, cty.Value], unknownImports addrs.Map[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]], planCtx nodePlanContext) func(*NodeAbstractResourceInstance) dag.Vertex { return func(a *NodeAbstractResourceInstance) dag.Vertex { var m 
*NodePlannableResourceInstance @@ -575,7 +571,6 @@ func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImp a.ProviderMetas = n.ProviderMetas a.dependsOn = n.dependsOn a.Dependencies = n.dependencies - a.preDestroyRefresh = n.preDestroyRefresh a.generateConfigPath = n.generateConfigPath m = &NodePlannableResourceInstance{ @@ -585,8 +580,7 @@ func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImp // to force on CreateBeforeDestroy due to dependencies on other // nodes that have it. ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - skipRefresh: n.skipRefresh, - skipPlanChanges: skipPlanChanges, + planCtx: planCtx, forceReplace: slices.ContainsFunc(n.forceReplace, a.Addr.Equal), } @@ -629,8 +623,7 @@ func (n *nodeExpandPlannableResource) concreteResourceOrphan(a *NodeAbstractReso return &NodePlannableResourceInstanceOrphan{ NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, + planCtx: n.planCtx, } } diff --git a/internal/terraform/node_resource_plan_destroy.go b/internal/terraform/node_resource_plan_destroy.go index 0962ce8fa853..aadbb58ab611 100644 --- a/internal/terraform/node_resource_plan_destroy.go +++ b/internal/terraform/node_resource_plan_destroy.go @@ -20,8 +20,10 @@ import ( type NodePlanDestroyableResourceInstance struct { *NodeAbstractResourceInstance - // skipRefresh indicates that we should skip refreshing - skipRefresh bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext } var ( @@ -85,7 +87,7 @@ func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalCon // running a normal plan walk when refresh is enabled. These two // conditionals must agree (be exactly opposite) in order to get the // correct behavior in both cases. 
- if n.skipRefresh { + if n.planCtx.skipRefresh || n.planCtx.lightMode { diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState)) if diags.HasErrors() { return diags diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index bccc9027d897..30cfb74ab4d3 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -33,12 +33,10 @@ type NodePlannableResourceInstance struct { *NodeAbstractResourceInstance ForceCreateBeforeDestroy bool - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext // forceReplace indicates that this resource is being replaced for external // reasons, like a -replace flag or via replace_triggered_by. 
@@ -72,8 +70,10 @@ func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperatio switch addr.Resource.Resource.Mode { case addrs.ManagedResourceMode: return n.managedResourceExecute(ctx) + // return n.Execute2(ctx, op) case addrs.DataResourceMode: return n.dataResourceExecute(ctx) + // return n.Execute2(ctx, op) case addrs.EphemeralResourceMode: return n.ephemeralResourceExecute(ctx) case addrs.ListResourceMode: @@ -100,13 +100,10 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di return diags } - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) deferrals := ctx.Deferrals() - change, state, deferred, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges, deferrals.ShouldDeferResourceInstanceChanges(addr, n.Dependencies)) + change, state, deferred, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.planCtx.skipPlanChanges, deferrals.ShouldDeferResourceInstanceChanges(addr, n.Dependencies)) diags = diags.Append(planDiags) if diags.HasErrors() { return diags @@ -179,13 +176,9 @@ func (n *NodePlannableResourceInstance) ephemeralResourceExecute(ctx EvalContext func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { config := n.Config addr := n.ResourceInstanceAddr() - var instanceRefreshState *states.ResourceInstanceObject - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) diags = diags.Append(err) @@ -200,67 +193,55 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } } - importing := n.importTarget != cty.NilVal && !n.preDestroyRefresh + importing := 
n.importTarget != cty.NilVal && !n.planCtx.preDestroyRefresh - var deferred *providers.Deferred + // Read or import the existing state of the resource instance. + instanceRefreshState, importTarget, deferred, readDiags := n.readExistingState(ctx, provider, providerSchema) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return diags + } - // If the resource is to be imported, we now ask the provider for an Import - // and a Refresh, and save the resulting state to instanceRefreshState. + // If the resource is deferred due to unknown config, but we are supposed to + // generate config, we return here: there is no configuration to plan or + // validate yet, and config generation cannot proceed with unknown values. + if deferred != nil && n.Config == nil && len(n.generateConfigPath) > 0 { + return diags + } - if importing { - if n.importTarget.IsWhollyKnown() { - var importDiags tfdiags.Diagnostics - instanceRefreshState, deferred, importDiags = n.importState(ctx, addr, n.importTarget, provider, providerSchema) - diags = diags.Append(importDiags) - } else { - // Otherwise, just mark the resource as deferred without trying to - // import it. - deferred = &providers.Deferred{ - Reason: providers.DeferredReasonResourceConfigUnknown, - } - if n.Config == nil && len(n.generateConfigPath) > 0 { - // Then we're supposed to be generating configuration for this - // resource, but we can't because the configuration is unknown. - // - // Normally, the rest of this function would just be about - // planning the known configuration to make sure everything we - // do know about it is correct, but we can't even do that here. - // - // What we'll do is write out the address as being deferred with - // an entirely unknown value. Then we'll skip the rest of this - // function. (a) We're going to panic later when it complains - // about having no configuration, and (b) the rest of the - // function isn't doing anything as there is no configuration - // to validate. 
+ // Now that we have the state value: in light mode, + // we start by planning the resource, and if it is a no-op, we skip the read step. + if n.planCtx.lightMode { + var plannedChange *plans.ResourceInstanceChange + var planDiags tfdiags.Diagnostics + plannedChange, deferred, planDiags = n.planManagedResource( + ctx, + instanceRefreshState, + deferred, + importTarget, + false, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } - impliedType := providerSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType() - ctx.Deferrals().ReportResourceInstanceDeferred(addr, providers.DeferredReasonResourceConfigUnknown, &plans.ResourceInstanceChange{ - Addr: addr, - PrevRunAddr: addr, - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - Action: plans.NoOp, // assume we'll get the config generation correct. - Before: cty.NullVal(impliedType), - After: cty.UnknownVal(impliedType), - Importing: &plans.Importing{ - Target: n.importTarget, - }, - }, - }) - return diags - } + // If the planned change is a no-op, write the change and return. + if plannedChange.Action == plans.NoOp { + diags = diags.Append(n.writeChange(ctx, plannedChange, "")) + return diags } - } else { - var readDiags tfdiags.Diagnostics - instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr) - diags = diags.Append(readDiags) - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) + + // If the plan is deferred, we can just return here. + // TODO(sams): Is there a scenario where a prior val results in deferral, but + // refresh may have prevented that? 
+ if deferred != nil { return diags } + + // + // Otherwise we continue with the read step, + // which will reconcile the local state and config with the remote state } if deferred == nil { @@ -305,41 +286,20 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Refresh, maybe // The import process handles its own refresh - if !n.skipRefresh && !importing { + if !n.planCtx.skipRefresh && !importing { var refreshDiags tfdiags.Diagnostics - instanceRefreshState, refreshDeferred, refreshDiags = n.refresh(ctx, states.NotDeposed, instanceRefreshState, ctx.Deferrals().DeferralAllowed()) + instanceRefreshState, refreshDeferred, refreshDiags = n.refreshState(ctx, deferred, instanceRefreshState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags } - if instanceRefreshState != nil { - // When refreshing we start by merging the stored dependencies and - // the configured dependencies. The configured dependencies will be - // stored to state once the changes are applied. If the plan - // results in no changes, we will re-write these dependencies - // below. 
- instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies) - } - if deferred == nil && refreshDeferred != nil { deferred = refreshDeferred } - - if deferred == nil { - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - } - - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) - return diags - } } - if n.skipRefresh && !importing && updatedCBD { + if (n.planCtx.skipRefresh || n.planCtx.lightMode) && !importing && updatedCBD { // CreateBeforeDestroy must be set correctly in the state which is used // to create the apply graph, so if we did not refresh the state make // sure we still update any changes to CreateBeforeDestroy. @@ -354,159 +314,16 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } // Plan the instance, unless we're in the refresh-only mode - if !n.skipPlanChanges { - - // add this instance to n.forceReplace if replacement is triggered by - // another change - repData := instances.RepetitionData{} - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - repData.CountIndex = k.Value() - case addrs.StringKey: - repData.EachKey = k.Value() - repData.EachValue = cty.DynamicVal - } - - diags = diags.Append(n.replaceTriggered(ctx, repData)) - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) - return diags - } - - change, instancePlanState, planDeferred, repeatData, planDiags := n.plan( - ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + if !n.planCtx.skipPlanChanges { + var planDiags tfdiags.Diagnostics + _, deferred, planDiags = n.planManagedResource( + ctx, + 
instanceRefreshState, + deferred, + importTarget, + true, ) diags = diags.Append(planDiags) - if diags.HasErrors() { - // If we are importing and generating a configuration, we need to - // ensure the change is written out so the configuration can be - // captured. - if planDeferred == nil && len(n.generateConfigPath) > 0 { - // Update our return plan - change := &plans.ResourceInstanceChange{ - Addr: n.Addr, - PrevRunAddr: n.prevRunAddr(ctx), - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - // we only need a placeholder, so this will be a NoOp - Action: plans.NoOp, - Before: instanceRefreshState.Value, - After: instanceRefreshState.Value, - GeneratedConfig: n.generatedConfigHCL, - }, - } - diags = diags.Append(n.writeChange(ctx, change, "")) - } - - return diags - } - - if deferred == nil && planDeferred != nil { - deferred = planDeferred - } - - if importing { - // There is a subtle difference between the import by identity - // and the import by ID. When importing by identity, we need to - // make sure to use the complete identity return by the provider - // instead of the (potential) incomplete one from the configuration. - if n.importTarget.Type().IsObjectType() { - change.Importing = &plans.Importing{Target: instanceRefreshState.Identity} - } else { - change.Importing = &plans.Importing{Target: n.importTarget} - } - } - - // FIXME: here we update the change to reflect the reason for - // replacement, but we still overload forceReplace to get the correct - // change planned. - if len(n.replaceTriggeredBy) > 0 { - change.ActionReason = plans.ResourceInstanceReplaceByTriggers - } - - deferrals := ctx.Deferrals() - if deferred != nil { - // Then this resource has been deferred either during the import, - // refresh or planning stage. We'll report the deferral and - // store what we could produce in the deferral tracker. 
- deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change) - } else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies) { - // We intentionally write the change before the subsequent checks, because - // all of the checks below this point are for problems caused by the - // context surrounding the change, rather than the change itself, and - // so it's helpful to still include the valid-in-isolation change as - // part of the plan as additional context in our error output. - // - // FIXME: it is currently important that we write resource changes to - // the plan (n.writeChange) before we write the corresponding state - // (n.writeResourceInstanceState). - // - // This is because the planned resource state will normally have the - // status of states.ObjectPlanned, which causes later logic to refer to - // the contents of the plan to retrieve the resource data. Because - // there is no shared lock between these two data structures, reversing - // the order of these writes will cause a brief window of inconsistency - // which can lead to a failed safety check. - // - // Future work should adjust these APIs such that it is impossible to - // update these two data structures incorrectly through any objects - // reachable via the terraform.EvalContext API. - diags = diags.Append(n.writeChange(ctx, change, "")) - if diags.HasErrors() { - return diags - } - diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } - - // If this plan resulted in a NoOp, then apply won't have a chance to make - // any changes to the stored dependencies. Since this is a NoOp we know - // that the stored dependencies will have no effect during apply, and we can - // write them out now. 
- if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { - // the refresh state will be the final state for this resource, so - // finalize the dependencies here if they need to be updated. - instanceRefreshState.Dependencies = n.Dependencies - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - if diags.HasErrors() { - return diags - } - } - - // Post-conditions might block completion. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - // (Note that some preconditions will end up being skipped during - // planning, because their conditions depend on values not yet known.) - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - } else { - // The deferrals tracker says that we must defer changes for - // this resource instance, presumably due to a dependency on an - // upstream object that was already deferred. Therefore we just - // report our own deferral (capturing a placeholder value in the - // deferral tracker) and don't add anything to the plan or - // working state. - // In this case, the expression evaluator should use the placeholder - // value registered here as the value of this resource instance, - // instead of using the plan. 
- deferrals.ReportResourceInstanceDeferred(n.Addr, providers.DeferredReasonDeferredPrereq, change) - } } else { // In refresh-only mode we need to evaluate the for-each expression in // order to supply the value to the pre- and post-condition check @@ -568,6 +385,300 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } +func (n *NodePlannableResourceInstance) readExistingState(ctx EvalContext, + provider providers.Interface, + providerSchema providers.ProviderSchema, +) (*states.ResourceInstanceObject, *plans.Importing, *providers.Deferred, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var deferred *providers.Deferred + var existingState *states.ResourceInstanceObject + var importTarget *plans.Importing + addr := n.ResourceInstanceAddr() + importing := n.importTarget != cty.NilVal && !n.planCtx.preDestroyRefresh + + // If the resource is to be imported, we now ask the provider for an Import + // and a Refresh, and save the resulting state to existingState. + if importing { + importTarget = &plans.Importing{Target: n.importTarget} + // importState takes care of refreshing its imported state + if n.importTarget.IsWhollyKnown() { + var importDiags tfdiags.Diagnostics + existingState, deferred, importDiags = n.importState(ctx, addr, n.importTarget, provider, providerSchema) + diags = diags.Append(importDiags) + } else { + // Otherwise, just mark the resource as deferred without trying to + // import it. + deferred = &providers.Deferred{ + Reason: providers.DeferredReasonResourceConfigUnknown, + } + if n.Config == nil && len(n.generateConfigPath) > 0 { + // Then we're supposed to be generating configuration for this + // resource, but we can't because the configuration is unknown. + // + // Normally, the rest of this function would just be about + // planning the known configuration to make sure everything we + // do know about it is correct, but we can't even do that here. 
+ // + // What we'll do is write out the address as being deferred with + // an entirely unknown value. Then we'll skip the rest of this + // function. (a) We're going to panic later when it complains + // about having no configuration, and (b) the rest of the + // function isn't doing anything as there is no configuration + // to validate. + + impliedType := providerSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType() + ctx.Deferrals().ReportResourceInstanceDeferred(addr, providers.DeferredReasonResourceConfigUnknown, &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: plans.NoOp, // assume we'll get the config generation correct. + Before: cty.NullVal(impliedType), + After: cty.UnknownVal(impliedType), + Importing: &plans.Importing{ + Target: n.importTarget, + }, + }, + }) + return nil, importTarget, deferred, diags + } + } + + // There is a subtle difference between the import by identity + // and the import by ID. When importing by identity, we need to + // make sure to use the complete identity return by the provider + // instead of the (potential) incomplete one from the configuration. 
+ if n.importTarget.Type().IsObjectType() && existingState != nil { + importTarget = &plans.Importing{Target: existingState.Identity} + } + + } else { + var readDiags tfdiags.Diagnostics + existingState, readDiags = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(readDiags) + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + return nil, importTarget, deferred, diags + } + } + + return existingState, importTarget, deferred, diags +} + +func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, deferred *providers.Deferred, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // refresh + riNode := &NodeAbstractResourceInstance{ + Addr: n.Addr, + NodeAbstractResource: n.NodeAbstractResource, + override: n.override, + } + refreshedState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state, ctx.Deferrals().DeferralAllowed()) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return refreshedState, deferred, diags + } + + if refreshedState != nil { + // When refreshing we start by merging the stored dependencies and + // the configured dependencies. The configured dependencies will be + // stored to state once the changes are applied. If the plan + // results in no changes, we will re-write these dependencies + // below. + refreshedState.Dependencies = mergeDeps( + n.Dependencies, refreshedState.Dependencies, + ) + } + + if deferred == nil && refreshDeferred != nil { + deferred = refreshDeferred + } + + if deferred == nil { + // Only write the state if the change isn't being deferred. We're also + // reporting the deferred status to the caller, so they should know + // not to read from the state. 
+ diags = diags.Append(n.writeResourceInstanceState(ctx, refreshedState, refreshState)) + } + + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + } + + return refreshedState, refreshDeferred, diags +} + +func (n *NodePlannableResourceInstance) planManagedResource( + ctx EvalContext, + instanceRefreshState *states.ResourceInstanceObject, + deferred *providers.Deferred, + importTarget *plans.Importing, + writeChange bool, +) (*plans.ResourceInstanceChange, *providers.Deferred, tfdiags.Diagnostics) { + + changeWriter := func(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { + return nil + } + if writeChange { + changeWriter = n.writeChange + } + var diags tfdiags.Diagnostics + addr := n.ResourceInstanceAddr() + + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) + + // add this instance to n.forceReplace if replacement is triggered by + // another change + repData := instances.RepetitionData{} + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + repData.CountIndex = k.Value() + case addrs.StringKey: + repData.EachKey = k.Value() + repData.EachValue = cty.DynamicVal + } + + diags = diags.Append(n.replaceTriggered(ctx, repData)) + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + return nil, deferred, diags + } + + change, instancePlanState, planDeferred, repeatData, planDiags := n.plan( + ctx, n.planCtx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + // If we are importing and generating a configuration, we need to + // ensure the change is written out so the 
configuration can be + // captured. + if planDeferred == nil && len(n.generateConfigPath) > 0 { + // Update our return plan + change := &plans.ResourceInstanceChange{ + Addr: n.Addr, + PrevRunAddr: n.prevRunAddr(ctx), + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + // we only need a placeholder, so this will be a NoOp + Action: plans.NoOp, + Before: instanceRefreshState.Value, + After: instanceRefreshState.Value, + GeneratedConfig: n.generatedConfigHCL, + }, + } + diags = diags.Append(changeWriter(ctx, change, "")) + } + + return change, deferred, diags + } + + if deferred == nil && planDeferred != nil { + deferred = planDeferred + } + + change.Importing = importTarget + + // FIXME: here we update the change to reflect the reason for + // replacement, but we still overload forceReplace to get the correct + // change planned. + if len(n.replaceTriggeredBy) > 0 { + change.ActionReason = plans.ResourceInstanceReplaceByTriggers + } + + deferrals := ctx.Deferrals() + // TODO: planning twice means deferral twice, and that is an error. + if deferred != nil { + // Then this resource has been deferred either during the import, + // refresh or planning stage. We'll report the deferral and + // store what we could produce in the deferral tracker. + deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change) + } else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies) { + // We intentionally write the change before the subsequent checks, because + // all of the checks below this point are for problems caused by the + // context surrounding the change, rather than the change itself, and + // so it's helpful to still include the valid-in-isolation change as + // part of the plan as additional context in our error output. + // + // FIXME: it is currently important that we write resource changes to + // the plan (n.writeChange) before we write the corresponding state + // (n.writeResourceInstanceState). 
+ // + // This is because the planned resource state will normally have the + // status of states.ObjectPlanned, which causes later logic to refer to + // the contents of the plan to retrieve the resource data. Because + // there is no shared lock between these two data structures, reversing + // the order of these writes will cause a brief window of inconsistency + // which can lead to a failed safety check. + // + // Future work should adjust these APIs such that it is impossible to + // update these two data structures incorrectly through any objects + // reachable via the terraform.EvalContext API. + diags = diags.Append(changeWriter(ctx, change, "")) + if diags.HasErrors() { + return change, deferred, diags + } + diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) + if diags.HasErrors() { + return change, deferred, diags + } + + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return change, deferred, diags + } + + // If this plan resulted in a NoOp, then apply won't have a chance to make + // any changes to the stored dependencies. Since this is a NoOp we know + // that the stored dependencies will have no effect during apply, and we can + // write them out now. + if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { + // the refresh state will be the final state for this resource, so + // finalize the dependencies here if they need to be updated. + instanceRefreshState.Dependencies = n.Dependencies + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return change, deferred, diags + } + } + + // Post-conditions might block completion. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. 
+ // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + } else { + // The deferrals tracker says that we must defer changes for + // this resource instance, presumably due to a dependency on an + // upstream object that was already deferred. Therefore we just + // report our own deferral (capturing a placeholder value in the + // deferral tracker) and don't add anything to the plan or + // working state. + // In this case, the expression evaluator should use the placeholder + // value registered here as the value of this resource instance, + // instead of using the plan. + deferrals.ReportResourceInstanceDeferred(n.Addr, providers.DeferredReasonDeferredPrereq, change) + } + + return change, deferred, diags +} + // replaceTriggered checks if this instance needs to be replace due to a change // in a replace_triggered_by reference. If replacement is required, the // instance address is added to forceReplace @@ -820,15 +931,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. )) } - // refresh - riNode := &NodeAbstractResourceInstance{ - Addr: n.Addr, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: n.ResolvedProvider, - }, - override: n.override, - } - instanceRefreshState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState, ctx.Deferrals().DeferralAllowed()) + instanceRefreshState, refreshDeferred, refreshDiags := n.refreshState(ctx, deferred, importedState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return instanceRefreshState, deferred, diags @@ -901,12 +1004,6 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. 
} } - if deferred == nil { - // Only write the state if the change isn't being deferred. We're also - // reporting the deferred status to the caller, so they should know - // not to read from the state. - diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - } return instanceRefreshState, deferred, diags } @@ -1125,3 +1222,11 @@ func actionIsTriggeredByEvent(events []configs.ActionTriggerEvent, action plans. } return triggeredEvents } + +func getCheckRuleSeverity(ctx nodePlanContext) tfdiags.Severity { + checkRuleSeverity := tfdiags.Error + if ctx.skipPlanChanges || ctx.preDestroyRefresh { + checkRuleSeverity = tfdiags.Warning + } + return checkRuleSeverity +} diff --git a/internal/terraform/node_resource_plan_orphan.go b/internal/terraform/node_resource_plan_orphan.go index 51364049f232..9a1dde4fa6f4 100644 --- a/internal/terraform/node_resource_plan_orphan.go +++ b/internal/terraform/node_resource_plan_orphan.go @@ -19,12 +19,10 @@ import ( type NodePlannableResourceInstanceOrphan struct { *NodeAbstractResourceInstance - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext // forgetResources lists resources that should not be destroyed, only removed // from state. @@ -122,7 +120,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon } } - if !n.skipRefresh && !forget { + if !n.planCtx.skipRefresh && !n.planCtx.lightMode && !forget { // Refresh this instance even though it is going to be destroyed, in // order to catch missing resources. 
If this is a normal plan, // providers expect a Read request to remove missing resources from the @@ -179,7 +177,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon // refresh indicates the instance no longer exists, there is also nothing // to plan because there is no longer any state and it doesn't exist in the // config. - if n.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { + if n.planCtx.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { return diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState)) } diff --git a/internal/terraform/node_resource_plan_partialexp.go b/internal/terraform/node_resource_plan_partialexp.go index 3a3a09b7a940..d94fdfb75994 100644 --- a/internal/terraform/node_resource_plan_partialexp.go +++ b/internal/terraform/node_resource_plan_partialexp.go @@ -30,11 +30,10 @@ import ( // // This is the partial-expanded equivalent of NodePlannableResourceInstance. type nodePlannablePartialExpandedResource struct { - addr addrs.PartialExpandedResource - config *configs.Resource - resolvedProvider addrs.AbsProviderConfig - skipPlanChanges bool - preDestroyRefresh bool + addr addrs.PartialExpandedResource + config *configs.Resource + resolvedProvider addrs.AbsProviderConfig + planCtx nodePlanContext } var ( @@ -95,7 +94,7 @@ func (n *nodePlannablePartialExpandedResource) Execute(ctx EvalContext, op walkO // need to destroy. return nil case walkPlan: - if n.preDestroyRefresh || n.skipPlanChanges { + if n.planCtx.preDestroyRefresh || n.planCtx.skipPlanChanges { // During any kind of refresh, we also don't really care about // partial resources. We only care about the fully-expanded resources // already in state, so we don't need to plan partial resources. 
@@ -184,7 +183,7 @@ func (n *nodePlannablePartialExpandedResource) managedResourceExecute(ctx EvalCo // - Evaluating the preconditions/postconditions to see if they produce // a definitive fail result even with the partial information. - if n.skipPlanChanges { + if n.planCtx.skipPlanChanges { // If we're supposed to be making a refresh-only plan then there's // not really anything else to do here, since we can only refresh // specific known resource instances (which another graph node should