From 645c55d79f5e2176d330d0455284a10ea174744d Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Sat, 7 Feb 2026 19:09:22 +0100 Subject: [PATCH 1/4] Plan light --- internal/backend/backendrun/operation.go | 1 + internal/backend/local/backend_local.go | 4 + internal/command/arguments/extended.go | 4 + internal/command/plan.go | 1 + internal/terraform/context_plan.go | 12 + internal/terraform/context_plan2_test.go | 256 +++++ internal/terraform/context_walk.go | 3 + internal/terraform/eval_context.go | 2 + internal/terraform/eval_context_builtin.go | 5 + internal/terraform/eval_context_mock.go | 5 + internal/terraform/graph_builder_plan.go | 2 + internal/terraform/graph_walk_context.go | 2 + internal/terraform/node_resource_abstract.go | 25 + internal/terraform/node_resource_manager.go | 54 + .../terraform/node_resource_plan_instance.go | 14 +- .../terraform/node_resource_plan_instance2.go | 937 ++++++++++++++++++ .../node_resource_plan_instance_ds.go | 64 ++ 17 files changed, 1389 insertions(+), 2 deletions(-) create mode 100644 internal/terraform/node_resource_manager.go create mode 100644 internal/terraform/node_resource_plan_instance2.go create mode 100644 internal/terraform/node_resource_plan_instance_ds.go diff --git a/internal/backend/backendrun/operation.go b/internal/backend/backendrun/operation.go index 164f9ef25c87..f2368dfdf3af 100644 --- a/internal/backend/backendrun/operation.go +++ b/internal/backend/backendrun/operation.go @@ -81,6 +81,7 @@ type Operation struct { // backend that will be used when applying the plan. // Only one of PlanOutBackend or PlanOutStateStore may be set. PlanOutBackend *plans.Backend + PlanLight bool // PlanOutStateStore is the state_store to store with the plan. This is the // state store that will be used when applying the plan. 
diff --git a/internal/backend/local/backend_local.go b/internal/backend/local/backend_local.go index 4e900de9efd7..424948b547f6 100644 --- a/internal/backend/local/backend_local.go +++ b/internal/backend/local/backend_local.go @@ -96,6 +96,7 @@ func (b *Local) localRun(op *backendrun.Operation) (*backendrun.LocalRun, *confi stateMeta = &m } log.Printf("[TRACE] backend/local: populating backendrun.LocalRun from plan file") + // TODO: write light option to plan file ret, configSnap, ctxDiags = b.localRunForPlanFile(op, lp, ret, &coreOpts, stateMeta) if ctxDiags.HasErrors() { diags = diags.Append(ctxDiags) @@ -210,6 +211,9 @@ func (b *Local) localRunDirect(op *backendrun.Operation, run *backendrun.LocalRu GenerateConfigPath: op.GenerateConfigOut, DeferralAllowed: op.DeferralAllowed, Query: op.Query, + PlanCtx: terraform.PlanContext{ + LightMode: op.PlanLight, + }, } run.PlanOpts = planOpts diff --git a/internal/command/arguments/extended.go b/internal/command/arguments/extended.go index 0c05e8f0e49f..0ca916427935 100644 --- a/internal/command/arguments/extended.go +++ b/internal/command/arguments/extended.go @@ -63,6 +63,9 @@ type Operation struct { // state before proceeding. Default is true. Refresh bool + // Light + Light bool + // Targets allow limiting an operation to a set of resource addresses and // their dependencies. 
Targets []addrs.Targetable @@ -287,6 +290,7 @@ func extendedFlagSet(name string, state *State, operation *Operation, vars *Vars f.IntVar(&operation.Parallelism, "parallelism", DefaultParallelism, "parallelism") f.BoolVar(&operation.DeferralAllowed, "allow-deferral", false, "allow-deferral") f.BoolVar(&operation.Refresh, "refresh", true, "refresh") + f.BoolVar(&operation.Light, "light", false, "light") f.BoolVar(&operation.destroyRaw, "destroy", false, "destroy") f.BoolVar(&operation.refreshOnlyRaw, "refresh-only", false, "refresh-only") f.Var((*FlagStringSlice)(&operation.targetsRaw), "target", "target") diff --git a/internal/command/plan.go b/internal/command/plan.go index 4172ff884872..ec80a82b2e95 100644 --- a/internal/command/plan.go +++ b/internal/command/plan.go @@ -150,6 +150,7 @@ func (c *PlanCommand) OperationRequest( opReq.Hooks = view.Hooks() opReq.PlanRefresh = args.Refresh opReq.PlanOutPath = planOutPath + opReq.PlanLight = args.Light opReq.GenerateConfigOut = generateConfigOut opReq.Targets = args.Targets opReq.ForceReplace = args.ForceReplace diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go index d6861f8fc2fd..477547415cce 100644 --- a/internal/terraform/context_plan.go +++ b/internal/terraform/context_plan.go @@ -41,6 +41,8 @@ type PlanOpts struct { // instance using its corresponding provider. SkipRefresh bool + PlanCtx PlanContext + // PreDestroyRefresh indicated that this is being passed to a plan used to // refresh the state immediately before a destroy plan. 
// FIXME: This is a temporary fix to allow the pre-destroy refresh to @@ -335,6 +337,14 @@ The -target option is not for routine use, and is provided only for exceptional } } + if opts.PlanCtx.LightMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Light plan mode is in effect", + `You are creating a plan with the light mode, which means that the result of this plan may not represent all of the changes that may have occurred outside of Terraform.`, + )) + } + var plan *plans.Plan var planDiags tfdiags.Diagnostics var evalScope *lang.Scope @@ -797,6 +807,7 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o DeferralAllowed: opts.DeferralAllowed, ExternalDependencyDeferred: opts.ExternalDependencyDeferred, Changes: changes, + PlanCtx: opts.PlanCtx, MoveResults: moveResults, Overrides: opts.Overrides, PlanTimeTimestamp: timestamp, @@ -1019,6 +1030,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, queryPlan: opts.Query, overridePreventDestroy: opts.OverridePreventDestroy, AllowRootEphemeralOutputs: opts.AllowRootEphemeralOutputs, + Ctx: opts.PlanCtx, }).Build(addrs.RootModuleInstance) return graph, walkPlan, diags case plans.RefreshOnlyMode: diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go index d7f5eaae5ce2..62fb35c86cd8 100644 --- a/internal/terraform/context_plan2_test.go +++ b/internal/terraform/context_plan2_test.go @@ -5,9 +5,11 @@ package terraform import ( "bytes" + "encoding/json" "errors" "fmt" "path/filepath" + "slices" "sort" "strings" "sync" @@ -7891,3 +7893,257 @@ locals { }, })) } + +func TestContext2Plan_lightModePartialUpdate(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "unchanged" { + value = "original1" + } + + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + p.GetProviderSchemaResponse = 
getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + if req.PriorState.GetAttr("value").AsString() == "original1" { + t.Errorf("unexpected read resource request for unchanged resource") + } + value := req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.unchanged"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original1"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original2"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + PlanCtx: PlanContext{ + LightMode: true, + }, + }) + + tfdiags.AssertNoErrors(t, diags) + + // Verify the plan changes + change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed")) + if change.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action) + } + + unchanged := 
plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.unchanged")) + if unchanged.Action != plans.NoOp { + t.Fatalf("expected no-op action for 'test_object.unchanged', got: %v", unchanged.Action) + } + + // Verify the read resource request values + if cmp.Diff(reqs, []string{"original2"}) != "" { + t.Fatalf("unexpected read resource request values: %v", reqs) + } +} + +func TestContext2Plan_lightModePartialUpdate2(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + value := req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original2"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + PlanCtx: PlanContext{LightMode: true}, + }) + + tfdiags.AssertNoErrors(t, diags) + + // Verify the plan changes + change := 
plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed")) + if change.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action) + } +} + +func TestContext2Plan_lightModeUpgradedSchema(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "unchanged" { + value = "original1" + } + + resource "test_object" "changed" { + value = "updated" + } +`, + }) + + p := new(testing_provider.MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&providerSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + ResourceTypeSchemaVersions: map[string]uint64{ + "test_object": 2, // indicates that the schema has been upgraded + }, + }) + + reqs := make([]string, 0) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + value := req.PriorState.GetAttr("value").AsString() + reqs = append(reqs, value) + resp.NewState = cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("updated-from-cloud"), + }) + return resp + } + p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + // We should've been given the prior state JSON as our input to upgrade. + if !bytes.Contains(req.RawStateJSON, []byte("original")) { + t.Fatalf("UpgradeResourceState request doesn't contain the original state JSON") + } + mp := make(map[string]any) + err := json.Unmarshal(req.RawStateJSON, &mp) + if err != nil { + t.Fatalf("failed to unmarshal state JSON: %s", err) + } + val := cty.StringVal(mp["value"].(string)) + + // We'll put something different in "value" as part of upgrading, just + // so that we can verify that a full plan is forced when a state upgrade is done. 
+ if bytes.Contains(req.RawStateJSON, []byte("original1")) { + val = cty.StringVal("upgraded") + } + resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{"value": val}) + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.unchanged"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original1"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_object.changed"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"original2"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + PlanCtx: PlanContext{ + LightMode: true, + }, + }) + + tfdiags.AssertNoErrors(t, diags) + + // Verify the plan changes + change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.changed")) + if change.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.changed', got: %v", change.Action) + } + + unchanged := plan.Changes.ResourceInstance(mustResourceInstanceAddr("test_object.unchanged")) + if unchanged.Action != plans.Update { + t.Fatalf("expected update action for 'test_object.unchanged', got: %v", unchanged.Action) + } + + // Verify the read resource request values. THe unchanged resource should + // have been upgraded to "upgraded", causing the full plan cycle to + // be triggered. 
+ slices.Sort(reqs) + if cmp.Diff(reqs, []string{"original2", "upgraded"}) != "" { + t.Fatalf("unexpected read resource request values: %v", reqs) + } +} diff --git a/internal/terraform/context_walk.go b/internal/terraform/context_walk.go index 42e5fc2b0996..8c7f071e2e3a 100644 --- a/internal/terraform/context_walk.go +++ b/internal/terraform/context_walk.go @@ -70,6 +70,8 @@ type graphWalkOpts struct { // the apply phase. PlanTimeTimestamp time.Time + PlanCtx PlanContext + // Overrides contains the set of overrides we should apply during this // operation. Overrides *mocking.Overrides @@ -203,5 +205,6 @@ func (c *Context) graphWalker(graph *Graph, operation walkOperation, opts *graph Forget: opts.Forget, Actions: actions.NewActions(), Deprecations: deprecation.NewDeprecations(), + PlanCtx: opts.PlanCtx, } } diff --git a/internal/terraform/eval_context.go b/internal/terraform/eval_context.go index f8b02e724f0b..aa91874de470 100644 --- a/internal/terraform/eval_context.go +++ b/internal/terraform/eval_context.go @@ -211,6 +211,8 @@ type EvalContext interface { // this execution. Overrides() *mocking.Overrides + PlanCtx() PlanContext + // withScope derives a new EvalContext that has all of the same global // context, but a new evaluation scope. 
withScope(scope evalContextScope) EvalContext diff --git a/internal/terraform/eval_context_builtin.go b/internal/terraform/eval_context_builtin.go index 5f66c0fdb2ed..63b4e9b6f070 100644 --- a/internal/terraform/eval_context_builtin.go +++ b/internal/terraform/eval_context_builtin.go @@ -95,6 +95,7 @@ type BuiltinEvalContext struct { OverrideValues *mocking.Overrides ActionsValue *actions.Actions DeprecationsValue *deprecation.Deprecations + PlanContext PlanContext } // BuiltinEvalContext implements EvalContext @@ -115,6 +116,10 @@ func (ctx *BuiltinEvalContext) StopCtx() context.Context { return ctx.StopContext } +func (ctx *BuiltinEvalContext) PlanCtx() PlanContext { + return ctx.PlanContext +} + func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { for _, h := range ctx.Hooks { action, err := fn(h) diff --git a/internal/terraform/eval_context_mock.go b/internal/terraform/eval_context_mock.go index 20dffe8ef6f4..0a8cbf05cb3d 100644 --- a/internal/terraform/eval_context_mock.go +++ b/internal/terraform/eval_context_mock.go @@ -439,6 +439,11 @@ func (c *MockEvalContext) Overrides() *mocking.Overrides { return c.OverrideValues } +func (c *MockEvalContext) PlanCtx() PlanContext { + // This is a no-op for the mock. + return PlanContext{} +} + func (c *MockEvalContext) Forget() bool { c.ForgetCalled = true return c.ForgetValues diff --git a/internal/terraform/graph_builder_plan.go b/internal/terraform/graph_builder_plan.go index 3ef6ae3e186a..b88b21d24eae 100644 --- a/internal/terraform/graph_builder_plan.go +++ b/internal/terraform/graph_builder_plan.go @@ -63,6 +63,8 @@ type PlanGraphBuilder struct { // skipRefresh indicates that we should skip refreshing managed resources skipRefresh bool + Ctx PlanContext + // preDestroyRefresh indicates that we are executing the refresh which // happens immediately before a destroy plan, which happens to use the // normal planing mode so skipPlanChanges cannot be set. 
diff --git a/internal/terraform/graph_walk_context.go b/internal/terraform/graph_walk_context.go index a8f5dbcbb671..865736392039 100644 --- a/internal/terraform/graph_walk_context.go +++ b/internal/terraform/graph_walk_context.go @@ -53,6 +53,7 @@ type ContextGraphWalker struct { Config *configs.Config PlanTimestamp time.Time Overrides *mocking.Overrides + PlanCtx PlanContext // Forget if set to true will cause the plan to forget all resources. This is // only allowed in the context of a destroy plan. Forget bool @@ -145,6 +146,7 @@ func (w *ContextGraphWalker) EvalContext() EvalContext { PrevRunStateValue: w.PrevRunState, Evaluator: evaluator, OverrideValues: w.Overrides, + PlanContext: w.PlanCtx, forget: w.Forget, ActionsValue: w.Actions, DeprecationsValue: w.Deprecations, diff --git a/internal/terraform/node_resource_abstract.go b/internal/terraform/node_resource_abstract.go index f4787cb30ef9..95da12a4d84b 100644 --- a/internal/terraform/node_resource_abstract.go +++ b/internal/terraform/node_resource_abstract.go @@ -489,6 +489,31 @@ func (n *NodeAbstractResource) recordResourceData(ctx EvalContext, addr addrs.Ab return diags } +// schemaUpgradeRequired determines if the state representation of a resource needs upgrading +// based on its schema version in state and its current schema. It returns the state, the schema, +// and a boolean indicating if an upgrade is needed. 
+func (n *NodeAbstractResource) schemaUpgradeRequired(ctx EvalContext, providerSchema providers.ProviderSchema, addr addrs.AbsResourceInstance) (bool, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + log.Printf("[TRACE] schemaUpgradeRequired: checking schema version for %s", addr) + + src := ctx.State().ResourceInstanceObject(addr, addrs.NotDeposed) + if src == nil { + // No state to upgrade + log.Printf("[TRACE] schemaUpgradeRequired: no state present for %s", addr) + return false, diags + } + + schema := providerSchema.SchemaForResourceAddr(addr.Resource.ContainingResource()) + if schema.Body == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return false, diags.Append(fmt.Errorf("no schema available for %s while checking for upgrades; this is a bug in Terraform and should be reported", addr)) + } + + // Check if the schema version in state matches the current schema version + upgradeRequired := src.SchemaVersion != uint64(schema.Version) + return upgradeRequired, diags +} + // readResourceInstanceState reads the current object for a specific instance in // the state. func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { diff --git a/internal/terraform/node_resource_manager.go b/internal/terraform/node_resource_manager.go new file mode 100644 index 000000000000..fabd41e60664 --- /dev/null +++ b/internal/terraform/node_resource_manager.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package terraform + +import "github.com/hashicorp/terraform/internal/tfdiags" + +// ResourceState is an interface that defines the contract for executing +// a resource state. It takes a context, a node, and resource data as input +// and returns a new resource state and any diagnostics that occurred during +// the execution. 
+type ResourceState[T any] interface { + Execute(ctx EvalContext, node T, data *ResourceData) (ResourceState[T], tfdiags.Diagnostics) +} + +// ResourceStateManager is a generic state manager for resource instances +// It manages the state of a resource instance and its transitions +// between different states. +type ResourceStateManager[T any] struct { + node T + data *ResourceData + hooks []func(ResourceState[T], *ResourceStateManager[T]) +} + +func NewResourceStateManager[T any](node T) *ResourceStateManager[T] { + return &ResourceStateManager[T]{ + node: node, + data: &ResourceData{}, + hooks: []func(ResourceState[T], *ResourceStateManager[T]){}, + } +} + +func (m *ResourceStateManager[T]) AddHook(hook func(ResourceState[T], *ResourceStateManager[T])) { + m.hooks = append(m.hooks, hook) +} + +func (m *ResourceStateManager[T]) Execute(start ResourceState[T], ctx EvalContext) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // Start with initial state + currentState := start + + // Execute state transitions until completion or error + for currentState != nil && !diags.HasErrors() { + for _, hook := range m.hooks { + hook(currentState, m) + } + var stateDiags tfdiags.Diagnostics + currentState, stateDiags = currentState.Execute(ctx, m.node, m.data) + diags = diags.Append(stateDiags) + } + + return diags +} diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index bccc9027d897..d1ddcf32c915 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -26,6 +26,14 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) +type PlanContext struct { + // PlanMode is the mode of the plan. This is used to determine how + // the plan is executed and what actions are taken. + PlanMode plans.Mode + + LightMode bool +} + // NodePlannableResourceInstance represents a _single_ resource // instance that is plannable. 
This means this represents a single // count index, for example. @@ -71,9 +79,11 @@ func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperatio // Eval info is different depending on what kind of resource this is switch addr.Resource.Resource.Mode { case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx) + // return n.managedResourceExecute(ctx) + return n.Execute2(ctx, op) case addrs.DataResourceMode: - return n.dataResourceExecute(ctx) + // return n.dataResourceExecute(ctx) + return n.Execute2(ctx, op) case addrs.EphemeralResourceMode: return n.ephemeralResourceExecute(ctx) case addrs.ListResourceMode: diff --git a/internal/terraform/node_resource_plan_instance2.go b/internal/terraform/node_resource_plan_instance2.go new file mode 100644 index 000000000000..f614348dc2ef --- /dev/null +++ b/internal/terraform/node_resource_plan_instance2.go @@ -0,0 +1,937 @@ +package terraform + +import ( + "fmt" + "log" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/internal/instances" + "github.com/hashicorp/terraform/internal/lang/ephemeral" + "github.com/hashicorp/terraform/internal/moduletest/mocking" + "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" + "github.com/hashicorp/terraform/internal/providers" + "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +type PlanResourceManager ResourceStateManager[*NodePlannableResourceInstance] + +// ResourceData holds the shared data during execution +type ResourceData struct { + // inputs + Addr addrs.AbsResourceInstance + Importing bool + ImportTarget cty.Value + SkipPlanning bool + LightMode bool + + // these are set during the execution + InstanceRefreshState 
*states.ResourceInstanceObject + Provider providers.Interface + ProviderSchema providers.ProviderSchema + ResourceSchema providers.Schema + Deferred *providers.Deferred + CheckRuleSeverity tfdiags.Severity + RefreshNeeded bool +} + +func (n *NodePlannableResourceInstance) Execute2(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + stateManager := NewResourceStateManager(n) + steps := []ResourceState[*NodePlannableResourceInstance]{} + stateManager.AddHook(func(state ResourceState[*NodePlannableResourceInstance], manager *ResourceStateManager[*NodePlannableResourceInstance]) { + steps = append(steps, state) + }) + init := &InitializationStep{n.ResourceAddr().Resource.Mode} + diags := stateManager.Execute(init, ctx) + + // Log the steps taken + str := strings.Builder{} + str.WriteString(fmt.Sprintf("Executing %s %s", n.Addr, op)) + str.WriteString(fmt.Sprintf(" in %d steps:", len(steps))) + for _, step := range steps { + str.WriteString(fmt.Sprintf(" -> %T", step)) + } + log.Printf("[TRACE] %s\n", str.String()) + return diags +} + +// InitializationStep is the first step in the state machine. +// It initializes the resource data and sets up the provider. +type InitializationStep struct { + Mode addrs.ResourceMode +} + +func (s *InitializationStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Initialize basic data + data.Addr = node.ResourceInstanceAddr() + data.Importing = node.importTarget != cty.NilVal && !node.preDestroyRefresh + data.ImportTarget = node.importTarget + data.SkipPlanning = node.skipPlanChanges + data.LightMode = ctx.PlanCtx().LightMode + data.RefreshNeeded = !node.skipRefresh // by default, refresh is needed, unless asked to skip it. Any step that doesn't need it will set this to false. 
+ + // Determine check rule severity + data.CheckRuleSeverity = tfdiags.Error + if node.skipPlanChanges || node.preDestroyRefresh { + data.CheckRuleSeverity = tfdiags.Warning + } + + // Set up provider + provider, providerSchema, err := getProvider(ctx, node.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return nil, diags + } + + data.Provider = provider + data.ProviderSchema = providerSchema + data.ResourceSchema = data.ProviderSchema.SchemaForResourceAddr(node.Addr.Resource.Resource) + if data.ResourceSchema.Body == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", node.Addr)) + return nil, diags + } + + // Validate configuration if present + if node.Config != nil { + diags = diags.Append(validateSelfRef(data.Addr.Resource, node.Config.Config, providerSchema)) + if diags.HasErrors() { + return nil, diags + } + } + + // Data source planning goes through a different path + if s.Mode == addrs.DataResourceMode { + return &PlanDataSourceStep{}, diags + } + + // Start importing process. + if data.Importing { + return &ImportingStep{ImportTarget: node.importTarget}, diags + } + + // Check if we need to upgrade the schema. If we do, we must + // refresh the resource instance state to match the new schema. 
+ upgradeRequired, diags := node.schemaUpgradeRequired(ctx, providerSchema, data.Addr) + if diags.HasErrors() { + return nil, diags + } + if upgradeRequired { + data.RefreshNeeded = upgradeRequired + } + + // Read the resource instance from the state + data.InstanceRefreshState, diags = node.readResourceInstanceState(ctx, node.ResourceInstanceAddr()) + if diags.HasErrors() { + return nil, diags + } + return &SaveSnapshotStep{}, diags +} + +// ImportingStep handles the importing of resources +type ImportingStep struct { + ImportTarget cty.Value +} + +func (s *ImportingStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + addr := node.ResourceInstanceAddr() + + // If the target was already in the state, import target would be nil and we + // would not have gotten here, but let's double-check. + if s.ImportTarget == cty.NilVal { + return nil, diags + } + + // Happy path: the import target id is known. Let's import it. + if s.ImportTarget.IsWhollyKnown() { + return &ProviderImportStep{ImportTarget: s.ImportTarget}, diags + } + + // Unknown config. Mark as deferred without importing. + // We can only get here because we allowed unknowns in the + // import target, a behavior that is only supported when + // we allow deferrals. + data.Deferred = &providers.Deferred{ + Reason: providers.DeferredReasonResourceConfigUnknown, + } + + // Handle config generation + if node.Config == nil && len(node.generateConfigPath) > 0 { + // Then we're supposed to be generating configuration for this + // resource, but we can't because the configuration is unknown. + // + // Normally, the next step would just be about + // planning the known configuration to make sure everything we + // do know about it is correct, but we can't even do that here. 
+ // If we attempt to do that, (a) We're going to panic later when it complains + // about having no configuration, and (b) the rest of the + // function isn't doing anything as there is no configuration + // to validate. + // + // What we'll do instead is write out the address as being deferred with + // an entirely unknown value. Therefore we can skip the planning steps + // and go straight to the post-plan deferral step. + impliedType := data.ProviderSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType() + return &PostPlanDeferralStep{ + Change: &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: node.ResolvedProvider, + Change: plans.Change{ + Action: plans.NoOp, // assume we'll get the config generation correct. + Before: cty.NullVal(impliedType), + After: cty.UnknownVal(impliedType), + Importing: &plans.Importing{ + Target: s.ImportTarget, + }, + }, + }, + }, diags + } + + // We can go straight to planning the import, since we know it has no + // state, and thus nothing to refresh. + return &PlanningStep{}, diags +} + +// ProviderImportStep handles the import of resources with the provider. 
+type ProviderImportStep struct { + ImportTarget cty.Value +} + +func (s *ProviderImportStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + addr := node.ResourceInstanceAddr() + deferralAllowed := ctx.Deferrals().DeferralAllowed() + var diags tfdiags.Diagnostics + absAddr := addr.Resource.Absolute(ctx.Path()) + hookResourceID := HookResourceIdentity{ + Addr: absAddr, + ProviderAddr: node.ResolvedProvider.Provider, + } + provider := data.Provider + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PrePlanImport(hookResourceID, s.ImportTarget) + })) + if diags.HasErrors() { + return nil, diags + } + + importType := "ID" + var importValue string + + var resp providers.ImportResourceStateResponse + if node.override != nil { + // For overriding resources that are being imported, we cheat a little + // bit and look ahead at the configuration the user has provided and + // we'll use that as the basis for the resource we're going to make up + // that is due to be overridden. + + // Note, we know we have configuration as it's impossible to enable + // config generation during tests, and the validation that config exists + // if configuration generation is off has already happened. + if node.Config == nil { + // But, just in case we change this at some point in the future, + // let's add a specific error message here we can test for to + // document the expectation somewhere. This shouldn't happen in + // production, so we don't bother with a pretty error. 
+ diags = diags.Append(fmt.Errorf("override blocks do not support config generation")) + return nil, diags + } + + forEach, _, _ := evaluateForEachExpression(node.Config.ForEach, ctx, false) + keyData := EvalDataForInstanceKey(node.ResourceInstanceAddr().Resource.Key, forEach) + configVal, _, configDiags := ctx.EvaluateBlock(node.Config.Config, data.ResourceSchema.Body, nil, keyData) + if configDiags.HasErrors() { + // We have an overridden resource so we're definitely in a test and + // the users config is not valid. So give up and just report the + // problems in the users configuration. Normally, we'd import the + // resource before giving up but for a test it doesn't matter, the + // test fails in the same way and the state is just lost anyway. + // + // If there were only warnings from the config then we'll duplicate + // them if we include them (as the config will be loaded again + // later), so only add the configDiags into the main diags if we + // found actual errors. + diags = diags.Append(configDiags) + return nil, diags + } + configVal, _ = configVal.UnmarkDeep() + + // Let's pretend we're reading the value as a data source so we + // pre-compute values now as if the resource has already been created. 
+ override, overrideDiags := mocking.ComputedValuesForDataSource(configVal, &mocking.MockedData{ + Value: node.override.Values, + Range: node.override.Range, + ComputedAsUnknown: false, + }, data.ResourceSchema.Body) + resp = providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: addr.Resource.Resource.Type, + State: ephemeral.StripWriteOnlyAttributes(override, data.ResourceSchema.Body), + }, + }, + Diagnostics: overrideDiags.InConfigBody(node.Config.Config, absAddr.String()), + } + } else { + if s.ImportTarget.Type().IsObjectType() { + // Identity-based import + resp = provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + Identity: s.ImportTarget, + ClientCapabilities: ctx.ClientCapabilities(), + }) + importType = "Identity" + importValue = tfdiags.ObjectToString(s.ImportTarget) + } else { + // ID-based/string import + resp = provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + ID: s.ImportTarget.AsString(), + ClientCapabilities: ctx.ClientCapabilities(), + }) + importValue = s.ImportTarget.AsString() + } + } + + data.Deferred = resp.Deferred + // If we don't support deferrals, but the provider reports a deferral and does not + // emit any error level diagnostics, we should emit an error. + if resp.Deferred != nil && !deferralAllowed && !resp.Diagnostics.HasErrors() { + diags = diags.Append(deferring.UnexpectedProviderDeferralDiagnostic(node.Addr)) + } + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags + } + + count := len(resp.ImportedResources) + switch { + case count > 1: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Multiple import states not supported", + fmt.Sprintf("While attempting to import with %s %s, the provider "+ + "returned multiple resource instance states. 
This "+ + "is not currently supported.", + importType, importValue, + ), + )) + case count == 0: + // Sanity check against the providers. If the provider defers the response, it may not have been able to return a state, so we'll only error if no deferral was returned. + if resp.Deferred == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Import returned no resources", + fmt.Sprintf("While attempting to import with %s %s, the provider "+ + "returned no instance states.", + importType, importValue, + ), + )) + return nil, diags + } + + // If we were deferred, then let's make up a resource to represent the + // state we're going to import. + state := providers.ImportedResource{ + TypeName: addr.Resource.Resource.Type, + State: cty.NullVal(data.ResourceSchema.Body.ImpliedType()), + } + + // We skip the read and further validation since we make up the state + // of the imported resource anyways. + data.InstanceRefreshState = states.NewResourceInstanceObjectFromIR(state) + data.RefreshNeeded = false + return &PlanningStep{}, nil + } + + return &PostImportStep{ + ImportType: importType, + ImportValue: importValue, + ImportedResources: resp.ImportedResources, + HookResourceID: hookResourceID}, diags +} + +type PostImportStep struct { + ImportType string + ImportValue string + ImportedResources []providers.ImportedResource + HookResourceID HookResourceIdentity +} + +func (s *PostImportStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + addr := node.ResourceInstanceAddr() + deferred := data.Deferred + var diags tfdiags.Diagnostics + imported := s.ImportedResources + + absAddr := addr.Resource.Absolute(ctx.Path()) + for _, obj := range imported { + log.Printf("[TRACE] PostImportStep: import %s %q produced instance object of type %s", absAddr.String(), s.ImportValue, obj.TypeName) + } + + // We can only call the hooks and validate the imported state 
if we have + actually done the import. + if deferred == nil { + // call post-import hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostPlanImport(s.HookResourceID, imported) + })) + } + + if imported[0].TypeName == "" { + diags = diags.Append(fmt.Errorf("import of %s didn't set type", node.Addr.String())) + return nil, diags + } + + // Providers are supposed to return null values for all write-only attributes + writeOnlyDiags := ephemeral.ValidateWriteOnlyAttributes( + "Import returned a non-null value for a write-only attribute", + func(path cty.Path) string { + return fmt.Sprintf( + "While attempting to import with %s %s, the provider %q returned a value for the write-only attribute \"%s%s\". Write-only attributes cannot be read back from the provider. This is a bug in the provider, which should be reported in the provider's own issue tracker.", + s.ImportType, s.ImportValue, node.ResolvedProvider, node.Addr, tfdiags.FormatCtyPath(path), + ) + }, + imported[0].State, + data.ResourceSchema.Body, + ) + diags = diags.Append(writeOnlyDiags) + + if writeOnlyDiags.HasErrors() { + return nil, diags + } + + importedState := states.NewResourceInstanceObjectFromIR(imported[0]) + if deferred == nil && importedState.Value.IsNull() { + // It's actually okay for a deferred import to have returned a null. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Import returned null resource", + fmt.Sprintf("While attempting to import with %s %s, the provider "+ + "returned an instance with no state.", + s.ImportType, s.ImportValue, + ), + )) + + } + data.InstanceRefreshState = importedState + return &ProviderRefreshStep{}, diags +} + +// SaveSnapshotStep saves a snapshot of the resource instance state +// before refreshing the resource. 
+type SaveSnapshotStep struct{} + +func (s *SaveSnapshotStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Only write the state if the change isn't being deferred. + if data.Deferred == nil { + // We'll save a snapshot of what we just read from the state into the + // prevRunState before we do anything else, since this will capture the + // result of any schema upgrading that readResourceInstanceState just did, + // but not include any out-of-band changes we might detect in the + // subsequent provider refresh step. + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, prevRunState)) + if diags.HasErrors() { + return nil, diags + } + // Also the refreshState, because that should still reflect schema upgrades + // even if it doesn't reflect upstream changes. + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) + if diags.HasErrors() { + return nil, diags + } + } + + // we may need to detect a change in CreateBeforeDestroy to ensure it's + // stored when we are not refreshing + updated := updateCreateBeforeDestroy(node, data.InstanceRefreshState) + + // If we are in light mode, we may not need to refresh the state. + // If we find out that we have to after planning, the planning step will send us there. + if !data.RefreshNeeded || data.LightMode { + if updated { + // CreateBeforeDestroy must be set correctly in the state which is used + // to create the apply graph, so if we did not refresh the state make + // sure we still update any changes to CreateBeforeDestroy. + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) + if diags.HasErrors() { + return nil, diags + } + } + + // If we only want to refresh the state, then we can skip the + // planning phase. 
+ if data.SkipPlanning { + return &RefreshOnlyStep{prevInstanceState: data.InstanceRefreshState}, diags + } + + // Go straight to planning, since we don't need to refresh the state + return &PlanningStep{}, diags + } + + return &ProviderRefreshStep{}, diags +} + +// ProviderRefreshStep handles refreshing the resource's state +// with the provider. +type ProviderRefreshStep struct{} + +func (s *ProviderRefreshStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // This is the state of the resource before we refresh the value in the provider, we need to keep track + // of this to report this as the before value if the refresh is deferred. + preRefreshInstanceState := data.InstanceRefreshState + + var refreshWasDeferred bool + // Perform the refresh + refreshedState, refreshDeferred, refreshDiags := node.refresh( + ctx, states.NotDeposed, data.InstanceRefreshState, ctx.Deferrals().DeferralAllowed(), + ) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return nil, diags + } + + data.InstanceRefreshState = refreshedState + + if data.InstanceRefreshState != nil { + // When refreshing we start by merging the stored dependencies and + // the configured dependencies. The configured dependencies will be + // stored to state once the changes are applied. If the plan + // results in no changes, we will re-write these dependencies + // below. 
+ data.InstanceRefreshState.Dependencies = mergeDeps( + node.Dependencies, data.InstanceRefreshState.Dependencies, + ) + } + + if data.Deferred == nil && refreshDeferred != nil { + data.Deferred = refreshDeferred + } + refreshWasDeferred = refreshDeferred != nil + + if data.Deferred == nil { + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) + } + if diags.HasErrors() { + return nil, diags + } + + // Handle import validation and config generation if needed + if data.Importing { + importDiags := s.handleImportValidationAndConfigGen(ctx, node, data, refreshWasDeferred) + diags = diags.Append(importDiags) + if diags.HasErrors() { + return nil, diags + } + } + + data.RefreshNeeded = false // we just refreshed, we shouldn't need to refresh again + + // If we only want to refresh the state, then we can skip the + // planning phase. + if data.SkipPlanning { + return &RefreshOnlyStep{prevInstanceState: preRefreshInstanceState}, diags + } + + return &PlanningStep{}, diags +} + +// handleImportValidationAndConfigGen handles the import validation and config generation +// after a resource has been refreshed. +func (s *ProviderRefreshStep) handleImportValidationAndConfigGen( + ctx EvalContext, + node *NodePlannableResourceInstance, + data *ResourceData, + refreshWasDeferred bool, +) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // We only need to handle import validation and config generation + // when we're importing and the import target is wholly known + if !data.ImportTarget.IsWhollyKnown() { + return diags + } + + if !refreshWasDeferred && data.InstanceRefreshState.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %q, "+ + "the provider detected that no object exists with the given id. 
"+ + "Only pre-existing objects can be imported; check that the id "+ + "is correct and that it is associated with the provider's "+ + "configured region or endpoint, or use \"terraform apply\" to "+ + "create a new remote object for this resource.", + node.Addr, + ), + )) + return diags + } + + // If we're importing and generating config, generate it now. We only + // generate config if the import isn't being deferred. We should generate + // the configuration in the plan that the import is actually happening in. + if data.Deferred == nil && len(node.generateConfigPath) > 0 { + if node.Config != nil { + return diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", node.Addr)) + } + + // Generate the HCL string first, then parse the HCL body from it. + // First we generate the contents of the resource block for use within + // the planning node. Then we wrap it in an enclosing resource block to + // pass into the plan for rendering. + generatedResource, generatedDiags := node.generateHCLResourceDef(ctx, node.Addr, data.InstanceRefreshState.Value) + diags = diags.Append(generatedDiags) + + // This wraps the content of the resource block in an enclosing resource block + // to pass into the plan for rendering. + node.generatedConfigHCL = generatedResource.String() + + // parse the "file" body as HCL to get the hcl.Body + synthHCLFile, hclDiags := hclsyntax.ParseConfig(generatedResource.Body, filepath.Base(node.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return diags + } + + // We have to do a kind of mini parsing of the content here to correctly + // mark attributes like 'provider' as hidden. We only care about the + // resulting content, so it's `remain` that gets passed into the resource + // as the config. 
+ _, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema) + diags = diags.Append(resourceDiags) + if resourceDiags.HasErrors() { + return diags + } + + node.Config = &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: node.Addr.Resource.Resource.Type, + Name: node.Addr.Resource.Resource.Name, + Config: remain, + Managed: &configs.ManagedResource{}, + Provider: node.ResolvedProvider.Provider, + } + } + + return diags +} + +// RefreshOnlyStep handles the refresh-only planning mode +type RefreshOnlyStep struct { + // This is the state of the resource before we refresh the value + prevInstanceState *states.ResourceInstanceObject +} + +func (s *RefreshOnlyStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // In refresh-only mode we need to evaluate the for-each expression in + // order to supply the value to the pre- and post-condition check + // blocks. This has the unfortunate edge case of a refresh-only plan + // executing with a for-each map which has the same keys but different + // values, which could result in a post-condition check relying on that + // value being inaccurate. Unless we decide to store the value of the + // for-each expression in state, this is unavoidable. + forEach, _, _ := evaluateForEachExpression(node.Config.ForEach, ctx, false) + repeatData := EvalDataForInstanceKey(data.Addr.Resource.Key, forEach) + + // Evaluate preconditions + checkDiags := evalCheckRules( + addrs.ResourcePrecondition, + node.Config.Preconditions, + ctx, data.Addr, repeatData, + data.CheckRuleSeverity, + ) + diags = diags.Append(checkDiags) + + // Even if we don't plan changes, we do still need to at least update + // the working state to reflect the refresh result. If not, then e.g. + // any output values refering to this will not react to the drift. 
+ // (Even if we didn't actually refresh above, this will still save + // the result of any schema upgrading we did in readResourceInstanceState.) + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, workingState)) + if diags.HasErrors() { + return nil, diags + } + + // Evaluate postconditions + checkDiags = evalCheckRules( + addrs.ResourcePostcondition, + node.Config.Postconditions, + ctx, data.Addr, repeatData, + data.CheckRuleSeverity, + ) + diags = diags.Append(checkDiags) + + // Report deferral if needed + if data.Deferred != nil { + // Make sure we have a valid state before using it + var beforeValue cty.Value + if s.prevInstanceState != nil { + beforeValue = s.prevInstanceState.Value + } else { + beforeValue = cty.NullVal(data.InstanceRefreshState.Value.Type()) + } + + ctx.Deferrals().ReportResourceInstanceDeferred( + data.Addr, + data.Deferred.Reason, + &plans.ResourceInstanceChange{ + Addr: node.Addr, + PrevRunAddr: node.Addr, + ProviderAddr: node.ResolvedProvider, + Change: plans.Change{ + Action: plans.Read, + Before: beforeValue, + After: data.InstanceRefreshState.Value, + }, + }, + ) + } + + // no more steps. 
+ return nil, diags +} + +// PlanningStep handles the planning phase +type PlanningStep struct{} + +func (s *PlanningStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Initialize repetition data for replace triggers + repData := instances.RepetitionData{} + switch k := data.Addr.Resource.Key.(type) { + case addrs.IntKey: + repData.CountIndex = k.Value() + case addrs.StringKey: + repData.EachKey = k.Value() + repData.EachValue = cty.DynamicVal + } + + // Check for triggered replacements + diags = diags.Append(node.replaceTriggered(ctx, repData)) + if diags.HasErrors() { + return nil, diags + } + + // Plan the changes + change, instancePlanState, planDeferred, repeatData, planDiags := node.plan( + ctx, nil, data.InstanceRefreshState, node.ForceCreateBeforeDestroy, node.forceReplace, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + // Special case for import with config generation + // If we are importing and generating a configuration, we need to + // ensure the change is written out so the configuration can be + // captured. 
+ if planDeferred == nil && len(node.generateConfigPath) > 0 { + // Update our return plan + change := &plans.ResourceInstanceChange{ + Addr: node.Addr, + PrevRunAddr: node.prevRunAddr(ctx), + ProviderAddr: node.ResolvedProvider, + Change: plans.Change{ + // we only need a placeholder, so this will be a NoOp + Action: plans.NoOp, + Before: data.InstanceRefreshState.Value, + After: data.InstanceRefreshState.Value, + GeneratedConfig: node.generatedConfigHCL, + }, + } + diags = diags.Append(node.writeChange(ctx, change, "")) + } + return nil, diags + } + + if data.Deferred == nil && planDeferred != nil { + data.Deferred = planDeferred + } + + // Update import metadata if needed + if data.Importing { + // There is a subtle difference between the import by identity + // and the import by ID. When importing by identity, we need to + // make sure to use the complete identity return by the provider + // instead of the (potential) incomplete one from the configuration. + if node.importTarget.Type().IsObjectType() { + change.Importing = &plans.Importing{Target: data.InstanceRefreshState.Identity} + } else { + change.Importing = &plans.Importing{Target: node.importTarget} + } + } + + // FIXME: here we update the change to reflect the reason for + // replacement, but we still overload forceReplace to get the correct + // change planned. 
+ if len(node.replaceTriggeredBy) > 0 { + change.ActionReason = plans.ResourceInstanceReplaceByTriggers + } + + // Determine if we need to refresh and re-plan + // In light mode, if we didn't refresh before planning but the provider + // has indicated that changes are needed, we need to refresh and re-plan to + // ensure we have the most up-to-date state + refreshChangedResource := data.RefreshNeeded && change.Action != plans.NoOp && data.LightMode + if refreshChangedResource { + // Go back to the refresh step and plan again + return &ProviderRefreshStep{}, nil + } + + // FIXME: here we update the change to reflect the reason for + // replacement, but we still overload forceReplace to get the correct + // change planned. + if len(node.replaceTriggeredBy) > 0 { + change.ActionReason = plans.ResourceInstanceReplaceByTriggers + } + + return &PostPlanDeferralStep{ + RepeatData: repeatData, + PlanState: instancePlanState, + Change: change, + }, diags +} + +// PostPlanDeferralStep handles the deferral of changes after planning +type PostPlanDeferralStep struct { + RepeatData instances.RepetitionData + PlanState *states.ResourceInstanceObject + Change *plans.ResourceInstanceChange +} + +func (s *PostPlanDeferralStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + deferrals := ctx.Deferrals() + if data.Deferred != nil { + // Then this resource has been deferred either during the import, + // refresh or planning stage. We'll report the deferral and + // store what we could produce in the deferral tracker. 
+ deferrals.ReportResourceInstanceDeferred(data.Addr, data.Deferred.Reason, s.Change) + return nil, diags + } + + // We intentionally write the change before the subsequent checks, because + // all of the checks below this point are for problems caused by the + // context surrounding the change, rather than the change itself, and + // so it's helpful to still include the valid-in-isolation change as + // part of the plan as additional context in our error output. + // + // FIXME: it is currently important that we write resource changes to + // the plan (n.writeChange) before we write the corresponding state + // (n.writeResourceInstanceState). + // + // This is because the planned resource state will normally have the + // status of states.ObjectPlanned, which causes later logic to refer to + // the contents of the plan to retrieve the resource data. Because + // there is no shared lock between these two data structures, reversing + // the order of these writes will cause a brief window of inconsistency + // which can lead to a failed safety check. + // + // Future work should adjust these APIs such that it is impossible to + // update these two data structures incorrectly through any objects + // reachable via the terraform.EvalContext API. + if !deferrals.ShouldDeferResourceInstanceChanges(node.Addr, node.Dependencies) { + // Write the change + diags = diags.Append(node.writeChange(ctx, s.Change, "")) + if diags.HasErrors() { + return nil, diags + } + + // Update the working state + diags = diags.Append(node.writeResourceInstanceState(ctx, s.PlanState, workingState)) + if diags.HasErrors() { + return nil, diags + } + + // Check for prevent_destroy violations + diags = diags.Append(node.checkPreventDestroy(s.Change)) + if diags.HasErrors() { + return nil, diags + } + + // If this plan resulted in a NoOp, then apply won't have a chance to make + // any changes to the stored dependencies. 
Since this is a NoOp we know + // that the stored dependencies will have no effect during apply, and we can + // write them out now. + if s.Change.Action == plans.NoOp && !depsEqual(data.InstanceRefreshState.Dependencies, node.Dependencies) { + // the refresh state will be the final state for this resource, so + // finalize the dependencies here if they need to be updated. + data.InstanceRefreshState.Dependencies = node.Dependencies + diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) + if diags.HasErrors() { + return nil, diags + } + } + + return &CheckingPostconditionsStep{s.RepeatData}, diags + } + + // If we get here, it means that the deferrals tracker says that + // that we must defer changes for + // this resource instance, presumably due to a dependency on an + // upstream object that was already deferred. Therefore we just + // report our own deferral (capturing a placeholder value in the + // deferral tracker) and don't add anything to the plan or + // working state. + // In this case, the expression evaluator should use the placeholder + // value registered here as the value of this resource instance, + // instead of using the plan. + deferrals.ReportResourceInstanceDeferred(node.Addr, providers.DeferredReasonDeferredPrereq, s.Change) + return nil, diags +} + +// CheckingPostconditionsStep evaluates postconditions +type CheckingPostconditionsStep struct { + RepeatData instances.RepetitionData +} + +func (s *CheckingPostconditionsStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // Post-conditions might block completion. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. 
+ // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + + // Check postconditions + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + node.Config.Postconditions, + ctx, node.ResourceInstanceAddr(), s.RepeatData, + data.CheckRuleSeverity, + ) + diags = diags.Append(checkDiags) + + // End of execution + return nil, diags +} + +func updateCreateBeforeDestroy(n *NodePlannableResourceInstance, currentState *states.ResourceInstanceObject) (updated bool) { + if n.Config != nil && n.Config.Managed != nil && currentState != nil { + newCBD := n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy + updated = currentState.CreateBeforeDestroy != newCBD + currentState.CreateBeforeDestroy = newCBD + return updated + } + return false +} diff --git a/internal/terraform/node_resource_plan_instance_ds.go b/internal/terraform/node_resource_plan_instance_ds.go new file mode 100644 index 000000000000..261ef8c47104 --- /dev/null +++ b/internal/terraform/node_resource_plan_instance_ds.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package terraform + +import ( + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/tfdiags" +) + +type PlanDataSourceStep struct { +} + +func (s *PlanDataSourceStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + addr := node.ResourceInstanceAddr() + deferrals := ctx.Deferrals() + change, state, deferred, repeatData, planDiags := node.planDataSource(ctx, data.CheckRuleSeverity, data.SkipPlanning, deferrals.ShouldDeferResourceInstanceChanges(addr, node.Dependencies)) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return nil, diags + } + + // A nil change here indicates that Terraform is deciding NOT to make a + // change at all. In which case even if we wanted to try and defer it + // (because of a dependency) we can't as there is no change to defer. + // + // The most common case for this is when the data source is being refreshed + // but depends on unknown values or dependencies which means we just skip + // refreshing the data source. We maintain that behaviour here. + if change != nil && deferred != nil { + // Then this data source got deferred by the provider during planning. + deferrals.ReportDataSourceInstanceDeferred(addr, deferred.Reason, change) + } else { + // Not deferred; business as usual. + + // write the data source into both the refresh state and the + // working state + diags = diags.Append(node.writeResourceInstanceState(ctx, state, refreshState)) + if diags.HasErrors() { + return nil, diags + } + diags = diags.Append(node.writeResourceInstanceState(ctx, state, workingState)) + if diags.HasErrors() { + return nil, diags + } + + diags = diags.Append(node.writeChange(ctx, change, "")) + + // Post-conditions might block further progress. 
We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + node.Config.Postconditions, + ctx, addr, repeatData, + data.CheckRuleSeverity, + ) + diags = diags.Append(checkDiags) + } + + return nil, diags +} From 560a5f9965204f13b28600c19f50f7af1cd9f100 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Sun, 8 Feb 2026 11:21:36 +0100 Subject: [PATCH 2/4] incorporate into existing code structure --- internal/terraform/context_apply_test.go | 1 + .../node_resource_abstract_instance.go | 1 + .../terraform/node_resource_plan_instance.go | 426 ++++++++++-------- 3 files changed, 252 insertions(+), 176 deletions(-) diff --git a/internal/terraform/context_apply_test.go b/internal/terraform/context_apply_test.go index b1099dfa1b25..7732c6edaf03 100644 --- a/internal/terraform/context_apply_test.go +++ b/internal/terraform/context_apply_test.go @@ -10426,6 +10426,7 @@ func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), }, + Parallelism: 1, }) plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go index 332e61f0eb4b..3e1c96492500 100644 --- a/internal/terraform/node_resource_abstract_instance.go +++ b/internal/terraform/node_resource_abstract_instance.go @@ -3034,6 +3034,7 @@ func getAction(addr addrs.AbsResourceInstance, priorVal, plannedNewVal cty.Value actionReason = plans.ResourceInstanceReplaceBecauseCannotUpdate } case eq && !forceReplace: + // TODO: WHat if the resource has force replace? 
action = plans.NoOp default: action = plans.Update diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index d1ddcf32c915..16574427ea29 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -79,11 +79,11 @@ func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperatio // Eval info is different depending on what kind of resource this is switch addr.Resource.Resource.Mode { case addrs.ManagedResourceMode: - // return n.managedResourceExecute(ctx) - return n.Execute2(ctx, op) + return n.managedResourceExecute(ctx) + // return n.Execute2(ctx, op) case addrs.DataResourceMode: - // return n.dataResourceExecute(ctx) - return n.Execute2(ctx, op) + return n.dataResourceExecute(ctx) + // return n.Execute2(ctx, op) case addrs.EphemeralResourceMode: return n.ephemeralResourceExecute(ctx) case addrs.ListResourceMode: @@ -211,13 +211,17 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } importing := n.importTarget != cty.NilVal && !n.preDestroyRefresh - var deferred *providers.Deferred + var importTarget *plans.Importing + shouldRefresh := !n.skipRefresh // If the resource is to be imported, we now ask the provider for an Import // and a Refresh, and save the resulting state to instanceRefreshState. if importing { + importTarget = &plans.Importing{Target: n.importTarget} + // importState takes care of refreshing its imported state + shouldRefresh = shouldRefresh && !importing if n.importTarget.IsWhollyKnown() { var importDiags tfdiags.Diagnostics instanceRefreshState, deferred, importDiags = n.importState(ctx, addr, n.importTarget, provider, providerSchema) @@ -260,6 +264,15 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } } + + // There is a subtle difference between the import by identity + // and the import by ID. 
When importing by identity, we need to + // make sure to use the complete identity returned by the provider + // instead of the (potential) incomplete one from the configuration. + if n.importTarget.Type().IsObjectType() && instanceRefreshState != nil { + importTarget = &plans.Importing{Target: instanceRefreshState.Identity} + } + } else { var readDiags tfdiags.Diagnostics instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr) @@ -273,6 +286,33 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } } + // Now we have the state value + // + // Then, in light mode, + // we start by planning the resource, and if it is a no-op, + // we skip the read step + if ctx.PlanCtx().LightMode { + change, planDiags := n.planManagedResource( + ctx, + instanceRefreshState, + deferred, + importTarget, + false, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } + + // If the change is a no-op, write the change and return + if change.Action == plans.NoOp { + diags = diags.Append(n.writeChange(ctx, change, "")) + return diags + } + // Otherwise we continue with the read step, + // which will reconcile the local state and config with the remote state + } + if deferred == nil { // We'll save a snapshot of what we just read from the state into the // prevRunState before we do anything else, since this will capture the @@ -315,23 +355,14 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Refresh, maybe // The import process handles its own refresh - if !n.skipRefresh && !importing { + if shouldRefresh { var refreshDiags tfdiags.Diagnostics - instanceRefreshState, refreshDeferred, refreshDiags = n.refresh(ctx, states.NotDeposed, instanceRefreshState, ctx.Deferrals().DeferralAllowed()) + instanceRefreshState, refreshDeferred, refreshDiags = n.refreshState(ctx, instanceRefreshState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags } - if instanceRefreshState != 
nil { - // When refreshing we start by merging the stored dependencies and - // the configured dependencies. The configured dependencies will be - // stored to state once the changes are applied. If the plan - // results in no changes, we will re-write these dependencies - // below. - instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies) - } - if deferred == nil && refreshDeferred != nil { deferred = refreshDeferred } @@ -365,158 +396,14 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Plan the instance, unless we're in the refresh-only mode if !n.skipPlanChanges { - - // add this instance to n.forceReplace if replacement is triggered by - // another change - repData := instances.RepetitionData{} - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - repData.CountIndex = k.Value() - case addrs.StringKey: - repData.EachKey = k.Value() - repData.EachValue = cty.DynamicVal - } - - diags = diags.Append(n.replaceTriggered(ctx, repData)) - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) - return diags - } - - change, instancePlanState, planDeferred, repeatData, planDiags := n.plan( - ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + _, planDiags := n.planManagedResource( + ctx, + instanceRefreshState, + deferred, + importTarget, + true, ) diags = diags.Append(planDiags) - if diags.HasErrors() { - // If we are importing and generating a configuration, we need to - // ensure the change is written out so the configuration can be - // captured. 
- if planDeferred == nil && len(n.generateConfigPath) > 0 { - // Update our return plan - change := &plans.ResourceInstanceChange{ - Addr: n.Addr, - PrevRunAddr: n.prevRunAddr(ctx), - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - // we only need a placeholder, so this will be a NoOp - Action: plans.NoOp, - Before: instanceRefreshState.Value, - After: instanceRefreshState.Value, - GeneratedConfig: n.generatedConfigHCL, - }, - } - diags = diags.Append(n.writeChange(ctx, change, "")) - } - - return diags - } - - if deferred == nil && planDeferred != nil { - deferred = planDeferred - } - - if importing { - // There is a subtle difference between the import by identity - // and the import by ID. When importing by identity, we need to - // make sure to use the complete identity return by the provider - // instead of the (potential) incomplete one from the configuration. - if n.importTarget.Type().IsObjectType() { - change.Importing = &plans.Importing{Target: instanceRefreshState.Identity} - } else { - change.Importing = &plans.Importing{Target: n.importTarget} - } - } - - // FIXME: here we update the change to reflect the reason for - // replacement, but we still overload forceReplace to get the correct - // change planned. - if len(n.replaceTriggeredBy) > 0 { - change.ActionReason = plans.ResourceInstanceReplaceByTriggers - } - - deferrals := ctx.Deferrals() - if deferred != nil { - // Then this resource has been deferred either during the import, - // refresh or planning stage. We'll report the deferral and - // store what we could produce in the deferral tracker. 
- deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change) - } else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies) { - // We intentionally write the change before the subsequent checks, because - // all of the checks below this point are for problems caused by the - // context surrounding the change, rather than the change itself, and - // so it's helpful to still include the valid-in-isolation change as - // part of the plan as additional context in our error output. - // - // FIXME: it is currently important that we write resource changes to - // the plan (n.writeChange) before we write the corresponding state - // (n.writeResourceInstanceState). - // - // This is because the planned resource state will normally have the - // status of states.ObjectPlanned, which causes later logic to refer to - // the contents of the plan to retrieve the resource data. Because - // there is no shared lock between these two data structures, reversing - // the order of these writes will cause a brief window of inconsistency - // which can lead to a failed safety check. - // - // Future work should adjust these APIs such that it is impossible to - // update these two data structures incorrectly through any objects - // reachable via the terraform.EvalContext API. - diags = diags.Append(n.writeChange(ctx, change, "")) - if diags.HasErrors() { - return diags - } - diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } - - // If this plan resulted in a NoOp, then apply won't have a chance to make - // any changes to the stored dependencies. Since this is a NoOp we know - // that the stored dependencies will have no effect during apply, and we can - // write them out now. 
- if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { - // the refresh state will be the final state for this resource, so - // finalize the dependencies here if they need to be updated. - instanceRefreshState.Dependencies = n.Dependencies - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - if diags.HasErrors() { - return diags - } - } - - // Post-conditions might block completion. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - // (Note that some preconditions will end up being skipped during - // planning, because their conditions depend on values not yet known.) - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - } else { - // The deferrals tracker says that we must defer changes for - // this resource instance, presumably due to a dependency on an - // upstream object that was already deferred. Therefore we just - // report our own deferral (capturing a placeholder value in the - // deferral tracker) and don't add anything to the plan or - // working state. - // In this case, the expression evaluator should use the placeholder - // value registered here as the value of this resource instance, - // instead of using the plan. 
- deferrals.ReportResourceInstanceDeferred(n.Addr, providers.DeferredReasonDeferredPrereq, change) - } } else { // In refresh-only mode we need to evaluate the for-each expression in // order to supply the value to the pre- and post-condition check @@ -578,6 +465,201 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } +func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // refresh + riNode := &NodeAbstractResourceInstance{ + Addr: n.Addr, + NodeAbstractResource: n.NodeAbstractResource, + override: n.override, + } + refreshedState, deferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state, ctx.Deferrals().DeferralAllowed()) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return refreshedState, deferred, diags + } + + if refreshedState != nil { + // When refreshing we start by merging the stored dependencies and + // the configured dependencies. The configured dependencies will be + // stored to state once the changes are applied. If the plan + // results in no changes, we will re-write these dependencies + // below. 
+ refreshedState.Dependencies = mergeDeps( + n.Dependencies, refreshedState.Dependencies, + ) + } + return refreshedState, deferred, diags +} + +func (n *NodePlannableResourceInstance) planManagedResource( + ctx EvalContext, + instanceRefreshState *states.ResourceInstanceObject, + deferred *providers.Deferred, + importTarget *plans.Importing, + write bool, +) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) { + + writeChange := func(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { + return nil + } + if write { + writeChange = n.writeChange + } + var diags tfdiags.Diagnostics + addr := n.ResourceInstanceAddr() + + checkRuleSeverity := tfdiags.Error + if n.skipPlanChanges || n.preDestroyRefresh { + checkRuleSeverity = tfdiags.Warning + } + + // add this instance to n.forceReplace if replacement is triggered by + // another change + repData := instances.RepetitionData{} + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + repData.CountIndex = k.Value() + case addrs.StringKey: + repData.EachKey = k.Value() + repData.EachValue = cty.DynamicVal + } + + diags = diags.Append(n.replaceTriggered(ctx, repData)) + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + return nil, diags + } + + change, instancePlanState, planDeferred, repeatData, planDiags := n.plan( + ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + // If we are importing and generating a configuration, we need to + // ensure the change is written out so the configuration can be + // captured. 
+ if planDeferred == nil && len(n.generateConfigPath) > 0 { + // Update our return plan + change := &plans.ResourceInstanceChange{ + Addr: n.Addr, + PrevRunAddr: n.prevRunAddr(ctx), + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + // we only need a placeholder, so this will be a NoOp + Action: plans.NoOp, + Before: instanceRefreshState.Value, + After: instanceRefreshState.Value, + GeneratedConfig: n.generatedConfigHCL, + }, + } + diags = diags.Append(writeChange(ctx, change, "")) + } + + return change, diags + } + + if deferred == nil && planDeferred != nil { + deferred = planDeferred + } + + change.Importing = importTarget + + // FIXME: here we update the change to reflect the reason for + // replacement, but we still overload forceReplace to get the correct + // change planned. + if len(n.replaceTriggeredBy) > 0 { + change.ActionReason = plans.ResourceInstanceReplaceByTriggers + } + + deferrals := ctx.Deferrals() + // TODO: planning twice means deferral twice, and that is an error. + if deferred != nil { + // Then this resource has been deferred either during the import, + // refresh or planning stage. We'll report the deferral and + // store what we could produce in the deferral tracker. + deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change) + } else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies) { + // We intentionally write the change before the subsequent checks, because + // all of the checks below this point are for problems caused by the + // context surrounding the change, rather than the change itself, and + // so it's helpful to still include the valid-in-isolation change as + // part of the plan as additional context in our error output. + // + // FIXME: it is currently important that we write resource changes to + // the plan (n.writeChange) before we write the corresponding state + // (n.writeResourceInstanceState). 
+ // + // This is because the planned resource state will normally have the + // status of states.ObjectPlanned, which causes later logic to refer to + // the contents of the plan to retrieve the resource data. Because + // there is no shared lock between these two data structures, reversing + // the order of these writes will cause a brief window of inconsistency + // which can lead to a failed safety check. + // + // Future work should adjust these APIs such that it is impossible to + // update these two data structures incorrectly through any objects + // reachable via the terraform.EvalContext API. + diags = diags.Append(writeChange(ctx, change, "")) + if diags.HasErrors() { + return change, diags + } + diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) + if diags.HasErrors() { + return change, diags + } + + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return change, diags + } + + // If this plan resulted in a NoOp, then apply won't have a chance to make + // any changes to the stored dependencies. Since this is a NoOp we know + // that the stored dependencies will have no effect during apply, and we can + // write them out now. + if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { + // the refresh state will be the final state for this resource, so + // finalize the dependencies here if they need to be updated. + instanceRefreshState.Dependencies = n.Dependencies + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return change, diags + } + } + + // Post-conditions might block completion. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. 
+ // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + } else { + // The deferrals tracker says that we must defer changes for + // this resource instance, presumably due to a dependency on an + // upstream object that was already deferred. Therefore we just + // report our own deferral (capturing a placeholder value in the + // deferral tracker) and don't add anything to the plan or + // working state. + // In this case, the expression evaluator should use the placeholder + // value registered here as the value of this resource instance, + // instead of using the plan. + deferrals.ReportResourceInstanceDeferred(n.Addr, providers.DeferredReasonDeferredPrereq, change) + } + + return change, diags +} + // replaceTriggered checks if this instance needs to be replace due to a change // in a replace_triggered_by reference. If replacement is required, the // instance address is added to forceReplace @@ -830,15 +912,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. )) } - // refresh - riNode := &NodeAbstractResourceInstance{ - Addr: n.Addr, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: n.ResolvedProvider, - }, - override: n.override, - } - instanceRefreshState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState, ctx.Deferrals().DeferralAllowed()) + instanceRefreshState, refreshDeferred, refreshDiags := n.refreshState(ctx, importedState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return instanceRefreshState, deferred, diags @@ -915,7 +989,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. // Only write the state if the change isn't being deferred. 
We're also // reporting the deferred status to the caller, so they should know // not to read from the state. - diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) } return instanceRefreshState, deferred, diags } From b4b546aa7aeabdc2cd1529a8e0ffa332d953cafa Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Mon, 9 Feb 2026 18:57:00 +0100 Subject: [PATCH 3/4] implement plan -light --- internal/backend/backendrun/operation.go | 1 + internal/backend/local/backend_local.go | 1 + internal/cloud/backend_plan.go | 9 ++ internal/command/arguments/plan.go | 26 ++++++ internal/command/arguments/plan_test.go | 37 ++++++++ internal/command/plan.go | 11 +++ internal/plans/plan.go | 7 ++ internal/terraform/context_plan.go | 87 +++++++++++++++++-- internal/terraform/context_plan2_test.go | 20 ++--- internal/terraform/graph_builder_plan.go | 40 +++------ .../node_resource_abstract_instance.go | 5 +- .../terraform/node_resource_apply_instance.go | 5 +- .../node_resource_destroy_deposed.go | 14 ++- .../terraform/node_resource_partial_plan.go | 26 +++--- internal/terraform/node_resource_plan.go | 23 ++--- .../terraform/node_resource_plan_destroy.go | 8 +- .../terraform/node_resource_plan_instance.go | 39 +++++---- .../terraform/node_resource_plan_orphan.go | 14 ++- .../node_resource_plan_partialexp.go | 13 ++- 19 files changed, 263 insertions(+), 123 deletions(-) diff --git a/internal/backend/backendrun/operation.go b/internal/backend/backendrun/operation.go index f2368dfdf3af..98d24cb300c6 100644 --- a/internal/backend/backendrun/operation.go +++ b/internal/backend/backendrun/operation.go @@ -75,6 +75,7 @@ type Operation struct { // plan for an apply operation. 
PlanId string PlanRefresh bool // PlanRefresh will do a refresh before a plan + PlanLight bool // PlanLight enables light plan mode, skipping refresh for unchanged resources PlanOutPath string // PlanOutPath is the path to save the plan // PlanOutBackend is the backend to store with the plan. This is the diff --git a/internal/backend/local/backend_local.go b/internal/backend/local/backend_local.go index 424948b547f6..2c98dbe07031 100644 --- a/internal/backend/local/backend_local.go +++ b/internal/backend/local/backend_local.go @@ -208,6 +208,7 @@ func (b *Local) localRunDirect(op *backendrun.Operation, run *backendrun.LocalRu ForceReplace: op.ForceReplace, SetVariables: variables, SkipRefresh: op.Type != backendrun.OperationTypeRefresh && !op.PlanRefresh, + LightMode: op.PlanLight, GenerateConfigPath: op.GenerateConfigOut, DeferralAllowed: op.DeferralAllowed, Query: op.Query, diff --git a/internal/cloud/backend_plan.go b/internal/cloud/backend_plan.go index 325fefd6a7f3..bc745328c85a 100644 --- a/internal/cloud/backend_plan.go +++ b/internal/cloud/backend_plan.go @@ -65,6 +65,15 @@ func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backendrun.Operat )) } + if op.PlanLight { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Light plan mode is not supported with cloud backends", + fmt.Sprintf("%s does not support the -light flag. ", b.appName)+ + "Light plan mode is only available for local operations.", + )) + } + if op.PlanFile != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, diff --git a/internal/command/arguments/plan.go b/internal/command/arguments/plan.go index d4d2a746a46c..9e06b88ec4e7 100644 --- a/internal/command/arguments/plan.go +++ b/internal/command/arguments/plan.go @@ -4,6 +4,7 @@ package arguments import ( + "github.com/hashicorp/terraform/internal/plans" "github.com/hashicorp/terraform/internal/tfdiags" ) @@ -30,6 +31,12 @@ type Plan struct { // be written to. 
GenerateConfigPath string + // Light enables "light plan" mode, where Terraform skips reading remote + // state for resources that have not changed in the local configuration + // or local state. The user is telling Terraform to trust that nothing + // has been modified outside of the local configuration. + Light bool + // ViewType specifies which output format to use ViewType ViewType } @@ -50,6 +57,7 @@ func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { cmdFlags.BoolVar(&plan.InputEnabled, "input", true, "input") cmdFlags.StringVar(&plan.OutPath, "out", "", "out") cmdFlags.StringVar(&plan.GenerateConfigPath, "generate-config-out", "", "generate-config-out") + cmdFlags.BoolVar(&plan.Light, "light", false, "light") var json bool cmdFlags.BoolVar(&json, "json", false, "json") @@ -82,6 +90,24 @@ func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { diags = diags.Append(plan.Operation.Parse()) + // Validate -light flag compatibility + if plan.Light { + if plan.Operation.PlanMode == plans.RefreshOnlyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "The -light and -refresh-only options are mutually exclusive. Light mode skips refreshing unchanged resources, while refresh-only mode requires refreshing all resources.", + )) + } + if plan.Operation.PlanMode == plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "The -light and -destroy options are mutually exclusive. 
A destroy plan requires reading the current state of all resources.", + )) + } + } + // JSON view currently does not support input, so we disable it here if json { plan.InputEnabled = false diff --git a/internal/command/arguments/plan_test.go b/internal/command/arguments/plan_test.go index e7fe6f401fdd..9664ead6fd0f 100644 --- a/internal/command/arguments/plan_test.go +++ b/internal/command/arguments/plan_test.go @@ -50,6 +50,23 @@ func TestParsePlan_basicValid(t *testing.T) { }, }, }, + "light mode": { + []string{"-light"}, + &Plan{ + DetailedExitCode: false, + InputEnabled: true, + Light: true, + OutPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, "JSON view disables input": { []string{"-json"}, &Plan{ @@ -96,6 +113,26 @@ func TestParsePlan_invalid(t *testing.T) { } } +func TestParsePlan_lightWithDestroy(t *testing.T) { + _, diags := ParsePlan([]string{"-light", "-destroy"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "mutually exclusive"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } +} + +func TestParsePlan_lightWithRefreshOnly(t *testing.T) { + _, diags := ParsePlan([]string{"-light", "-refresh-only"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "mutually exclusive"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } +} + func TestParsePlan_tooManyArguments(t *testing.T) { got, diags := ParsePlan([]string{"saved.tfplan"}) if len(diags) == 0 { diff --git a/internal/command/plan.go b/internal/command/plan.go index ec80a82b2e95..14bc51cf3511 100644 --- a/internal/command/plan.go +++ b/internal/command/plan.go @@ -86,6 +86,9 @@ func (c *PlanCommand) Run(rawArgs []string) int { return 1 } + // Light plan mode: skip 
refreshing unchanged resources + opReq.PlanLight = args.Light + // Collect variable value and add them to the operation request diags = diags.Append(c.GatherVariables(opReq, args.Vars)) if diags.HasErrors() { @@ -235,6 +238,14 @@ Plan Customization Options: planning faster, but at the expense of possibly planning against a stale record of the remote system state. + -light Enable light plan mode. In this mode, Terraform skips + reading remote state for resources that have not changed + in the local configuration or local state. This is useful + when you trust that nothing has been modified outside of + the local Terraform configuration, and can significantly + speed up planning for large configurations. Incompatible + with -refresh-only and -destroy. + -replace=resource Force replacement of a particular resource instance using its resource address. If the plan would've normally produced an update or no-op action for this instance, diff --git a/internal/plans/plan.go b/internal/plans/plan.go index 0995c0f1caf3..9dc877d16999 100644 --- a/internal/plans/plan.go +++ b/internal/plans/plan.go @@ -169,6 +169,13 @@ type Plan struct { // and builtin calls which may access external state so that calls during // apply can be checked for consistency. FunctionResults []lang.FunctionResultHash + + // Light is true if this plan was created in "light plan" mode, where + // Terraform skipped reading remote state for resources that have not + // changed in the local configuration or local state. This is recorded + // for UI purposes so that the user can be reminded that the plan may + // not reflect out-of-band changes to remote resources. 
+ Light bool } // ProviderAddrs returns a list of all of the provider configuration addresses diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go index 477547415cce..54166c548e5b 100644 --- a/internal/terraform/context_plan.go +++ b/internal/terraform/context_plan.go @@ -27,6 +27,39 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) +// nodePlanContext holds contextual flags that influence how individual resource +// nodes behave during the plan walk. It is derived from PlanOpts and +// threaded into every resource node. +type nodePlanContext struct { + // lightMode, when set to true, activates "light plan" mode. In this mode, + // Terraform plans each resource against local state first; if the result + // is a NoOp the remote-state refresh is skipped entirely. + lightMode bool + + skipPlanChanges bool + skipRefresh bool + preDestroyRefresh bool +} + +func (pc nodePlanContext) withSkipPlanChanges(v bool) nodePlanContext { + pc.skipPlanChanges = v + return pc +} + +func (pc nodePlanContext) withSkipRefresh(v bool) nodePlanContext { + pc.skipRefresh = v + return pc +} + +func (pc nodePlanContext) withPreDestroyRefresh(v bool) nodePlanContext { + pc.preDestroyRefresh = v + return pc +} + +func (pc nodePlanContext) SkipRefresh() bool { + return pc.skipRefresh || pc.lightMode +} + // PlanOpts are the various options that affect the details of how Terraform // will build a plan. type PlanOpts struct { @@ -41,7 +74,12 @@ type PlanOpts struct { // instance using its corresponding provider. SkipRefresh bool - PlanCtx PlanContext + // LightMode, when set to true, activates "light plan" mode. In this mode, + // Terraform plans each resource against local state first; if the result + // is a NoOp the expensive remote-state refresh is skipped entirely. + // Resources whose local plan shows changes are still refreshed and + // re-planned so the final diff is accurate. 
+ LightMode bool // PreDestroyRefresh indicated that this is being passed to a plan used to // refresh the state immediately before a destroy plan. @@ -255,6 +293,22 @@ func (c *Context) PlanAndEval(config *configs.Config, prevRunState *states.State )) return nil, nil, diags } + if opts.LightMode && opts.Mode != plans.NormalMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan options", + "Light plan mode is only compatible with normal planning mode. It cannot be used with -refresh-only or -destroy.", + )) + return nil, nil, diags + } + if opts.LightMode { + log.Printf("[INFO] Light plan mode enabled: skipping refresh for all resources") + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Light plan mode is in effect", + "You are creating a plan with the -light option, which skips reading the current state of remote resources. The resulting plan may not detect changes made outside of Terraform (drift). Use a normal plan to get a complete view of all changes.", + )) + } if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode { // The other modes don't generate no-op or update actions that we might // upgrade to be "replace", so doesn't make sense to combine those. @@ -469,6 +523,25 @@ func (c *Context) checkApplyGraph(plan *plans.Plan, config *configs.Config, opts return diags } +// nodeContext derives a nodePlanContext from the caller-facing fields on +// PlanOpts. This is the single point where the public API maps into the +// internal per-node flags that get threaded through the graph builder and +// into every resource node. +// +// Flags that are purely internal (e.g. skipPlanChanges, which is derived +// from plans.RefreshOnlyMode) are NOT set here — they are applied in +// planGraph where the mode-specific logic lives. 
+func (opts *PlanOpts) nodeContext() nodePlanContext { + if opts == nil { + return nodePlanContext{} + } + return nodePlanContext{ + lightMode: opts.LightMode, + skipRefresh: opts.SkipRefresh, + preDestroyRefresh: opts.PreDestroyRefresh, + } +} + var DefaultPlanOpts = &PlanOpts{ Mode: plans.NormalMode, } @@ -891,6 +964,7 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o Checks: states.NewCheckResults(walker.Checks), Timestamp: timestamp, FunctionResults: funcResults.GetHashes(), + Light: opts.LightMode, // Other fields get populated by Context.Plan after we return } @@ -1017,8 +1091,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, Plugins: c.plugins, Targets: opts.Targets, ForceReplace: opts.ForceReplace, - skipRefresh: opts.SkipRefresh, - preDestroyRefresh: opts.PreDestroyRefresh, + planCtx: opts.nodeContext(), Operation: walkPlan, ExternalReferences: opts.ExternalReferences, Overrides: opts.Overrides, @@ -1034,6 +1107,9 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, }).Build(addrs.RootModuleInstance) return graph, walkPlan, diags case plans.RefreshOnlyMode: + nctx := opts. + nodeContext(). + withSkipPlanChanges(true) // this activates "refresh only" mode. graph, diags := (&PlanGraphBuilder{ Config: config, State: prevRunState, @@ -1042,8 +1118,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, Plugins: c.plugins, Targets: append(opts.Targets, opts.ActionTargets...), ActionTargets: opts.ActionTargets, - skipRefresh: opts.SkipRefresh, - skipPlanChanges: true, // this activates "refresh only" mode. 
+ planCtx: nctx, Operation: walkPlan, ExternalReferences: opts.ExternalReferences, Overrides: opts.Overrides, @@ -1059,7 +1134,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, ExternalProviderConfigs: externalProviderConfigs, Plugins: c.plugins, Targets: opts.Targets, - skipRefresh: opts.SkipRefresh, + planCtx: opts.nodeContext(), Operation: walkPlanDestroy, Overrides: opts.Overrides, SkipGraphValidation: c.graphOpts.SkipGraphValidation, diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go index 62fb35c86cd8..4cb047f1fb37 100644 --- a/internal/terraform/context_plan2_test.go +++ b/internal/terraform/context_plan2_test.go @@ -995,8 +995,7 @@ resource "test_object" "a" { }) plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - SkipRefresh: false, // the default + Mode: plans.DestroyMode, }) tfdiags.AssertNoErrors(t, diags) @@ -7954,10 +7953,8 @@ func TestContext2Plan_lightModePartialUpdate(t *testing.T) { }) plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - PlanCtx: PlanContext{ - LightMode: true, - }, + Mode: plans.NormalMode, + LightMode: true, }) tfdiags.AssertNoErrors(t, diags) @@ -8027,8 +8024,8 @@ func TestContext2Plan_lightModePartialUpdate2(t *testing.T) { }) plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - PlanCtx: PlanContext{LightMode: true}, + Mode: plans.NormalMode, + LightMode: true, }) tfdiags.AssertNoErrors(t, diags) @@ -8046,7 +8043,6 @@ func TestContext2Plan_lightModeUpgradedSchema(t *testing.T) { resource "test_object" "unchanged" { value = "original1" } - resource "test_object" "changed" { value = "updated" } @@ -8120,10 +8116,8 @@ func TestContext2Plan_lightModeUpgradedSchema(t *testing.T) { }) plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - PlanCtx: PlanContext{ - LightMode: true, - }, + Mode: plans.NormalMode, + LightMode: true, }) tfdiags.AssertNoErrors(t, diags) diff --git 
a/internal/terraform/graph_builder_plan.go b/internal/terraform/graph_builder_plan.go index b88b21d24eae..1c72821af99d 100644 --- a/internal/terraform/graph_builder_plan.go +++ b/internal/terraform/graph_builder_plan.go @@ -60,21 +60,10 @@ type PlanGraphBuilder struct { // action instead. Create and Delete actions are not affected. ForceReplace []addrs.AbsResourceInstance - // skipRefresh indicates that we should skip refreshing managed resources - skipRefresh bool - - Ctx PlanContext - - // preDestroyRefresh indicates that we are executing the refresh which - // happens immediately before a destroy plan, which happens to use the - // normal planing mode so skipPlanChanges cannot be set. - preDestroyRefresh bool - - // skipPlanChanges indicates that we should skip the step of comparing - // prior state with configuration and generating planned changes to - // resource instances. (This is for the "refresh only" planning mode, - // where we _only_ do the refresh step.) - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes) that are + // propagated to individual resource nodes during graph building. 
+ planCtx nodePlanContext ConcreteProvider ConcreteProviderNodeFunc ConcreteResource ConcreteResourceNodeFunc @@ -212,7 +201,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { &LocalTransformer{Config: b.Config}, &OutputTransformer{ Config: b.Config, - RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh, + RefreshOnly: b.planCtx.skipPlanChanges || b.planCtx.preDestroyRefresh, Destroying: b.Operation == walkPlanDestroy, Overrides: b.Overrides, AllowRootEphemeralOutputs: b.AllowRootEphemeralOutputs, @@ -340,10 +329,8 @@ func (b *PlanGraphBuilder) initPlan() { a.overridePreventDestroy = b.overridePreventDestroy return &nodeExpandPlannableResource{ NodeAbstractResource: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, - preDestroyRefresh: b.preDestroyRefresh, forceReplace: b.ForceReplace, + planCtx: b.planCtx, } } @@ -351,10 +338,9 @@ func (b *PlanGraphBuilder) initPlan() { a.overridePreventDestroy = b.overridePreventDestroy return &NodePlannableResourceInstanceOrphan{ NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, forgetResources: b.forgetResources, forgetModules: b.forgetModules, + planCtx: b.planCtx, } } @@ -364,10 +350,9 @@ func (b *PlanGraphBuilder) initPlan() { NodeAbstractResourceInstance: a, DeposedKey: key, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, forgetResources: b.forgetResources, forgetModules: b.forgetModules, + planCtx: b.planCtx, } } } @@ -379,7 +364,7 @@ func (b *PlanGraphBuilder) initDestroy() { a.overridePreventDestroy = b.overridePreventDestroy return &NodePlanDestroyableResourceInstance{ NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, + planCtx: b.planCtx, } } } @@ -426,12 +411,13 @@ func (b *PlanGraphBuilder) initImport() { // not going to combine importing with other changes. This is // temporary to try and maintain existing import behaviors, but // planning will need to be allowed for more complex configurations. 
- skipPlanChanges: true, - + // // We also skip refresh for now, since the plan output is written // as the new state, and users are not expecting the import process // to update any other instances in state. - skipRefresh: true, + planCtx: b.planCtx. + withSkipPlanChanges(true). + withSkipRefresh(true), } } } diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go index 3e1c96492500..fe78e870bcb1 100644 --- a/internal/terraform/node_resource_abstract_instance.go +++ b/internal/terraform/node_resource_abstract_instance.go @@ -44,8 +44,6 @@ type NodeAbstractResourceInstance struct { Dependencies []addrs.ConfigResource - preDestroyRefresh bool - // During import (or query) we may generate configuration for a resource, which needs // to be stored in the final change. generatedConfigHCL string @@ -791,6 +789,7 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey state func (n *NodeAbstractResourceInstance) plan( ctx EvalContext, + planCtx nodePlanContext, plannedChange *plans.ResourceInstanceChange, currentState *states.ResourceInstanceObject, createBeforeDestroy bool, @@ -832,7 +831,7 @@ func (n *NodeAbstractResourceInstance) plan( config := *n.Config checkRuleSeverity := tfdiags.Error - if n.preDestroyRefresh { + if planCtx.preDestroyRefresh { checkRuleSeverity = tfdiags.Warning } diff --git a/internal/terraform/node_resource_apply_instance.go b/internal/terraform/node_resource_apply_instance.go index ee551a1e49a6..2d3beb811fd3 100644 --- a/internal/terraform/node_resource_apply_instance.go +++ b/internal/terraform/node_resource_apply_instance.go @@ -34,6 +34,9 @@ type NodeApplyableResourceInstance struct { // forceReplace indicates that this resource is being replaced for external // reasons, like a -replace flag or via replace_triggered_by. forceReplace bool + + // planCtx is the plan context for this resource instance. 
+ planCtx nodePlanContext } var ( @@ -265,7 +268,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) // Make a new diff, in case we've learned new values in the state // during apply which we can now incorporate. - diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace) + diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, n.planCtx, diff, state, false, n.forceReplace) diags = diags.Append(planDiags) if diags.HasErrors() { return diags diff --git a/internal/terraform/node_resource_destroy_deposed.go b/internal/terraform/node_resource_destroy_deposed.go index 2fdf6fccfa04..ad179a6906be 100644 --- a/internal/terraform/node_resource_destroy_deposed.go +++ b/internal/terraform/node_resource_destroy_deposed.go @@ -36,12 +36,10 @@ type NodePlanDeposedResourceInstanceObject struct { *NodeAbstractResourceInstance DeposedKey states.DeposedKey - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext // forgetResources lists resources that should not be destroyed, only removed // from state. @@ -134,7 +132,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk // logic here is a bit overloaded. // // We also don't refresh when forgetting instances, as it is unnecessary. - if !n.skipRefresh && op != walkPlanDestroy && !forget { + if !n.planCtx.skipRefresh && !n.planCtx.lightMode && op != walkPlanDestroy && !forget { // Refresh this object even though it may be destroyed, in // case it's already been deleted outside of Terraform. 
If this is a // normal plan, providers expect a Read request to remove missing @@ -162,7 +160,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk } } - if !n.skipPlanChanges { + if !n.planCtx.skipPlanChanges { var change *plans.ResourceInstanceChange var pDiags tfdiags.Diagnostics var planDeferred *providers.Deferred diff --git a/internal/terraform/node_resource_partial_plan.go b/internal/terraform/node_resource_partial_plan.go index 9c4c4d86fa08..a51efd733521 100644 --- a/internal/terraform/node_resource_partial_plan.go +++ b/internal/terraform/node_resource_partial_plan.go @@ -52,11 +52,10 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know // And add a node to the graph for this resource. g.Add(&nodePlannablePartialExpandedResource{ - addr: resourceAddr, - config: n.Config, - resolvedProvider: n.ResolvedProvider, - skipPlanChanges: n.skipPlanChanges, - preDestroyRefresh: n.preDestroyRefresh, + addr: resourceAddr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + planCtx: n.planCtx, }) } @@ -69,6 +68,8 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know state := ss.Lock() defer ss.Unlock() + // TODO(sams): Comment why we need to skip plan changes + planCtx := n.planCtx.withSkipPlanChanges(true) Resources: for _, res := range state.Resources(n.Addr) { @@ -87,7 +88,7 @@ func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, know // Then each of the instances is a "maybe orphan" // instance, and we need to add a node for that. 
maybeOrphanResources.Add(res.Addr.Instance(key)) - g.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), true)(NewNodeAbstractResourceInstance(res.Addr.Instance(key)))) + g.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), planCtx)(NewNodeAbstractResourceInstance(res.Addr.Instance(key)))) } // Move onto the next resource. @@ -284,7 +285,7 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr DynamicTransformer(func(graph *Graph) error { // We'll add a node for all the known instance keys. for _, key := range knownInstKeys { - graph.Add(n.concreteResource(ctx, knownImports, unknownImports, n.skipPlanChanges)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + graph.Add(n.concreteResource(ctx, knownImports, unknownImports, n.planCtx)(NewNodeAbstractResourceInstance(addr.Instance(key)))) } return nil }), @@ -295,11 +296,10 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr addr := addr.Module.UnexpandedResource(addr.Resource) graph.Add(&nodePlannablePartialExpandedResource{ - addr: addr, - config: n.Config, - resolvedProvider: n.ResolvedProvider, - skipPlanChanges: n.skipPlanChanges, - preDestroyRefresh: n.preDestroyRefresh, + addr: addr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + planCtx: n.planCtx, }) } return nil @@ -336,7 +336,7 @@ func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr // to a known instance but we have unknown keys so we don't // know for sure that it's been deleted. 
maybeOrphans.Add(addr.Instance(key)) - graph.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), true)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + graph.Add(n.concreteResource(ctx, addrs.MakeMap[addrs.AbsResourceInstance, cty.Value](), addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.planCtx.withSkipPlanChanges(true))(NewNodeAbstractResourceInstance(addr.Instance(key)))) continue } diff --git a/internal/terraform/node_resource_plan.go b/internal/terraform/node_resource_plan.go index 1b8763c2af35..baa3babf021c 100644 --- a/internal/terraform/node_resource_plan.go +++ b/internal/terraform/node_resource_plan.go @@ -29,14 +29,10 @@ type nodeExpandPlannableResource struct { // on regardless of what the configuration says. ForceCreateBeforeDestroy *bool - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - preDestroyRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes) that are + // propagated to concrete resource instance nodes. + planCtx nodePlanContext // forceReplace are resource instance addresses where the user wants to // force generating a replace action. 
This set isn't pre-filtered, so @@ -507,7 +503,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, steps := []GraphTransformer{ // Expand the count or for_each (if present) &ResourceCountTransformer{ - Concrete: n.concreteResource(ctx, imports, addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.skipPlanChanges), + Concrete: n.concreteResource(ctx, imports, addrs.MakeMap[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]](), n.planCtx), Schema: n.Schema, Addr: n.ResourceAddr(), InstanceAddrs: instanceAddrs, @@ -545,7 +541,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, return graph, diags } -func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImports addrs.Map[addrs.AbsResourceInstance, cty.Value], unknownImports addrs.Map[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]], skipPlanChanges bool) func(*NodeAbstractResourceInstance) dag.Vertex { +func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImports addrs.Map[addrs.AbsResourceInstance, cty.Value], unknownImports addrs.Map[addrs.PartialExpandedResource, addrs.Set[addrs.AbsResourceInstance]], planCtx nodePlanContext) func(*NodeAbstractResourceInstance) dag.Vertex { return func(a *NodeAbstractResourceInstance) dag.Vertex { var m *NodePlannableResourceInstance @@ -575,7 +571,6 @@ func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImp a.ProviderMetas = n.ProviderMetas a.dependsOn = n.dependsOn a.Dependencies = n.dependencies - a.preDestroyRefresh = n.preDestroyRefresh a.generateConfigPath = n.generateConfigPath m = &NodePlannableResourceInstance{ @@ -585,8 +580,7 @@ func (n *nodeExpandPlannableResource) concreteResource(ctx EvalContext, knownImp // to force on CreateBeforeDestroy due to dependencies on other // nodes that have it. 
ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - skipRefresh: n.skipRefresh, - skipPlanChanges: skipPlanChanges, + planCtx: planCtx, forceReplace: slices.ContainsFunc(n.forceReplace, a.Addr.Equal), } @@ -629,8 +623,7 @@ func (n *nodeExpandPlannableResource) concreteResourceOrphan(a *NodeAbstractReso return &NodePlannableResourceInstanceOrphan{ NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, + planCtx: n.planCtx, } } diff --git a/internal/terraform/node_resource_plan_destroy.go b/internal/terraform/node_resource_plan_destroy.go index 0962ce8fa853..aadbb58ab611 100644 --- a/internal/terraform/node_resource_plan_destroy.go +++ b/internal/terraform/node_resource_plan_destroy.go @@ -20,8 +20,10 @@ import ( type NodePlanDestroyableResourceInstance struct { *NodeAbstractResourceInstance - // skipRefresh indicates that we should skip refreshing - skipRefresh bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext } var ( @@ -85,7 +87,7 @@ func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalCon // running a normal plan walk when refresh is enabled. These two // conditionals must agree (be exactly opposite) in order to get the // correct behavior in both cases. 
- if n.skipRefresh { + if n.planCtx.skipRefresh || n.planCtx.lightMode { diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState)) if diags.HasErrors() { return diags diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index 16574427ea29..51e99afaae42 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -41,12 +41,10 @@ type NodePlannableResourceInstance struct { *NodeAbstractResourceInstance ForceCreateBeforeDestroy bool - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext // forceReplace indicates that this resource is being replaced for external // reasons, like a -replace flag or via replace_triggered_by. 
@@ -110,13 +108,10 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di return diags } - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) deferrals := ctx.Deferrals() - change, state, deferred, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges, deferrals.ShouldDeferResourceInstanceChanges(addr, n.Dependencies)) + change, state, deferred, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.planCtx.skipPlanChanges, deferrals.ShouldDeferResourceInstanceChanges(addr, n.Dependencies)) diags = diags.Append(planDiags) if diags.HasErrors() { return diags @@ -192,10 +187,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) var instanceRefreshState *states.ResourceInstanceObject - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) diags = diags.Append(err) @@ -210,7 +202,8 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } } - importing := n.importTarget != cty.NilVal && !n.preDestroyRefresh + importing := n.importTarget != cty.NilVal && !n.planCtx.preDestroyRefresh + var deferred *providers.Deferred var importTarget *plans.Importing shouldRefresh := !n.skipRefresh @@ -355,7 +348,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Refresh, maybe // The import process handles its own refresh - if shouldRefresh { + if !n.planCtx.SkipRefresh() && !importing { var refreshDiags tfdiags.Diagnostics instanceRefreshState, refreshDeferred, refreshDiags = n.refreshState(ctx, instanceRefreshState) diags = diags.Append(refreshDiags) @@ -380,7 +373,7 @@ func (n 
*NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } } - if n.skipRefresh && !importing && updatedCBD { + if n.planCtx.SkipRefresh() && !importing && updatedCBD { // CreateBeforeDestroy must be set correctly in the state which is used // to create the apply graph, so if we did not refresh the state make // sure we still update any changes to CreateBeforeDestroy. @@ -395,7 +388,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) } // Plan the instance, unless we're in the refresh-only mode - if !n.skipPlanChanges { + if !n.planCtx.skipPlanChanges { _, planDiags := n.planManagedResource( ctx, instanceRefreshState, @@ -1209,3 +1202,11 @@ func actionIsTriggeredByEvent(events []configs.ActionTriggerEvent, action plans. } return triggeredEvents } + +func getCheckRuleSeverity(ctx nodePlanContext) tfdiags.Severity { + checkRuleSeverity := tfdiags.Error + if ctx.skipPlanChanges || ctx.preDestroyRefresh { + checkRuleSeverity = tfdiags.Warning + } + return checkRuleSeverity +} diff --git a/internal/terraform/node_resource_plan_orphan.go b/internal/terraform/node_resource_plan_orphan.go index 51364049f232..9a1dde4fa6f4 100644 --- a/internal/terraform/node_resource_plan_orphan.go +++ b/internal/terraform/node_resource_plan_orphan.go @@ -19,12 +19,10 @@ import ( type NodePlannableResourceInstanceOrphan struct { *NodeAbstractResourceInstance - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool + // planCtx carries per-node planning context flags (e.g. light-mode, + // skip-refresh, pre-destroy-refresh, skip-plan-changes). + // See the nodePlanContext type for details on the individual fields. + planCtx nodePlanContext // forgetResources lists resources that should not be destroyed, only removed // from state. 
@@ -122,7 +120,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon } } - if !n.skipRefresh && !forget { + if !n.planCtx.skipRefresh && !n.planCtx.lightMode && !forget { // Refresh this instance even though it is going to be destroyed, in // order to catch missing resources. If this is a normal plan, // providers expect a Read request to remove missing resources from the @@ -179,7 +177,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon // refresh indicates the instance no longer exists, there is also nothing // to plan because there is no longer any state and it doesn't exist in the // config. - if n.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { + if n.planCtx.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { return diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState)) } diff --git a/internal/terraform/node_resource_plan_partialexp.go b/internal/terraform/node_resource_plan_partialexp.go index 3a3a09b7a940..d94fdfb75994 100644 --- a/internal/terraform/node_resource_plan_partialexp.go +++ b/internal/terraform/node_resource_plan_partialexp.go @@ -30,11 +30,10 @@ import ( // // This is the partial-expanded equivalent of NodePlannableResourceInstance. type nodePlannablePartialExpandedResource struct { - addr addrs.PartialExpandedResource - config *configs.Resource - resolvedProvider addrs.AbsProviderConfig - skipPlanChanges bool - preDestroyRefresh bool + addr addrs.PartialExpandedResource + config *configs.Resource + resolvedProvider addrs.AbsProviderConfig + planCtx nodePlanContext } var ( @@ -95,7 +94,7 @@ func (n *nodePlannablePartialExpandedResource) Execute(ctx EvalContext, op walkO // need to destroy. return nil case walkPlan: - if n.preDestroyRefresh || n.skipPlanChanges { + if n.planCtx.preDestroyRefresh || n.planCtx.skipPlanChanges { // During any kind of refresh, we also don't really care about // partial resources. 
We only care about the fully-expanded resources // already in state, so we don't need to plan partial resources. @@ -184,7 +183,7 @@ func (n *nodePlannablePartialExpandedResource) managedResourceExecute(ctx EvalCo // - Evaluating the preconditions/postconditions to see if they produce // a definitive fail result even with the partial information. - if n.skipPlanChanges { + if n.planCtx.skipPlanChanges { // If we're supposed to be making a refresh-only plan then there's // not really anything else to do here, since we can only refresh // specific known resource instances (which another graph node should From 51972d55b69be9ff623fbfa2622c6b049283c01c Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Mon, 9 Feb 2026 20:19:35 +0100 Subject: [PATCH 4/4] add readExistingState method --- internal/backend/backendrun/operation.go | 1 - internal/backend/local/backend_local.go | 3 - internal/command/arguments/plan.go | 1 - internal/plans/plan.go | 6 +- internal/terraform/context_plan.go | 43 +- internal/terraform/context_walk.go | 3 - internal/terraform/eval_context.go | 2 - internal/terraform/eval_context_builtin.go | 5 - internal/terraform/eval_context_mock.go | 5 - internal/terraform/graph_builder_plan.go | 3 +- internal/terraform/graph_walk_context.go | 2 - internal/terraform/node_resource_abstract.go | 25 - .../node_resource_abstract_instance.go | 7 +- internal/terraform/node_resource_manager.go | 54 - .../terraform/node_resource_plan_instance.go | 292 +++--- .../terraform/node_resource_plan_instance2.go | 937 ------------------ .../node_resource_plan_instance_ds.go | 64 -- 17 files changed, 175 insertions(+), 1278 deletions(-) delete mode 100644 internal/terraform/node_resource_manager.go delete mode 100644 internal/terraform/node_resource_plan_instance2.go delete mode 100644 internal/terraform/node_resource_plan_instance_ds.go diff --git a/internal/backend/backendrun/operation.go b/internal/backend/backendrun/operation.go index 98d24cb300c6..0db70e713e89 100644 --- 
a/internal/backend/backendrun/operation.go +++ b/internal/backend/backendrun/operation.go @@ -82,7 +82,6 @@ type Operation struct { // backend that will be used when applying the plan. // Only one of PlanOutBackend or PlanOutStateStore may be set. PlanOutBackend *plans.Backend - PlanLight bool // PlanOutStateStore is the state_store to store with the plan. This is the // state store that will be used when applying the plan. diff --git a/internal/backend/local/backend_local.go b/internal/backend/local/backend_local.go index 2c98dbe07031..2add2431cc05 100644 --- a/internal/backend/local/backend_local.go +++ b/internal/backend/local/backend_local.go @@ -212,9 +212,6 @@ func (b *Local) localRunDirect(op *backendrun.Operation, run *backendrun.LocalRu GenerateConfigPath: op.GenerateConfigOut, DeferralAllowed: op.DeferralAllowed, Query: op.Query, - PlanCtx: terraform.PlanContext{ - LightMode: op.PlanLight, - }, } run.PlanOpts = planOpts diff --git a/internal/command/arguments/plan.go b/internal/command/arguments/plan.go index 9e06b88ec4e7..982bafa05ad3 100644 --- a/internal/command/arguments/plan.go +++ b/internal/command/arguments/plan.go @@ -57,7 +57,6 @@ func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { cmdFlags.BoolVar(&plan.InputEnabled, "input", true, "input") cmdFlags.StringVar(&plan.OutPath, "out", "", "out") cmdFlags.StringVar(&plan.GenerateConfigPath, "generate-config-out", "", "generate-config-out") - cmdFlags.BoolVar(&plan.Light, "light", false, "light") var json bool cmdFlags.BoolVar(&json, "json", false, "json") diff --git a/internal/plans/plan.go b/internal/plans/plan.go index 9dc877d16999..d18e80b75eda 100644 --- a/internal/plans/plan.go +++ b/internal/plans/plan.go @@ -170,11 +170,9 @@ type Plan struct { // apply can be checked for consistency. 
FunctionResults []lang.FunctionResultHash - // Light is true if this plan was created in "light plan" mode, where + // Light is true if this plan was created in "light" mode, where // Terraform skipped reading remote state for resources that have not - // changed in the local configuration or local state. This is recorded - // for UI purposes so that the user can be reminded that the plan may - // not reflect out-of-band changes to remote resources. + // changed in the local configuration and the state dependents of those resources. Light bool } diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go index 54166c548e5b..17bb0c5206ca 100644 --- a/internal/terraform/context_plan.go +++ b/internal/terraform/context_plan.go @@ -29,15 +29,18 @@ import ( // nodePlanContext holds contextual flags that influence how individual resource // nodes behave during the plan walk. It is derived from PlanOpts and -// threaded into every resource node. +// copied into every resource node. Each node may further modify its own copy of this +// struct, and/or pass it to child nodes. type nodePlanContext struct { - // lightMode, when set to true, activates "light plan" mode. In this mode, - // Terraform plans each resource against local state first; if the result - // is a NoOp the remote-state refresh is skipped entirely. - lightMode bool + lightMode bool + skipPlanChanges bool - skipPlanChanges bool - skipRefresh bool + // skipRefresh indicates that we should skip refreshing managed resources + skipRefresh bool + + // preDestroyRefresh indicates that we are executing the refresh which + // happens immediately before a destroy plan, which happens to use the + // normal planning mode so skipPlanChanges cannot be set. 
preDestroyRefresh bool } @@ -56,10 +59,6 @@ func (pc nodePlanContext) withPreDestroyRefresh(v bool) nodePlanContext { return pc } -func (pc nodePlanContext) SkipRefresh() bool { - return pc.skipRefresh || pc.lightMode -} - // PlanOpts are the various options that affect the details of how Terraform // will build a plan. type PlanOpts struct { @@ -74,9 +73,8 @@ type PlanOpts struct { // instance using its corresponding provider. SkipRefresh bool - // LightMode, when set to true, activates "light plan" mode. In this mode, - // Terraform plans each resource against local state first; if the result - // is a NoOp the expensive remote-state refresh is skipped entirely. + // LightMode enables terraform to plan each resource against local state first, + // if the result is a NoOp the expensive remote-state refresh is skipped entirely. // Resources whose local plan shows changes are still refreshed and // re-planned so the final diff is accurate. LightMode bool @@ -391,7 +389,7 @@ The -target option is not for routine use, and is provided only for exceptional } } - if opts.PlanCtx.LightMode { + if opts.LightMode { diags = diags.Append(tfdiags.Sourceless( tfdiags.Warning, "Light plan mode is in effect", @@ -523,14 +521,6 @@ func (c *Context) checkApplyGraph(plan *plans.Plan, config *configs.Config, opts return diags } -// nodeContext derives a nodePlanContext from the caller-facing fields on -// PlanOpts. This is the single point where the public API maps into the -// internal per-node flags that get threaded through the graph builder and -// into every resource node. -// -// Flags that are purely internal (e.g. skipPlanChanges, which is derived -// from plans.RefreshOnlyMode) are NOT set here — they are applied in -// planGraph where the mode-specific logic lives. 
func (opts *PlanOpts) nodeContext() nodePlanContext { if opts == nil { return nodePlanContext{} @@ -880,7 +870,6 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o DeferralAllowed: opts.DeferralAllowed, ExternalDependencyDeferred: opts.ExternalDependencyDeferred, Changes: changes, - PlanCtx: opts.PlanCtx, MoveResults: moveResults, Overrides: opts.Overrides, PlanTimeTimestamp: timestamp, @@ -1103,12 +1092,10 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, queryPlan: opts.Query, overridePreventDestroy: opts.OverridePreventDestroy, AllowRootEphemeralOutputs: opts.AllowRootEphemeralOutputs, - Ctx: opts.PlanCtx, }).Build(addrs.RootModuleInstance) return graph, walkPlan, diags case plans.RefreshOnlyMode: - nctx := opts. - nodeContext(). + nodeCtx := opts.nodeContext(). withSkipPlanChanges(true) // this activates "refresh only" mode. graph, diags := (&PlanGraphBuilder{ Config: config, @@ -1118,7 +1105,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, Plugins: c.plugins, Targets: append(opts.Targets, opts.ActionTargets...), ActionTargets: opts.ActionTargets, - planCtx: nctx, + planCtx: nodeCtx, Operation: walkPlan, ExternalReferences: opts.ExternalReferences, Overrides: opts.Overrides, diff --git a/internal/terraform/context_walk.go b/internal/terraform/context_walk.go index 8c7f071e2e3a..42e5fc2b0996 100644 --- a/internal/terraform/context_walk.go +++ b/internal/terraform/context_walk.go @@ -70,8 +70,6 @@ type graphWalkOpts struct { // the apply phase. PlanTimeTimestamp time.Time - PlanCtx PlanContext - // Overrides contains the set of overrides we should apply during this // operation. 
Overrides *mocking.Overrides @@ -205,6 +203,5 @@ func (c *Context) graphWalker(graph *Graph, operation walkOperation, opts *graph Forget: opts.Forget, Actions: actions.NewActions(), Deprecations: deprecation.NewDeprecations(), - PlanCtx: opts.PlanCtx, } } diff --git a/internal/terraform/eval_context.go b/internal/terraform/eval_context.go index aa91874de470..f8b02e724f0b 100644 --- a/internal/terraform/eval_context.go +++ b/internal/terraform/eval_context.go @@ -211,8 +211,6 @@ type EvalContext interface { // this execution. Overrides() *mocking.Overrides - PlanCtx() PlanContext - // withScope derives a new EvalContext that has all of the same global // context, but a new evaluation scope. withScope(scope evalContextScope) EvalContext diff --git a/internal/terraform/eval_context_builtin.go b/internal/terraform/eval_context_builtin.go index 63b4e9b6f070..5f66c0fdb2ed 100644 --- a/internal/terraform/eval_context_builtin.go +++ b/internal/terraform/eval_context_builtin.go @@ -95,7 +95,6 @@ type BuiltinEvalContext struct { OverrideValues *mocking.Overrides ActionsValue *actions.Actions DeprecationsValue *deprecation.Deprecations - PlanContext PlanContext } // BuiltinEvalContext implements EvalContext @@ -116,10 +115,6 @@ func (ctx *BuiltinEvalContext) StopCtx() context.Context { return ctx.StopContext } -func (ctx *BuiltinEvalContext) PlanCtx() PlanContext { - return ctx.PlanContext -} - func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { for _, h := range ctx.Hooks { action, err := fn(h) diff --git a/internal/terraform/eval_context_mock.go b/internal/terraform/eval_context_mock.go index 0a8cbf05cb3d..20dffe8ef6f4 100644 --- a/internal/terraform/eval_context_mock.go +++ b/internal/terraform/eval_context_mock.go @@ -439,11 +439,6 @@ func (c *MockEvalContext) Overrides() *mocking.Overrides { return c.OverrideValues } -func (c *MockEvalContext) PlanCtx() PlanContext { - // This is a no-op for the mock. 
- return PlanContext{} -} - func (c *MockEvalContext) Forget() bool { c.ForgetCalled = true return c.ForgetValues diff --git a/internal/terraform/graph_builder_plan.go b/internal/terraform/graph_builder_plan.go index 1c72821af99d..910c75962a3c 100644 --- a/internal/terraform/graph_builder_plan.go +++ b/internal/terraform/graph_builder_plan.go @@ -60,8 +60,7 @@ type PlanGraphBuilder struct { // action instead. Create and Delete actions are not affected. ForceReplace []addrs.AbsResourceInstance - // planCtx carries per-node planning context flags (e.g. light-mode, - // skip-refresh, pre-destroy-refresh, skip-plan-changes) that are + // planCtx carries per-node planning context info that are // propagated to individual resource nodes during graph building. planCtx nodePlanContext diff --git a/internal/terraform/graph_walk_context.go b/internal/terraform/graph_walk_context.go index 865736392039..a8f5dbcbb671 100644 --- a/internal/terraform/graph_walk_context.go +++ b/internal/terraform/graph_walk_context.go @@ -53,7 +53,6 @@ type ContextGraphWalker struct { Config *configs.Config PlanTimestamp time.Time Overrides *mocking.Overrides - PlanCtx PlanContext // Forget if set to true will cause the plan to forget all resources. This is // only allowed in the context of a destroy plan. 
Forget bool @@ -146,7 +145,6 @@ func (w *ContextGraphWalker) EvalContext() EvalContext { PrevRunStateValue: w.PrevRunState, Evaluator: evaluator, OverrideValues: w.Overrides, - PlanContext: w.PlanCtx, forget: w.Forget, ActionsValue: w.Actions, DeprecationsValue: w.Deprecations, diff --git a/internal/terraform/node_resource_abstract.go b/internal/terraform/node_resource_abstract.go index 95da12a4d84b..f4787cb30ef9 100644 --- a/internal/terraform/node_resource_abstract.go +++ b/internal/terraform/node_resource_abstract.go @@ -489,31 +489,6 @@ func (n *NodeAbstractResource) recordResourceData(ctx EvalContext, addr addrs.Ab return diags } -// schemaUpgradeRequired determines if the state representation of a resource needs upgrading -// based on its schema version in state and its current schema. It returns the state, the schema, -// and a boolean indicating if an upgrade is needed. -func (n *NodeAbstractResource) schemaUpgradeRequired(ctx EvalContext, providerSchema providers.ProviderSchema, addr addrs.AbsResourceInstance) (bool, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - log.Printf("[TRACE] schemaUpgradeRequired: checking schema version for %s", addr) - - src := ctx.State().ResourceInstanceObject(addr, addrs.NotDeposed) - if src == nil { - // No state to upgrade - log.Printf("[TRACE] schemaUpgradeRequired: no state present for %s", addr) - return false, diags - } - - schema := providerSchema.SchemaForResourceAddr(addr.Resource.ContainingResource()) - if schema.Body == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return false, diags.Append(fmt.Errorf("no schema available for %s while checking for upgrades; this is a bug in Terraform and should be reported", addr)) - } - - // Check if the schema version in state matches the current schema version - upgradeRequired := src.SchemaVersion != uint64(schema.Version) - return upgradeRequired, diags -} - // readResourceInstanceState reads the current object for a 
specific instance in // the state. func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go index fe78e870bcb1..f0d35093d64d 100644 --- a/internal/terraform/node_resource_abstract_instance.go +++ b/internal/terraform/node_resource_abstract_instance.go @@ -829,11 +829,7 @@ func (n *NodeAbstractResourceInstance) plan( } config := *n.Config - - checkRuleSeverity := tfdiags.Error - if planCtx.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(planCtx) if plannedChange != nil { // If we already planned the action, we stick to that plan @@ -842,7 +838,6 @@ func (n *NodeAbstractResourceInstance) plan( // Evaluate the configuration forEach, _, _ := evaluateForEachExpression(n.Config.ForEach, ctx, false) - keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) checkDiags := evalCheckRules( diff --git a/internal/terraform/node_resource_manager.go b/internal/terraform/node_resource_manager.go deleted file mode 100644 index fabd41e60664..000000000000 --- a/internal/terraform/node_resource_manager.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package terraform - -import "github.com/hashicorp/terraform/internal/tfdiags" - -// ResourceState is an interface that defines the contract for executing -// a resource state. It takes a context, a node, and resource data as input -// and returns a new resource state and any diagnostics that occurred during -// the execution. 
-type ResourceState[T any] interface { - Execute(ctx EvalContext, node T, data *ResourceData) (ResourceState[T], tfdiags.Diagnostics) -} - -// ResourceStateManager is a generic state manager for resource instances -// It manages the state of a resource instance and its transitions -// between different states. -type ResourceStateManager[T any] struct { - node T - data *ResourceData - hooks []func(ResourceState[T], *ResourceStateManager[T]) -} - -func NewResourceStateManager[T any](node T) *ResourceStateManager[T] { - return &ResourceStateManager[T]{ - node: node, - data: &ResourceData{}, - hooks: []func(ResourceState[T], *ResourceStateManager[T]){}, - } -} - -func (m *ResourceStateManager[T]) AddHook(hook func(ResourceState[T], *ResourceStateManager[T])) { - m.hooks = append(m.hooks, hook) -} - -func (m *ResourceStateManager[T]) Execute(start ResourceState[T], ctx EvalContext) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // Start with initial state - currentState := start - - // Execute state transitions until completion or error - for currentState != nil && !diags.HasErrors() { - for _, hook := range m.hooks { - hook(currentState, m) - } - var stateDiags tfdiags.Diagnostics - currentState, stateDiags = currentState.Execute(ctx, m.node, m.data) - diags = diags.Append(stateDiags) - } - - return diags -} diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index 51e99afaae42..30cfb74ab4d3 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -26,14 +26,6 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) -type PlanContext struct { - // PlanMode is the mode of the plan. This is used to determine how - // the plan is executed and what actions are taken. - PlanMode plans.Mode - - LightMode bool -} - // NodePlannableResourceInstance represents a _single_ resource // instance that is plannable. 
This means this represents a single // count index, for example. @@ -184,7 +176,6 @@ func (n *NodePlannableResourceInstance) ephemeralResourceExecute(ctx EvalContext func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { config := n.Config addr := n.ResourceInstanceAddr() - var instanceRefreshState *states.ResourceInstanceObject checkRuleSeverity := getCheckRuleSeverity(n.planCtx) @@ -204,88 +195,26 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) importing := n.importTarget != cty.NilVal && !n.planCtx.preDestroyRefresh - var deferred *providers.Deferred - var importTarget *plans.Importing - shouldRefresh := !n.skipRefresh - - // If the resource is to be imported, we now ask the provider for an Import - // and a Refresh, and save the resulting state to instanceRefreshState. - - if importing { - importTarget = &plans.Importing{Target: n.importTarget} - // importState takes care of refreshing its imported state - shouldRefresh = shouldRefresh && !importing - if n.importTarget.IsWhollyKnown() { - var importDiags tfdiags.Diagnostics - instanceRefreshState, deferred, importDiags = n.importState(ctx, addr, n.importTarget, provider, providerSchema) - diags = diags.Append(importDiags) - } else { - // Otherwise, just mark the resource as deferred without trying to - // import it. - deferred = &providers.Deferred{ - Reason: providers.DeferredReasonResourceConfigUnknown, - } - if n.Config == nil && len(n.generateConfigPath) > 0 { - // Then we're supposed to be generating configuration for this - // resource, but we can't because the configuration is unknown. - // - // Normally, the rest of this function would just be about - // planning the known configuration to make sure everything we - // do know about it is correct, but we can't even do that here. - // - // What we'll do is write out the address as being deferred with - // an entirely unknown value. 
Then we'll skip the rest of this - // function. (a) We're going to panic later when it complains - // about having no configuration, and (b) the rest of the - // function isn't doing anything as there is no configuration - // to validate. - - impliedType := providerSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType() - ctx.Deferrals().ReportResourceInstanceDeferred(addr, providers.DeferredReasonResourceConfigUnknown, &plans.ResourceInstanceChange{ - Addr: addr, - PrevRunAddr: addr, - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - Action: plans.NoOp, // assume we'll get the config generation correct. - Before: cty.NullVal(impliedType), - After: cty.UnknownVal(impliedType), - Importing: &plans.Importing{ - Target: n.importTarget, - }, - }, - }) - return diags - } - } - - // There is a subtle difference between the import by identity - // and the import by ID. When importing by identity, we need to - // make sure to use the complete identity return by the provider - // instead of the (potential) incomplete one from the configuration. - if n.importTarget.Type().IsObjectType() && instanceRefreshState != nil { - importTarget = &plans.Importing{Target: instanceRefreshState.Identity} - } + // Read or import the existing state of the resource instance. 
+ instanceRefreshState, importTarget, deferred, readDiags := n.readExistingState(ctx, provider, providerSchema) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return diags + } - } else { - var readDiags tfdiags.Diagnostics - instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr) - diags = diags.Append(readDiags) - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) - return diags - } + // if the resource is deferred, due to unknown config, but we are supposed to + // generate config, we return here. + // TODO(sams): better comment + if deferred != nil && n.Config == nil && len(n.generateConfigPath) > 0 { + return diags } - // Now we have the state value - // - // Then In light mode, - // We start by planning the resource, and if it is a no-op, - // we skip the read step - if ctx.PlanCtx().LightMode { - change, planDiags := n.planManagedResource( + // Now we have the state value, then in light mode, + // We start by planning the resource, and if it is a no-op, we skip the read step + if n.planCtx.lightMode { + var plannedChange *plans.ResourceInstanceChange + var planDiags tfdiags.Diagnostics + plannedChange, deferred, planDiags = n.planManagedResource( ctx, instanceRefreshState, deferred, @@ -297,11 +226,20 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } - // If the change is a no-op, write the change and return - if change.Action == plans.NoOp { - diags = diags.Append(n.writeChange(ctx, change, "")) + // If the planned change is a no-op, write the change and return + if plannedChange.Action == plans.NoOp { + diags = diags.Append(n.writeChange(ctx, plannedChange, "")) + return diags + } + + // if the plan is deferred, we can just return here. 
+ // TODO(sams): Is there a scenario where a prior val results in deferral, but + // refresh may have prevented that? + if deferred != nil { return diags } + + // // Otherwise we continue with the read step, // which will reconcile the local state and config with the remote state } @@ -348,9 +286,9 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Refresh, maybe // The import process handles its own refresh - if !n.planCtx.SkipRefresh() && !importing { + if !n.planCtx.skipRefresh && !importing { var refreshDiags tfdiags.Diagnostics - instanceRefreshState, refreshDeferred, refreshDiags = n.refreshState(ctx, instanceRefreshState) + instanceRefreshState, refreshDeferred, refreshDiags = n.refreshState(ctx, deferred, instanceRefreshState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags @@ -359,21 +297,9 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) if deferred == nil && refreshDeferred != nil { deferred = refreshDeferred } - - if deferred == nil { - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - } - - if diags.HasErrors() { - // Pre-Diff error hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) - })) - return diags - } } - if n.planCtx.SkipRefresh() && !importing && updatedCBD { + if (n.planCtx.skipRefresh || n.planCtx.lightMode) && !importing && updatedCBD { // CreateBeforeDestroy must be set correctly in the state which is used // to create the apply graph, so if we did not refresh the state make // sure we still update any changes to CreateBeforeDestroy. 
@@ -389,7 +315,8 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Plan the instance, unless we're in the refresh-only mode if !n.planCtx.skipPlanChanges { - _, planDiags := n.planManagedResource( + var planDiags tfdiags.Diagnostics + _, deferred, planDiags = n.planManagedResource( ctx, instanceRefreshState, deferred, @@ -458,7 +385,90 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } -func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) { +func (n *NodePlannableResourceInstance) readExistingState(ctx EvalContext, + provider providers.Interface, + providerSchema providers.ProviderSchema, +) (*states.ResourceInstanceObject, *plans.Importing, *providers.Deferred, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var deferred *providers.Deferred + var existingState *states.ResourceInstanceObject + var importTarget *plans.Importing + addr := n.ResourceInstanceAddr() + importing := n.importTarget != cty.NilVal && !n.planCtx.preDestroyRefresh + + // If the resource is to be imported, we now ask the provider for an Import + // and a Refresh, and save the resulting state to existingState. + if importing { + importTarget = &plans.Importing{Target: n.importTarget} + // importState takes care of refreshing its imported state + if n.importTarget.IsWhollyKnown() { + var importDiags tfdiags.Diagnostics + existingState, deferred, importDiags = n.importState(ctx, addr, n.importTarget, provider, providerSchema) + diags = diags.Append(importDiags) + } else { + // Otherwise, just mark the resource as deferred without trying to + // import it. 
+ deferred = &providers.Deferred{ + Reason: providers.DeferredReasonResourceConfigUnknown, + } + if n.Config == nil && len(n.generateConfigPath) > 0 { + // Then we're supposed to be generating configuration for this + // resource, but we can't because the configuration is unknown. + // + // Normally, the rest of this function would just be about + // planning the known configuration to make sure everything we + // do know about it is correct, but we can't even do that here. + // + // What we'll do is write out the address as being deferred with + // an entirely unknown value. Then we'll skip the rest of this + // function. (a) We're going to panic later when it complains + // about having no configuration, and (b) the rest of the + // function isn't doing anything as there is no configuration + // to validate. + + impliedType := providerSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType() + ctx.Deferrals().ReportResourceInstanceDeferred(addr, providers.DeferredReasonResourceConfigUnknown, &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: plans.NoOp, // assume we'll get the config generation correct. + Before: cty.NullVal(impliedType), + After: cty.UnknownVal(impliedType), + Importing: &plans.Importing{ + Target: n.importTarget, + }, + }, + }) + return nil, importTarget, deferred, diags + } + } + + // There is a subtle difference between the import by identity + // and the import by ID. When importing by identity, we need to + // make sure to use the complete identity returned by the provider + // instead of the (potential) incomplete one from the configuration. 
+ if n.importTarget.Type().IsObjectType() && existingState != nil { + importTarget = &plans.Importing{Target: existingState.Identity} + } + + } else { + var readDiags tfdiags.Diagnostics + existingState, readDiags = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(readDiags) + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + return nil, importTarget, deferred, diags + } + } + + return existingState, importTarget, deferred, diags +} + +func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, deferred *providers.Deferred, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics // refresh riNode := &NodeAbstractResourceInstance{ @@ -466,7 +476,7 @@ func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, state *sta NodeAbstractResource: n.NodeAbstractResource, override: n.override, } - refreshedState, deferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state, ctx.Deferrals().DeferralAllowed()) + refreshedState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state, ctx.Deferrals().DeferralAllowed()) diags = diags.Append(refreshDiags) if diags.HasErrors() { return refreshedState, deferred, diags @@ -482,7 +492,26 @@ func (n *NodePlannableResourceInstance) refreshState(ctx EvalContext, state *sta n.Dependencies, refreshedState.Dependencies, ) } - return refreshedState, deferred, diags + + if deferred == nil && refreshDeferred != nil { + deferred = refreshDeferred + } + + if deferred == nil { + // Only write the state if the change isn't being deferred. We're also + // reporting the deferred status to the caller, so they should know + // not to read from the state. 
+ diags = diags.Append(n.writeResourceInstanceState(ctx, refreshedState, refreshState)) + } + + if diags.HasErrors() { + // Pre-Diff error hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) + })) + } + + return refreshedState, refreshDeferred, diags } func (n *NodePlannableResourceInstance) planManagedResource( @@ -490,22 +519,19 @@ func (n *NodePlannableResourceInstance) planManagedResource( instanceRefreshState *states.ResourceInstanceObject, deferred *providers.Deferred, importTarget *plans.Importing, - write bool, -) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) { + writeChange bool, +) (*plans.ResourceInstanceChange, *providers.Deferred, tfdiags.Diagnostics) { - writeChange := func(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { + changeWriter := func(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { return nil } - if write { - writeChange = n.writeChange + if writeChange { + changeWriter = n.writeChange } var diags tfdiags.Diagnostics addr := n.ResourceInstanceAddr() - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } + checkRuleSeverity := getCheckRuleSeverity(n.planCtx) // add this instance to n.forceReplace if replacement is triggered by // another change @@ -524,11 +550,11 @@ func (n *NodePlannableResourceInstance) planManagedResource( diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, cty.DynamicVal, cty.DynamicVal, diags.Err()) })) - return nil, diags + return nil, deferred, diags } change, instancePlanState, planDeferred, repeatData, planDiags := n.plan( - ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + ctx, n.planCtx, nil, instanceRefreshState, 
n.ForceCreateBeforeDestroy, n.forceReplace, ) diags = diags.Append(planDiags) if diags.HasErrors() { @@ -549,10 +575,10 @@ func (n *NodePlannableResourceInstance) planManagedResource( GeneratedConfig: n.generatedConfigHCL, }, } - diags = diags.Append(writeChange(ctx, change, "")) + diags = diags.Append(changeWriter(ctx, change, "")) } - return change, diags + return change, deferred, diags } if deferred == nil && planDeferred != nil { @@ -596,18 +622,18 @@ func (n *NodePlannableResourceInstance) planManagedResource( // Future work should adjust these APIs such that it is impossible to // update these two data structures incorrectly through any objects // reachable via the terraform.EvalContext API. - diags = diags.Append(writeChange(ctx, change, "")) + diags = diags.Append(changeWriter(ctx, change, "")) if diags.HasErrors() { - return change, diags + return change, deferred, diags } diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) if diags.HasErrors() { - return change, diags + return change, deferred, diags } diags = diags.Append(n.checkPreventDestroy(change)) if diags.HasErrors() { - return change, diags + return change, deferred, diags } // If this plan resulted in a NoOp, then apply won't have a chance to make @@ -620,7 +646,7 @@ func (n *NodePlannableResourceInstance) planManagedResource( instanceRefreshState.Dependencies = n.Dependencies diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) if diags.HasErrors() { - return change, diags + return change, deferred, diags } } @@ -650,7 +676,7 @@ func (n *NodePlannableResourceInstance) planManagedResource( deferrals.ReportResourceInstanceDeferred(n.Addr, providers.DeferredReasonDeferredPrereq, change) } - return change, diags + return change, deferred, diags } // replaceTriggered checks if this instance needs to be replace due to a change @@ -905,7 +931,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. 
)) } - instanceRefreshState, refreshDeferred, refreshDiags := n.refreshState(ctx, importedState) + instanceRefreshState, refreshDeferred, refreshDiags := n.refreshState(ctx, deferred, importedState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return instanceRefreshState, deferred, diags @@ -978,12 +1004,6 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs. } } - if deferred == nil { - // Only write the state if the change isn't being deferred. We're also - // reporting the deferred status to the caller, so they should know - // not to read from the state. - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - } return instanceRefreshState, deferred, diags } diff --git a/internal/terraform/node_resource_plan_instance2.go b/internal/terraform/node_resource_plan_instance2.go deleted file mode 100644 index f614348dc2ef..000000000000 --- a/internal/terraform/node_resource_plan_instance2.go +++ /dev/null @@ -1,937 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang/ephemeral" - "github.com/hashicorp/terraform/internal/moduletest/mocking" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/deferring" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -type PlanResourceManager ResourceStateManager[*NodePlannableResourceInstance] - -// ResourceData holds the shared data during execution -type ResourceData struct { - // inputs - Addr addrs.AbsResourceInstance - Importing bool - ImportTarget 
cty.Value
	SkipPlanning bool
	LightMode    bool

	// these are set during the execution
	InstanceRefreshState *states.ResourceInstanceObject
	Provider             providers.Interface
	ProviderSchema       providers.ProviderSchema
	ResourceSchema       providers.Schema
	Deferred             *providers.Deferred
	CheckRuleSeverity    tfdiags.Severity
	RefreshNeeded        bool
}

// Execute2 plans this resource instance by running a small state machine:
// each ResourceState step returns the next step to run (nil ends the walk),
// starting from InitializationStep. The sequence of executed steps is
// logged at TRACE level for debugging.
func (n *NodePlannableResourceInstance) Execute2(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
	stateManager := NewResourceStateManager(n)
	steps := []ResourceState[*NodePlannableResourceInstance]{}
	// Record every step the manager executes so the full path can be
	// logged once the machine has finished.
	stateManager.AddHook(func(state ResourceState[*NodePlannableResourceInstance], manager *ResourceStateManager[*NodePlannableResourceInstance]) {
		steps = append(steps, state)
	})
	init := &InitializationStep{n.ResourceAddr().Resource.Mode}
	diags := stateManager.Execute(init, ctx)

	// Log the steps taken
	str := strings.Builder{}
	str.WriteString(fmt.Sprintf("Executing %s %s", n.Addr, op))
	str.WriteString(fmt.Sprintf(" in %d steps:", len(steps)))
	for _, step := range steps {
		str.WriteString(fmt.Sprintf(" -> %T", step))
	}
	log.Printf("[TRACE] %s\n", str.String())
	return diags
}

// InitializationStep is the first step in the state machine.
// It initializes the resource data and sets up the provider.
type InitializationStep struct {
	Mode addrs.ResourceMode
}

// Execute populates the shared ResourceData (addresses, mode flags, provider
// and schema) and dispatches to the appropriate next step: data-source
// planning, importing, or reading prior state ahead of a normal plan.
func (s *InitializationStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Initialize basic data
	data.Addr = node.ResourceInstanceAddr()
	data.Importing = node.importTarget != cty.NilVal && !node.preDestroyRefresh
	data.ImportTarget = node.importTarget
	data.SkipPlanning = node.skipPlanChanges
	data.LightMode = ctx.PlanCtx().LightMode
	data.RefreshNeeded = !node.skipRefresh // by default, refresh is needed, unless asked to skip it. Any step that doesn't need it will set this to false.

	// Determine check rule severity: refresh-only and pre-destroy walks
	// demote check failures to warnings.
	data.CheckRuleSeverity = tfdiags.Error
	if node.skipPlanChanges || node.preDestroyRefresh {
		data.CheckRuleSeverity = tfdiags.Warning
	}

	// Set up provider
	provider, providerSchema, err := getProvider(ctx, node.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return nil, diags
	}

	data.Provider = provider
	data.ProviderSchema = providerSchema
	data.ResourceSchema = data.ProviderSchema.SchemaForResourceAddr(node.Addr.Resource.Resource)
	if data.ResourceSchema.Body == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", node.Addr))
		return nil, diags
	}

	// Validate configuration if present
	if node.Config != nil {
		diags = diags.Append(validateSelfRef(data.Addr.Resource, node.Config.Config, providerSchema))
		if diags.HasErrors() {
			return nil, diags
		}
	}

	// Data source planning goes through a different path
	if s.Mode == addrs.DataResourceMode {
		return &PlanDataSourceStep{}, diags
	}

	// Start importing process.
	if data.Importing {
		return &ImportingStep{ImportTarget: node.importTarget}, diags
	}

	// Check if we need to upgrade the schema. If we do, we must
	// refresh the resource instance state to match the new schema.
	// NOTE(review): this ":=" reassigns diags in the same scope, so any
	// non-error diagnostics accumulated above are discarded here — confirm
	// that is intentional.
	upgradeRequired, diags := node.schemaUpgradeRequired(ctx, providerSchema, data.Addr)
	if diags.HasErrors() {
		return nil, diags
	}
	if upgradeRequired {
		data.RefreshNeeded = upgradeRequired
	}

	// Read the resource instance from the state
	data.InstanceRefreshState, diags = node.readResourceInstanceState(ctx, node.ResourceInstanceAddr())
	if diags.HasErrors() {
		return nil, diags
	}
	return &SaveSnapshotStep{}, diags
}

// ImportingStep handles the importing of resources
type ImportingStep struct {
	ImportTarget cty.Value
}

// Execute decides how an import proceeds: a wholly-known target moves on to
// ProviderImportStep; an unknown target is recorded as deferred and either
// short-circuits to PostPlanDeferralStep (when config generation was
// requested but there is no config) or goes straight to planning.
func (s *ImportingStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	addr := node.ResourceInstanceAddr()

	// If the target was already in the state, import target would be nil and we
	// would not have gotten here, but let's double-check.
	if s.ImportTarget == cty.NilVal {
		return nil, diags
	}

	// Happy path: the import target id is known. Let's import it.
	if s.ImportTarget.IsWhollyKnown() {
		return &ProviderImportStep{ImportTarget: s.ImportTarget}, diags
	}

	// Unknown config. Mark as deferred without importing.
	// We can only get here because we allowed unknowns in the
	// import target, a behavior that is only supported when
	// we allow deferrals.
	data.Deferred = &providers.Deferred{
		Reason: providers.DeferredReasonResourceConfigUnknown,
	}

	// Handle config generation
	if node.Config == nil && len(node.generateConfigPath) > 0 {
		// Then we're supposed to be generating configuration for this
		// resource, but we can't because the configuration is unknown.
		//
		// Normally, the next step would just be about
		// planning the known configuration to make sure everything we
		// do know about it is correct, but we can't even do that here.
		// If we attempt to do that, (a) We're going to panic later when it complains
		// about having no configuration, and (b) the rest of the
		// function isn't doing anything as there is no configuration
		// to validate.
		//
		// What we'll do instead is write out the address as being deferred with
		// an entirely unknown value. Therefore we can skip the planning steps
		// and go straight to the post-plan deferral step.
		impliedType := data.ProviderSchema.ResourceTypes[addr.Resource.Resource.Type].Body.ImpliedType()
		return &PostPlanDeferralStep{
			Change: &plans.ResourceInstanceChange{
				Addr:         addr,
				PrevRunAddr:  addr,
				ProviderAddr: node.ResolvedProvider,
				Change: plans.Change{
					Action: plans.NoOp, // assume we'll get the config generation correct.
					Before: cty.NullVal(impliedType),
					After:  cty.UnknownVal(impliedType),
					Importing: &plans.Importing{
						Target: s.ImportTarget,
					},
				},
			},
		}, diags
	}

	// We can go straight to planning, since we know it has no
	// state, and thus nothing to refresh.
	return &PlanningStep{}, diags
}

// ProviderImportStep handles the import of resources with the provider.
-type ProviderImportStep struct { - ImportTarget cty.Value -} - -func (s *ProviderImportStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - addr := node.ResourceInstanceAddr() - deferralAllowed := ctx.Deferrals().DeferralAllowed() - var diags tfdiags.Diagnostics - absAddr := addr.Resource.Absolute(ctx.Path()) - hookResourceID := HookResourceIdentity{ - Addr: absAddr, - ProviderAddr: node.ResolvedProvider.Provider, - } - provider := data.Provider - - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PrePlanImport(hookResourceID, s.ImportTarget) - })) - if diags.HasErrors() { - return nil, diags - } - - importType := "ID" - var importValue string - - var resp providers.ImportResourceStateResponse - if node.override != nil { - // For overriding resources that are being imported, we cheat a little - // bit and look ahead at the configuration the user has provided and - // we'll use that as the basis for the resource we're going to make up - // that is due to be overridden. - - // Note, we know we have configuration as it's impossible to enable - // config generation during tests, and the validation that config exists - // if configuration generation is off has already happened. - if node.Config == nil { - // But, just in case we change this at some point in the future, - // let's add a specific error message here we can test for to - // document the expectation somewhere. This shouldn't happen in - // production, so we don't bother with a pretty error. 
- diags = diags.Append(fmt.Errorf("override blocks do not support config generation")) - return nil, diags - } - - forEach, _, _ := evaluateForEachExpression(node.Config.ForEach, ctx, false) - keyData := EvalDataForInstanceKey(node.ResourceInstanceAddr().Resource.Key, forEach) - configVal, _, configDiags := ctx.EvaluateBlock(node.Config.Config, data.ResourceSchema.Body, nil, keyData) - if configDiags.HasErrors() { - // We have an overridden resource so we're definitely in a test and - // the users config is not valid. So give up and just report the - // problems in the users configuration. Normally, we'd import the - // resource before giving up but for a test it doesn't matter, the - // test fails in the same way and the state is just lost anyway. - // - // If there were only warnings from the config then we'll duplicate - // them if we include them (as the config will be loaded again - // later), so only add the configDiags into the main diags if we - // found actual errors. - diags = diags.Append(configDiags) - return nil, diags - } - configVal, _ = configVal.UnmarkDeep() - - // Let's pretend we're reading the value as a data source so we - // pre-compute values now as if the resource has already been created. 
- override, overrideDiags := mocking.ComputedValuesForDataSource(configVal, &mocking.MockedData{ - Value: node.override.Values, - Range: node.override.Range, - ComputedAsUnknown: false, - }, data.ResourceSchema.Body) - resp = providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: addr.Resource.Resource.Type, - State: ephemeral.StripWriteOnlyAttributes(override, data.ResourceSchema.Body), - }, - }, - Diagnostics: overrideDiags.InConfigBody(node.Config.Config, absAddr.String()), - } - } else { - if s.ImportTarget.Type().IsObjectType() { - // Identity-based import - resp = provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - Identity: s.ImportTarget, - ClientCapabilities: ctx.ClientCapabilities(), - }) - importType = "Identity" - importValue = tfdiags.ObjectToString(s.ImportTarget) - } else { - // ID-based/string import - resp = provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - ID: s.ImportTarget.AsString(), - ClientCapabilities: ctx.ClientCapabilities(), - }) - importValue = s.ImportTarget.AsString() - } - } - - data.Deferred = resp.Deferred - // If we don't support deferrals, but the provider reports a deferral and does not - // emit any error level diagnostics, we should emit an error. - if resp.Deferred != nil && !deferralAllowed && !resp.Diagnostics.HasErrors() { - diags = diags.Append(deferring.UnexpectedProviderDeferralDiagnostic(node.Addr)) - } - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags - } - - count := len(resp.ImportedResources) - switch { - case count > 1: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Multiple import states not supported", - fmt.Sprintf("While attempting to import with %s %s, the provider "+ - "returned multiple resource instance states. 
This "+ - "is not currently supported.", - importType, importValue, - ), - )) - case count == 0: - // Sanity check against the providers. If the provider defers the response, it may not have been able to return a state, so we'll only error if no deferral was returned. - if resp.Deferred == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Import returned no resources", - fmt.Sprintf("While attempting to import with %s %s, the provider"+ - "returned no instance states.", - importType, importValue, - ), - )) - return nil, diags - } - - // If we were deferred, then let's make up a resource to represent the - // state we're going to import. - state := providers.ImportedResource{ - TypeName: addr.Resource.Resource.Type, - State: cty.NullVal(data.ResourceSchema.Body.ImpliedType()), - } - - // We skip the read and further validation since we make up the state - // of the imported resource anyways. - data.InstanceRefreshState = states.NewResourceInstanceObjectFromIR(state) - data.RefreshNeeded = false - return &PlanningStep{}, nil - } - - return &PostImportStep{ - ImportType: importType, - ImportValue: importValue, - ImportedResources: resp.ImportedResources, - HookResourceID: hookResourceID}, diags -} - -type PostImportStep struct { - ImportType string - ImportValue string - ImportedResources []providers.ImportedResource - HookResourceID HookResourceIdentity -} - -func (s *PostImportStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - addr := node.ResourceInstanceAddr() - deferred := data.Deferred - var diags tfdiags.Diagnostics - imported := s.ImportedResources - - absAddr := addr.Resource.Absolute(ctx.Path()) - for _, obj := range imported { - log.Printf("[TRACE] PostImportStep: import %s %q produced instance object of type %s", absAddr.String(), s.ImportValue, obj.TypeName) - } - - // We can only call the hooks and validate the imported state 
if we have - // actually done the import. - if deferred == nil { - // call post-import hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostPlanImport(s.HookResourceID, imported) - })) - } - - if imported[0].TypeName == "" { - diags = diags.Append(fmt.Errorf("import of %s didn't set type", node.Addr.String())) - return nil, diags - } - - // Providers are supposed to return null values for all write-only attributes - writeOnlyDiags := ephemeral.ValidateWriteOnlyAttributes( - "Import returned a non-null value for a write-only attribute", - func(path cty.Path) string { - return fmt.Sprintf( - "While attempting to import with %s %s, the provider %q returned a value for the write-only attribute \"%s%s\". Write-only attributes cannot be read back from the provider. This is a bug in the provider, which should be reported in the provider's own issue tracker.", - s.ImportType, s.ImportValue, node.ResolvedProvider, node.Addr, tfdiags.FormatCtyPath(path), - ) - }, - imported[0].State, - data.ResourceSchema.Body, - ) - diags = diags.Append(writeOnlyDiags) - - if writeOnlyDiags.HasErrors() { - return nil, diags - } - - importedState := states.NewResourceInstanceObjectFromIR(imported[0]) - if deferred == nil && importedState.Value.IsNull() { - // It's actually okay for a deferred import to have returned a null. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Import returned null resource", - fmt.Sprintf("While attempting to import with %s %s, the provider"+ - "returned an instance with no state.", - s.ImportType, s.ImportValue, - ), - )) - - } - data.InstanceRefreshState = importedState - return &ProviderRefreshStep{}, diags -} - -// SaveSnapshotStep saves a snapshot of the resource instance state -// before refreshing the resource. 
-type SaveSnapshotStep struct{} - -func (s *SaveSnapshotStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Only write the state if the change isn't being deferred. - if data.Deferred == nil { - // We'll save a snapshot of what we just read from the state into the - // prevRunState before we do anything else, since this will capture the - // result of any schema upgrading that readResourceInstanceState just did, - // but not include any out-of-band changes we might detect in the - // subsequent provider refresh step. - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, prevRunState)) - if diags.HasErrors() { - return nil, diags - } - // Also the refreshState, because that should still reflect schema upgrades - // even if it doesn't reflect upstream changes. - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) - if diags.HasErrors() { - return nil, diags - } - } - - // we may need to detect a change in CreateBeforeDestroy to ensure it's - // stored when we are not refreshing - updated := updateCreateBeforeDestroy(node, data.InstanceRefreshState) - - // If we are in light mode, we may not need to refresh the state. - // If we find out that we have to after planning, the planning step will send us there. - if !data.RefreshNeeded || data.LightMode { - if updated { - // CreateBeforeDestroy must be set correctly in the state which is used - // to create the apply graph, so if we did not refresh the state make - // sure we still update any changes to CreateBeforeDestroy. - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) - if diags.HasErrors() { - return nil, diags - } - } - - // If we only want to refresh the state, then we can skip the - // planning phase. 
- if data.SkipPlanning { - return &RefreshOnlyStep{prevInstanceState: data.InstanceRefreshState}, diags - } - - // Go straight to planning, since we don't need to refresh the state - return &PlanningStep{}, diags - } - - return &ProviderRefreshStep{}, diags -} - -// ProviderRefreshStep handles refreshing the resource's state -// with the provider. -type ProviderRefreshStep struct{} - -func (s *ProviderRefreshStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // This is the state of the resource before we refresh the value in the provider, we need to keep track - // of this to report this as the before value if the refresh is deferred. - preRefreshInstanceState := data.InstanceRefreshState - - var refreshWasDeferred bool - // Perform the refresh - refreshedState, refreshDeferred, refreshDiags := node.refresh( - ctx, states.NotDeposed, data.InstanceRefreshState, ctx.Deferrals().DeferralAllowed(), - ) - diags = diags.Append(refreshDiags) - if diags.HasErrors() { - return nil, diags - } - - data.InstanceRefreshState = refreshedState - - if data.InstanceRefreshState != nil { - // When refreshing we start by merging the stored dependencies and - // the configured dependencies. The configured dependencies will be - // stored to state once the changes are applied. If the plan - // results in no changes, we will re-write these dependencies - // below. 
- data.InstanceRefreshState.Dependencies = mergeDeps( - node.Dependencies, data.InstanceRefreshState.Dependencies, - ) - } - - if data.Deferred == nil && refreshDeferred != nil { - data.Deferred = refreshDeferred - } - refreshWasDeferred = refreshDeferred != nil - - if data.Deferred == nil { - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) - } - if diags.HasErrors() { - return nil, diags - } - - // Handle import validation and config generation if needed - if data.Importing { - importDiags := s.handleImportValidationAndConfigGen(ctx, node, data, refreshWasDeferred) - diags = diags.Append(importDiags) - if diags.HasErrors() { - return nil, diags - } - } - - data.RefreshNeeded = false // we just refreshed, we shouldn't need to refresh again - - // If we only want to refresh the state, then we can skip the - // planning phase. - if data.SkipPlanning { - return &RefreshOnlyStep{prevInstanceState: preRefreshInstanceState}, diags - } - - return &PlanningStep{}, diags -} - -// handleImportValidationAndConfigGen handles the import validation and config generation -// after a resource has been refreshed. -func (s *ProviderRefreshStep) handleImportValidationAndConfigGen( - ctx EvalContext, - node *NodePlannableResourceInstance, - data *ResourceData, - refreshWasDeferred bool, -) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // We only need to handle import validation and config generation - // when we're importing and the import target is wholly known - if !data.ImportTarget.IsWhollyKnown() { - return diags - } - - if !refreshWasDeferred && data.InstanceRefreshState.Value.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %q, "+ - "the provider detected that no object exists with the given id. 
"+ - "Only pre-existing objects can be imported; check that the id "+ - "is correct and that it is associated with the provider's "+ - "configured region or endpoint, or use \"terraform apply\" to "+ - "create a new remote object for this resource.", - node.Addr, - ), - )) - return diags - } - - // If we're importing and generating config, generate it now. We only - // generate config if the import isn't being deferred. We should generate - // the configuration in the plan that the import is actually happening in. - if data.Deferred == nil && len(node.generateConfigPath) > 0 { - if node.Config != nil { - return diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", node.Addr)) - } - - // Generate the HCL string first, then parse the HCL body from it. - // First we generate the contents of the resource block for use within - // the planning node. Then we wrap it in an enclosing resource block to - // pass into the plan for rendering. - generatedResource, generatedDiags := node.generateHCLResourceDef(ctx, node.Addr, data.InstanceRefreshState.Value) - diags = diags.Append(generatedDiags) - - // This wraps the content of the resource block in an enclosing resource block - // to pass into the plan for rendering. - node.generatedConfigHCL = generatedResource.String() - - // parse the "file" body as HCL to get the hcl.Body - synthHCLFile, hclDiags := hclsyntax.ParseConfig(generatedResource.Body, filepath.Base(node.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1}) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return diags - } - - // We have to do a kind of mini parsing of the content here to correctly - // mark attributes like 'provider' as hiddenode. We only care about the - // resulting content, so it's remain that gets passed into the resource - // as the config. 
- _, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema) - diags = diags.Append(resourceDiags) - if resourceDiags.HasErrors() { - return diags - } - - node.Config = &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: node.Addr.Resource.Resource.Type, - Name: node.Addr.Resource.Resource.Name, - Config: remain, - Managed: &configs.ManagedResource{}, - Provider: node.ResolvedProvider.Provider, - } - } - - return diags -} - -// RefreshOnlyStep handles the refresh-only planning mode -type RefreshOnlyStep struct { - // This is the state of the resource before we refresh the value - prevInstanceState *states.ResourceInstanceObject -} - -func (s *RefreshOnlyStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // In refresh-only mode we need to evaluate the for-each expression in - // order to supply the value to the pre- and post-condition check - // blocks. This has the unfortunate edge case of a refresh-only plan - // executing with a for-each map which has the same keys but different - // values, which could result in a post-condition check relying on that - // value being inaccurate. Unless we decide to store the value of the - // for-each expression in state, this is unavoidable. - forEach, _, _ := evaluateForEachExpression(node.Config.ForEach, ctx, false) - repeatData := EvalDataForInstanceKey(data.Addr.Resource.Key, forEach) - - // Evaluate preconditions - checkDiags := evalCheckRules( - addrs.ResourcePrecondition, - node.Config.Preconditions, - ctx, data.Addr, repeatData, - data.CheckRuleSeverity, - ) - diags = diags.Append(checkDiags) - - // Even if we don't plan changes, we do still need to at least update - // the working state to reflect the refresh result. If not, then e.g. - // any output values refering to this will not react to the drift. 
- // (Even if we didn't actually refresh above, this will still save - // the result of any schema upgrading we did in readResourceInstanceState.) - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, workingState)) - if diags.HasErrors() { - return nil, diags - } - - // Evaluate postconditions - checkDiags = evalCheckRules( - addrs.ResourcePostcondition, - node.Config.Postconditions, - ctx, data.Addr, repeatData, - data.CheckRuleSeverity, - ) - diags = diags.Append(checkDiags) - - // Report deferral if needed - if data.Deferred != nil { - // Make sure we have a valid state before using it - var beforeValue cty.Value - if s.prevInstanceState != nil { - beforeValue = s.prevInstanceState.Value - } else { - beforeValue = cty.NullVal(data.InstanceRefreshState.Value.Type()) - } - - ctx.Deferrals().ReportResourceInstanceDeferred( - data.Addr, - data.Deferred.Reason, - &plans.ResourceInstanceChange{ - Addr: node.Addr, - PrevRunAddr: node.Addr, - ProviderAddr: node.ResolvedProvider, - Change: plans.Change{ - Action: plans.Read, - Before: beforeValue, - After: data.InstanceRefreshState.Value, - }, - }, - ) - } - - // no more steps. 
- return nil, diags -} - -// PlanningStep handles the planning phase -type PlanningStep struct{} - -func (s *PlanningStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Initialize repetition data for replace triggers - repData := instances.RepetitionData{} - switch k := data.Addr.Resource.Key.(type) { - case addrs.IntKey: - repData.CountIndex = k.Value() - case addrs.StringKey: - repData.EachKey = k.Value() - repData.EachValue = cty.DynamicVal - } - - // Check for triggered replacements - diags = diags.Append(node.replaceTriggered(ctx, repData)) - if diags.HasErrors() { - return nil, diags - } - - // Plan the changes - change, instancePlanState, planDeferred, repeatData, planDiags := node.plan( - ctx, nil, data.InstanceRefreshState, node.ForceCreateBeforeDestroy, node.forceReplace, - ) - diags = diags.Append(planDiags) - if diags.HasErrors() { - // Special case for import with config generation - // If we are importing and generating a configuration, we need to - // ensure the change is written out so the configuration can be - // captured. 
- if planDeferred == nil && len(node.generateConfigPath) > 0 { - // Update our return plan - change := &plans.ResourceInstanceChange{ - Addr: node.Addr, - PrevRunAddr: node.prevRunAddr(ctx), - ProviderAddr: node.ResolvedProvider, - Change: plans.Change{ - // we only need a placeholder, so this will be a NoOp - Action: plans.NoOp, - Before: data.InstanceRefreshState.Value, - After: data.InstanceRefreshState.Value, - GeneratedConfig: node.generatedConfigHCL, - }, - } - diags = diags.Append(node.writeChange(ctx, change, "")) - } - return nil, diags - } - - if data.Deferred == nil && planDeferred != nil { - data.Deferred = planDeferred - } - - // Update import metadata if needed - if data.Importing { - // There is a subtle difference between the import by identity - // and the import by ID. When importing by identity, we need to - // make sure to use the complete identity return by the provider - // instead of the (potential) incomplete one from the configuration. - if node.importTarget.Type().IsObjectType() { - change.Importing = &plans.Importing{Target: data.InstanceRefreshState.Identity} - } else { - change.Importing = &plans.Importing{Target: node.importTarget} - } - } - - // FIXME: here we update the change to reflect the reason for - // replacement, but we still overload forceReplace to get the correct - // change planned. 
- if len(node.replaceTriggeredBy) > 0 { - change.ActionReason = plans.ResourceInstanceReplaceByTriggers - } - - // Determine if we need to refresh and re-plan - // In light mode, if we didn't refresh before planning but the provider - // has indicated that changes are needed, we need to refresh and re-plan to - // ensure we have the most up-to-date state - refreshChangedResource := data.RefreshNeeded && change.Action != plans.NoOp && data.LightMode - if refreshChangedResource { - // Go back to the refresh step and plan again - return &ProviderRefreshStep{}, nil - } - - // FIXME: here we update the change to reflect the reason for - // replacement, but we still overload forceReplace to get the correct - // change planned. - if len(node.replaceTriggeredBy) > 0 { - change.ActionReason = plans.ResourceInstanceReplaceByTriggers - } - - return &PostPlanDeferralStep{ - RepeatData: repeatData, - PlanState: instancePlanState, - Change: change, - }, diags -} - -// PostPlanDeferralStep handles the deferral of changes after planning -type PostPlanDeferralStep struct { - RepeatData instances.RepetitionData - PlanState *states.ResourceInstanceObject - Change *plans.ResourceInstanceChange -} - -func (s *PostPlanDeferralStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - deferrals := ctx.Deferrals() - if data.Deferred != nil { - // Then this resource has been deferred either during the import, - // refresh or planning stage. We'll report the deferral and - // store what we could produce in the deferral tracker. 
- deferrals.ReportResourceInstanceDeferred(data.Addr, data.Deferred.Reason, s.Change) - return nil, diags - } - - // We intentionally write the change before the subsequent checks, because - // all of the checks below this point are for problems caused by the - // context surrounding the change, rather than the change itself, and - // so it's helpful to still include the valid-in-isolation change as - // part of the plan as additional context in our error output. - // - // FIXME: it is currently important that we write resource changes to - // the plan (n.writeChange) before we write the corresponding state - // (n.writeResourceInstanceState). - // - // This is because the planned resource state will normally have the - // status of states.ObjectPlanned, which causes later logic to refer to - // the contents of the plan to retrieve the resource data. Because - // there is no shared lock between these two data structures, reversing - // the order of these writes will cause a brief window of inconsistency - // which can lead to a failed safety check. - // - // Future work should adjust these APIs such that it is impossible to - // update these two data structures incorrectly through any objects - // reachable via the terraform.EvalContext API. - if !deferrals.ShouldDeferResourceInstanceChanges(node.Addr, node.Dependencies) { - // Write the change - diags = diags.Append(node.writeChange(ctx, s.Change, "")) - if diags.HasErrors() { - return nil, diags - } - - // Update the working state - diags = diags.Append(node.writeResourceInstanceState(ctx, s.PlanState, workingState)) - if diags.HasErrors() { - return nil, diags - } - - // Check for prevent_destroy violations - diags = diags.Append(node.checkPreventDestroy(s.Change)) - if diags.HasErrors() { - return nil, diags - } - - // If this plan resulted in a NoOp, then apply won't have a chance to make - // any changes to the stored dependencies. 
Since this is a NoOp we know - // that the stored dependencies will have no effect during apply, and we can - // write them out now. - if s.Change.Action == plans.NoOp && !depsEqual(data.InstanceRefreshState.Dependencies, node.Dependencies) { - // the refresh state will be the final state for this resource, so - // finalize the dependencies here if they need to be updated. - data.InstanceRefreshState.Dependencies = node.Dependencies - diags = diags.Append(node.writeResourceInstanceState(ctx, data.InstanceRefreshState, refreshState)) - if diags.HasErrors() { - return nil, diags - } - } - - return &CheckingPostconditionsStep{s.RepeatData}, diags - } - - // If we get here, it means that the deferrals tracker says that - // that we must defer changes for - // this resource instance, presumably due to a dependency on an - // upstream object that was already deferred. Therefore we just - // report our own deferral (capturing a placeholder value in the - // deferral tracker) and don't add anything to the plan or - // working state. - // In this case, the expression evaluator should use the placeholder - // value registered here as the value of this resource instance, - // instead of using the plan. - deferrals.ReportResourceInstanceDeferred(node.Addr, providers.DeferredReasonDeferredPrereq, s.Change) - return nil, diags -} - -// CheckingPostconditionsStep evaluates postconditions -type CheckingPostconditionsStep struct { - RepeatData instances.RepetitionData -} - -func (s *CheckingPostconditionsStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // Post-conditions might block completion. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. 
- // (Note that some preconditions will end up being skipped during - // planning, because their conditions depend on values not yet known.) - - // Check postconditions - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - node.Config.Postconditions, - ctx, node.ResourceInstanceAddr(), s.RepeatData, - data.CheckRuleSeverity, - ) - diags = diags.Append(checkDiags) - - // End of execution - return nil, diags -} - -func updateCreateBeforeDestroy(n *NodePlannableResourceInstance, currentState *states.ResourceInstanceObject) (updated bool) { - if n.Config != nil && n.Config.Managed != nil && currentState != nil { - newCBD := n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy - updated = currentState.CreateBeforeDestroy != newCBD - currentState.CreateBeforeDestroy = newCBD - return updated - } - return false -} diff --git a/internal/terraform/node_resource_plan_instance_ds.go b/internal/terraform/node_resource_plan_instance_ds.go deleted file mode 100644 index 261ef8c47104..000000000000 --- a/internal/terraform/node_resource_plan_instance_ds.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1

package terraform

import (
	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

// PlanDataSourceStep plans a data resource instance read. It is the
// data-source branch of the plan state machine and is terminal: it always
// returns a nil next step.
type PlanDataSourceStep struct {
}

// Execute plans the data source read, then either reports the change as
// deferred or writes the resulting state (refresh + working), records the
// change, and evaluates postconditions.
func (s *PlanDataSourceStep) Execute(ctx EvalContext, node *NodePlannableResourceInstance, data *ResourceData) (ResourceState[*NodePlannableResourceInstance], tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	addr := node.ResourceInstanceAddr()
	deferrals := ctx.Deferrals()
	change, state, deferred, repeatData, planDiags := node.planDataSource(ctx, data.CheckRuleSeverity, data.SkipPlanning, deferrals.ShouldDeferResourceInstanceChanges(addr, node.Dependencies))
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// A nil change here indicates that Terraform is deciding NOT to make a
	// change at all. In which case even if we wanted to try and defer it
	// (because of a dependency) we can't as there is no change to defer.
	//
	// The most common case for this is when the data source is being refreshed
	// but depends on unknown values or dependencies which means we just skip
	// refreshing the data source. We maintain that behaviour here.
	if change != nil && deferred != nil {
		// Then this data source got deferred by the provider during planning.
		deferrals.ReportDataSourceInstanceDeferred(addr, deferred.Reason, change)
	} else {
		// Not deferred; business as usual.

		// write the data source into both the refresh state and the
		// working state
		diags = diags.Append(node.writeResourceInstanceState(ctx, state, refreshState))
		if diags.HasErrors() {
			return nil, diags
		}
		diags = diags.Append(node.writeResourceInstanceState(ctx, state, workingState))
		if diags.HasErrors() {
			return nil, diags
		}

		diags = diags.Append(node.writeChange(ctx, change, ""))

		// Post-conditions might block further progress. We intentionally do this
		// _after_ writing the state/diff because we want to check against
		// the result of the operation, and to fail on future operations
		// until the user makes the condition succeed.
		checkDiags := evalCheckRules(
			addrs.ResourcePostcondition,
			node.Config.Postconditions,
			ctx, addr, repeatData,
			data.CheckRuleSeverity,
		)
		diags = diags.Append(checkDiags)
	}

	return nil, diags
}