Skip to content
11 changes: 11 additions & 0 deletions docs/command_adv2v2.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,15 @@ atlas tf adv2v2 -f in.tf -o out.tf
- `--replaceOutput` or `-r`: Overwrite the file at the output path if it already exists. You can also modify the input file in-place.
- `--watch` or `-w`: Keep the plugin running and watching for changes in the input file

## Comments and formatting

During the conversion process, some formatting elements may not be preserved:
- Some comments from the original resources may not be preserved in the output
- Custom blank lines and spacing may be modified
- The output file will have standardized formatting

We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain.

## Examples

You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/adv2v2) examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf).
Expand Down Expand Up @@ -56,6 +65,7 @@ dynamic "tags" {
### Dynamic blocks in region_configs

You can use `dynamic` blocks for `region_configs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
**Note:** `for_each` expressions that evaluate to a `map` or a `set` are not supported; only a `list` of objects works.

This is an example of how to use dynamic blocks in `region_configs`:
```hcl
Expand All @@ -81,6 +91,7 @@ replication_specs {
### Dynamic blocks in replication_specs

You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
**Note:** `for_each` expressions that evaluate to a `map` or a `set` are not supported; only a `list` of objects works.

This is an example of how to use dynamic blocks in `replication_specs`:
```hcl
Expand Down
11 changes: 11 additions & 0 deletions docs/command_clu2adv.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,15 @@ atlas tf clu2adv -f in.tf -o out.tf
- `--watch` or `-w`: Keep the plugin running and watching for changes in the input file
- `--includeMoved` or `-m`: Include the `moved blocks` in the output file

## Comments and formatting

During the conversion process, some formatting elements may not be preserved:
- Some comments from the original resources may not be preserved in the output
- Custom blank lines and spacing may be modified
- The output file will have standardized formatting

We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain.

## Examples

You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/clu2adv) some examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf).
Expand Down Expand Up @@ -56,6 +65,7 @@ dynamic "tags" {
### Dynamic blocks in regions_config

You can use `dynamic` blocks for `regions_config`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
**Note:** `for_each` expressions that evaluate to a `map` or a `set` are not supported; only a `list` of objects works.

This is an example of how to use dynamic blocks in `regions_config`:
```hcl
Expand All @@ -77,6 +87,7 @@ replication_specs {
### Dynamic blocks in replication_specs

You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
**Note:** `for_each` expressions that evaluate to a `map` or a `set` are not supported; only a `list` of objects works.

This is an example of how to use dynamic blocks in `replication_specs`:
```hcl
Expand Down
114 changes: 64 additions & 50 deletions internal/convert/adv2v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,11 @@ import (
"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
)

// Spec block names grouped by whether they accept a disk_size_gb attribute,
// so a top-level disk_size_gb value can be copied into the right sub-blocks.
var (
	// specsWithDisk lists the spec blocks that take a disk_size_gb attribute.
	specsWithDisk = []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs}
	// specsWithoutDisk lists the auto-scaling blocks, which are processed
	// without a disk_size_gb value (callers pass nil for them).
	specsWithoutDisk = []string{nAutoScaling, nAnalyticsAutoScaling}
)

// AdvancedClusterToV2 transforms all mongodbatlas_advanced_cluster resource definitions in a
// Terraform configuration file from SDKv2 schema to TPF (Terraform Plugin Framework) schema.
// All other resources and data sources are left untouched.
Expand All @@ -18,20 +23,18 @@ func AdvancedClusterToV2(config []byte) ([]byte, error) {
}
parserb := parser.Body()
for _, block := range parserb.Blocks() {
updated, err := updateResource(block)
updated, err := processResource(block)
if err != nil {
return nil, err
}
if updated { // If the resource was converted, add a comment at the end so user knows the resource was updated
blockb := block.Body()
blockb.AppendNewline()
hcl.AppendComment(blockb, commentUpdatedBy)
if updated {
addComments(block, true)
}
}
return parser.Bytes(), nil
}

func updateResource(resource *hclwrite.Block) (bool, error) {
func processResource(resource *hclwrite.Block) (bool, error) {
if resource.Type() != resourceType || getResourceName(resource) != advCluster {
return false, nil
}
Expand All @@ -43,24 +46,17 @@ func updateResource(resource *hclwrite.Block) (bool, error) {
return false, nil
}
diskSizeGB, _ := hcl.PopAttr(resourceb, nDiskSizeGB, errRoot) // ok to fail as it's optional
if err := convertRepSpecs(resourceb, diskSizeGB); err != nil {
return false, err
}
if err := fillTagsLabelsOpt(resourceb, nTags); err != nil {
if err := processRepSpecs(resourceb, diskSizeGB); err != nil {
return false, err
}
if err := fillTagsLabelsOpt(resourceb, nLabels); err != nil {
if err := processCommonOptionalBlocks(resourceb); err != nil {
return false, err
}
fillAdvConfigOpt(resourceb)
fillBlockOpt(resourceb, nBiConnector)
fillBlockOpt(resourceb, nPinnedFCV)
fillBlockOpt(resourceb, nTimeouts)
return true, nil
}

func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
d, err := convertRepSpecsWithDynamicBlock(resourceb, diskSizeGB)
func processRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
d, err := processRepSpecsWithDynamicBlock(resourceb, diskSizeGB)
if err != nil {
return err
}
Expand All @@ -80,7 +76,7 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error
blockb := block.Body()
shardsAttr := blockb.GetAttribute(nNumShards)
blockb.RemoveAttribute(nNumShards)
dConfig, err := convertConfigsWithDynamicBlock(blockb, diskSizeGB, false)
dConfig, err := processConfigsWithDynamicBlock(blockb, diskSizeGB, false)
if err != nil {
return err
}
Expand Down Expand Up @@ -119,41 +115,50 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error
return nil
}

func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) {
func processRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) {
dSpec, err := getDynamicBlock(resourceb, nRepSpecs, true)
if err != nil || !dSpec.IsPresent() {
return dynamicBlock{}, err
}
transformReferences(dSpec.content.Body(), nRepSpecs, nSpec)
dConfig, err := convertConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true)
dConfig, err := processConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true)
if err != nil {
return dynamicBlock{}, err
}
if dConfig.tokens != nil {
forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...))
return dSpec, nil
}

// Handle static region_configs blocks inside dynamic replication_specs
specBody := dSpec.content.Body()
staticConfigs := collectBlocks(specBody, nConfig)
repSpecb := hclwrite.NewEmptyFile().Body()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: I believe this drops any user comments in the block, is that correct? if yes, that's probably ok for a converter but maybe we should mention in the docs?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@maastha this is also happening in other places, good idea about mentioning, WDYT about this: 7584850

handleZoneName(repSpecb, specBody, nRepSpecs, nSpec)
var configs []*hclwrite.Body
for _, configBlock := range staticConfigs {
configBlockb := configBlock.Body()
newConfigBody := processConfigForDynamicBlock(configBlockb, diskSizeGB)
configs = append(configs, newConfigBody)
}
repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
numShardsAttr := specBody.GetAttribute(nNumShards)
forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...))
numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...))
return dSpec, nil
}

func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens,
func processConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens,
insideDynamicRepSpec bool) (dynamicBlock, error) {
d, err := getDynamicBlock(specbSrc, nConfig, true)
if err != nil || !d.IsPresent() {
return dynamicBlock{}, err
}
configBody := d.content.Body()
transformReferences(configBody, getResourceName(d.block), nRegion)
regionConfigBody := hclwrite.NewEmptyFile().Body()
copyAttributesSorted(regionConfigBody, configBody.Attributes())
for _, block := range configBody.Blocks() {
blockType := block.Type()
blockBody := hclwrite.NewEmptyFile().Body()
copyAttributesSorted(blockBody, block.Body().Attributes())
if diskSizeGB != nil &&
(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
}
regionConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
}
regionConfigBody := processConfigForDynamicBlock(configBody, diskSizeGB)
forEach := hcl.GetAttrExpr(d.forEach)
if insideDynamicRepSpec {
forEach = fmt.Sprintf("%s.%s", nSpec, nConfig)
Expand All @@ -165,18 +170,11 @@ func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite
return d, nil
}
repSpecb := hclwrite.NewEmptyFile().Body()
if zoneNameAttr := specbSrc.GetAttribute(nZoneName); zoneNameAttr != nil {
zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
}
handleZoneName(repSpecb, specbSrc, nRepSpecs, nSpec)
repSpecb.SetAttributeRaw(nConfig, hcl.EncloseBracketsNewLines(regionTokens))
if numShardsAttr := specbSrc.GetAttribute(nNumShards); numShardsAttr != nil {
numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
tokens = append(tokens, hcl.TokensObject(repSpecb)...)
return dynamicBlock{tokens: hcl.EncloseBracketsNewLines(tokens)}, nil
}
return dynamicBlock{tokens: hcl.TokensArraySingle(repSpecb)}, nil
numShardsAttr := specbSrc.GetAttribute(nNumShards)
tokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
return dynamicBlock{tokens: tokens}, nil
}

// hasExpectedBlocksAsAttributes checks if any of the expected block names
Expand Down Expand Up @@ -205,11 +203,27 @@ func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hcl
}

// processAllSpecs converts every spec sub-block of a region config
// (electable/read-only/analytics specs plus the two auto-scaling blocks)
// via fillSpecOpt. The diskSizeGB tokens, when present, are propagated only
// to the spec blocks that support a disk size; the auto-scaling blocks are
// always processed with nil disk tokens.
//
// Fix: the body previously invoked fillSpecOpt twice per spec — once through
// five explicit calls and again through the loops over specsWithDisk and
// specsWithoutDisk — redundantly re-processing each block. Only the loops
// are kept.
func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
	for _, spec := range specsWithDisk {
		fillSpecOpt(body, spec, diskSizeGB)
	}
	for _, spec := range specsWithoutDisk {
		fillSpecOpt(body, spec, nil)
	}
}

// processConfigForDynamicBlock builds a fresh body from a region config block:
// the block's own attributes are copied in sorted order, and each nested spec
// block is rewritten as an object attribute. When diskSizeGB is non-nil it is
// injected into the nested blocks listed in specsWithDisk.
func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwrite.Tokens) *hclwrite.Body {
	out := hclwrite.NewEmptyFile().Body()
	copyAttributesSorted(out, configBlockb.Attributes())
	for _, nested := range configBlockb.Blocks() {
		name := nested.Type()
		nestedBody := hclwrite.NewEmptyFile().Body()
		copyAttributesSorted(nestedBody, nested.Body().Attributes())
		// Only spec blocks that support a disk size receive the value.
		if wantsDisk := diskSizeGB != nil && slices.Contains(specsWithDisk, name); wantsDisk {
			nestedBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
		}
		out.SetAttributeRaw(name, hcl.TokensObject(nestedBody))
	}
	return out
}

func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) {
Expand Down
Loading