Skip to content

Commit

Permalink
Merge pull request #1242 from Tinyblargon/#1158
Browse files Browse the repository at this point in the history
Fixes: vmid output is null
  • Loading branch information
Tinyblargon authored Feb 2, 2025
2 parents c9ae418 + 9c38d8a commit d5a0a13
Show file tree
Hide file tree
Showing 5 changed files with 85 additions and 71 deletions.
36 changes: 36 additions & 0 deletions proxmox/Internal/resource/guest/vmid/schema.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
package vmid

import (
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

const (
	// Root is the Terraform schema key under which the guest ID is stored.
	Root string = "vmid"

	// Bounds of a valid Proxmox guest ID. A value of 0 is handled
	// separately by Schema's validator as the "next available ID" sentinel.
	maxID int = 999999999
	minID int = 100
)

// Schema returns the Terraform schema for the guest's vmid attribute: an
// optional, computed, force-new integer that must be either 0 (meaning
// "use the next available ID") or within Proxmox's valid guest-ID range.
func Schema() *schema.Schema {
	validate := func(raw interface{}, path cty.Path) diag.Diagnostics {
		id, isInt := raw.(int)
		if !isInt {
			return diag.Errorf("expected type of %v to be int", path)
		}
		// 0 is the sentinel for letting Proxmox pick the next free ID.
		if id != 0 && (id < minID || id > maxID) {
			return diag.Errorf("proxmox %s must be in the range (%d - %d) or 0 for next available ID, got %d", path, minID, maxID, id)
		}
		return nil
	}
	return &schema.Schema{
		Type:             schema.TypeInt,
		Optional:         true,
		Computed:         true,
		ForceNew:         true,
		ValidateDiagFunc: validate,
		Description:      "The VM identifier in proxmox (100-999999999)",
	}
}
9 changes: 9 additions & 0 deletions proxmox/Internal/resource/guest/vmid/terraform.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
package vmid

import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Terraform writes the guest ID into the Terraform state under the vmid key.
func Terraform(id int, d *schema.ResourceData) {
	// id is already an int, so the original int(id) conversion was redundant.
	// NOTE(review): d.Set's error is ignored here, matching the provider's
	// existing style for simple scalar attributes — confirm this is intended.
	d.Set(Root, id)
}
19 changes: 7 additions & 12 deletions proxmox/resource_lxc.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
pveSDK "github.com/Telmate/proxmox-api-go/proxmox"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/pve/guest/tags"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/node"
vmID "github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/vmid"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/util"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
Expand Down Expand Up @@ -425,14 +426,7 @@ func resourceLxc() *schema.Resource {
},
node.RootNode: node.SchemaNode(schema.Schema{
Required: true, ForceNew: true}, "lxc"),
"vmid": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
ValidateDiagFunc: VMIDValidator(),
Description: "The VM identifier in proxmox (100-999999999)",
},
vmID.Root: vmID.Schema(),
},
Timeouts: resourceTimeouts(),
}
Expand Down Expand Up @@ -520,7 +514,7 @@ func resourceLxcCreate(ctx context.Context, d *schema.ResourceData, meta interfa

// get unique id
nextid, err := nextVmId(pconf)
vmID := d.Get("vmid").(int)
vmID := d.Get(vmID.Root).(int)
if vmID != 0 {
nextid = vmID
} else {
Expand Down Expand Up @@ -768,12 +762,12 @@ func _resourceLxcRead(ctx context.Context, d *schema.ResourceData, meta interfac
lock := pmParallelBegin(pconf)
defer lock.unlock()
client := pconf.Client
_, _, vmID, err := parseResourceId(d.Id())
_, _, guestID, err := parseResourceId(d.Id())
if err != nil {
d.SetId("")
return err
}
vmr := pveSDK.NewVmRef(vmID)
vmr := pveSDK.NewVmRef(guestID)
_, err = client.GetVmInfo(ctx, vmr)
if err != nil {
return err
Expand Down Expand Up @@ -844,7 +838,7 @@ func _resourceLxcRead(ctx context.Context, d *schema.ResourceData, meta interfac
poolContent, _ := client.GetPoolInfo(ctx, poolInfo.(map[string]interface{})["poolid"].(string))
for _, member := range poolContent["members"].([]interface{}) {
if member.(map[string]interface{})["type"] != "storage" {
if vmID == int(member.(map[string]interface{})["vmid"].(float64)) {
if guestID == int(member.(map[string]interface{})[vmID.Root].(float64)) {
d.Set("pool", poolInfo.(map[string]interface{})["poolid"].(string))
}
}
Expand All @@ -860,6 +854,7 @@ func _resourceLxcRead(ctx context.Context, d *schema.ResourceData, meta interfac
d.Set("hagroup", vmr.HaGroup())

// Read Misc
vmID.Terraform(vmr.VmId(), d)
d.Set("arch", config.Arch)
d.Set("bwlimit", config.BWLimit)
d.Set("cmode", config.CMode)
Expand Down
71 changes: 33 additions & 38 deletions proxmox/resource_vm_qemu.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import (
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/qemu/pci"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/qemu/serial"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/qemu/usb"
vmID "github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/resource/guest/vmid"
"github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/util"
)

Expand Down Expand Up @@ -113,14 +114,7 @@ func resourceVmQemu() *schema.Resource {
return diag.Errorf(schemaAgentTimeout + " must be greater than 0")
},
},
"vmid": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
ValidateDiagFunc: VMIDValidator(),
Description: "The VM identifier in proxmox (100-999999999)",
},
vmID.Root: vmID.Schema(),
"name": {
Type: schema.TypeString,
Optional: true,
Expand Down Expand Up @@ -677,8 +671,8 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
// DEBUG print out the create request
flatValue, _ := resourceDataToFlatValues(d, thisResource)
jsonString, _ := json.Marshal(flatValue)
logger.Info().Str("vmid", d.Id()).Msgf("VM creation")
logger.Debug().Str("vmid", d.Id()).Msgf("VM creation resource data: '%+v'", string(jsonString))
logger.Info().Str(vmID.Root, d.Id()).Msgf("VM creation")
logger.Debug().Str(vmID.Root, d.Id()).Msgf("VM creation resource data: '%+v'", string(jsonString))

pconf := meta.(*providerConfiguration)
lock := pmParallelBegin(pconf)
Expand Down Expand Up @@ -784,9 +778,9 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
if vmr == nil {
// get unique id
nextid, err := nextVmId(pconf)
vmID := d.Get("vmid").(int)
if vmID != 0 { // 0 is the "no value" for int in golang
nextid = vmID
guestID := d.Get(vmID.Root).(int)
if guestID != 0 { // 0 is the "no value" for int in golang
nextid = guestID
} else {
if err != nil {
return append(diags, diag.FromErr(err)...)
Expand All @@ -813,7 +807,7 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
}

log.Print("[DEBUG][QemuVmCreate] cloning VM")
logger.Debug().Str("vmid", d.Id()).Msgf("Cloning VM")
logger.Debug().Str(vmID.Root, d.Id()).Msgf("Cloning VM")
err = config.CloneVm(ctx, sourceVmr, vmr, client)
if err != nil {
return append(diags, diag.FromErr(err)...)
Expand Down Expand Up @@ -877,7 +871,7 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte

}
d.SetId(resourceId(targetNode, "qemu", vmr.VmId()))
logger.Debug().Int("vmid", vmr.VmId()).Msgf("Set this vm (resource Id) to '%v'", d.Id())
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("Set this vm (resource Id) to '%v'", d.Id())

// give sometime to proxmox to catchup
time.Sleep(time.Duration(d.Get(schemaAdditionalWait).(int)) * time.Second)
Expand Down Expand Up @@ -915,14 +909,14 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
logger, _ := CreateSubLogger("resource_vm_update")

// get vmID
_, _, vmID, err := parseResourceId(d.Id())
_, _, guestID, err := parseResourceId(d.Id())
if err != nil {
return diag.FromErr(err)
}

logger.Info().Int("vmid", vmID).Msg("Starting update of the VM resource")
logger.Info().Int(vmID.Root, guestID).Msg("Starting update of the VM resource")

vmr := pveSDK.NewVmRef(vmID)
vmr := pveSDK.NewVmRef(guestID)
_, err = client.GetVmInfo(ctx, vmr)
if err != nil {
return diag.FromErr(err)
Expand Down Expand Up @@ -989,7 +983,7 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
return diags
}

logger.Debug().Int("vmid", vmID).Msgf("Updating VM with the following configuration: %+v", config)
logger.Debug().Int(vmID.Root, guestID).Msgf("Updating VM with the following configuration: %+v", config)

var rebootRequired bool
automaticReboot := d.Get("automatic_reboot").(bool)
Expand Down Expand Up @@ -1156,14 +1150,14 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
var diags diag.Diagnostics
logger, _ := CreateSubLogger("resource_vm_read")

_, _, vmID, err := parseResourceId(d.Id())
_, _, guestID, err := parseResourceId(d.Id())
if err != nil {
d.SetId("")
return diag.FromErr(fmt.Errorf("unexpected error when trying to read and parse the resource: %v", err))
}

logger.Info().Int("vmid", vmID).Msg("Reading configuration for vmid")
vmr := pveSDK.NewVmRef(vmID)
logger.Info().Int(vmID.Root, guestID).Msg("Reading configuration for vmid")
vmr := pveSDK.NewVmRef(guestID)

// Try to get information on the vm. If this call err's out
// that indicates the VM does not exist. We indicate that to terraform
Expand All @@ -1180,7 +1174,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
if len(targetNodes) == 0 {
_, err = client.GetVmInfo(ctx, vmr)
if err != nil {
logger.Debug().Int("vmid", vmID).Err(err).Msg("failed to get vm info")
logger.Debug().Int(vmID.Root, guestID).Err(err).Msg("failed to get vm info")
d.SetId("")
return nil
}
Expand All @@ -1200,7 +1194,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
}

if targetNodeVMR == "" {
logger.Debug().Int("vmid", vmID).Err(err).Msg("failed to get vm info")
logger.Debug().Int(vmID.Root, guestID).Err(err).Msg("failed to get vm info")
d.SetId("")
return nil
}
Expand Down Expand Up @@ -1235,9 +1229,10 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
diags = append(diags, diag.FromErr(err)...)
}

logger.Debug().Int("vmid", vmID).Msgf("[READ] Received Config from Proxmox API: %+v", config)
logger.Debug().Int(vmID.Root, guestID).Msgf("[READ] Received Config from Proxmox API: %+v", config)

d.SetId(resourceId(vmr.Node(), "qemu", vmr.VmId()))
vmID.Terraform(vmr.VmId(), d)
d.Set("name", config.Name)
d.Set("desc", mapToTerraform_Description(config.Description))
d.Set("bios", config.Bios)
Expand Down Expand Up @@ -1281,10 +1276,10 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
checkedKeys := []string{"force_create", "define_connection_info"}
for _, key := range checkedKeys {
if val := d.Get(key); val == nil {
logger.Debug().Int("vmid", vmID).Msgf("key '%s' not found, setting to default", key)
logger.Debug().Int(vmID.Root, guestID).Msgf("key '%s' not found, setting to default", key)
d.Set(key, thisResource.Schema[key].Default)
} else {
logger.Debug().Int("vmid", vmID).Msgf("key '%s' is set to %t", key, val.(bool))
logger.Debug().Int(vmID.Root, guestID).Msgf("key '%s' is set to %t", key, val.(bool))
d.Set(key, val.(bool))
}
}
Expand All @@ -1294,7 +1289,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf

// read in the unused disks
flatUnusedDisks, _ := FlattenDevicesList(config.QemuUnusedDisks)
logger.Debug().Int("vmid", vmID).Msgf("Unused Disk Block Processed '%v'", config.QemuUnusedDisks)
logger.Debug().Int(vmID.Root, guestID).Msgf("Unused Disk Block Processed '%v'", config.QemuUnusedDisks)
if err = d.Set("unused_disk", flatUnusedDisks); err != nil {
return diag.FromErr(err)
}
Expand All @@ -1313,7 +1308,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
// DEBUG print out the read result
flatValue, _ := resourceDataToFlatValues(d, thisResource)
jsonString, _ := json.Marshal(flatValue)
logger.Debug().Int("vmid", vmID).Msgf("Finished VM read resulting in data: '%+v'", string(jsonString))
logger.Debug().Int(vmID.Root, guestID).Msgf("Finished VM read resulting in data: '%+v'", string(jsonString))

return diags
}
Expand Down Expand Up @@ -1549,7 +1544,7 @@ func initConnInfo(ctx context.Context, d *schema.ResourceData, client *pveSDK.Cl
// allow user to opt-out of setting the connection info for the resource
if !d.Get("define_connection_info").(bool) {
log.Printf("[INFO][initConnInfo] define_connection_info is %t, no further action", d.Get("define_connection_info").(bool))
logger.Info().Int("vmid", vmr.VmId()).Msgf("define_connection_info is %t, no further action", d.Get("define_connection_info").(bool))
logger.Info().Int(vmID.Root, vmr.VmId()).Msgf("define_connection_info is %t, no further action", d.Get("define_connection_info").(bool))
return diags
}

Expand All @@ -1558,7 +1553,7 @@ func initConnInfo(ctx context.Context, d *schema.ResourceData, client *pveSDK.Cl
if config.Agent != nil && config.Agent.Enable != nil && *config.Agent.Enable {
if d.Get("agent") != 1 { // allow user to opt-out of setting the connection info for the resource
log.Printf("[INFO][initConnInfo] qemu agent is disabled from proxmox config, cant communicate with vm.")
logger.Info().Int("vmid", vmr.VmId()).Msgf("qemu agent is disabled from proxmox config, cant communicate with vm.")
logger.Info().Int(vmID.Root, vmr.VmId()).Msgf("qemu agent is disabled from proxmox config, cant communicate with vm.")
return append(diags, diag.Diagnostic{
Severity: diag.Warning,
Summary: "Qemu Guest Agent support is disabled from proxmox config.",
Expand All @@ -1569,15 +1564,15 @@ func initConnInfo(ctx context.Context, d *schema.ResourceData, client *pveSDK.Cl
}

log.Print("[INFO][initConnInfo] trying to get vm ip address for provisioner")
logger.Info().Int("vmid", vmr.VmId()).Msgf("trying to get vm ip address for provisioner")
logger.Info().Int(vmID.Root, vmr.VmId()).Msgf("trying to get vm ip address for provisioner")

// wait until the os has started the guest agent
guestAgentTimeout := d.Timeout(schema.TimeoutCreate)
guestAgentWaitEnd := time.Now().Add(time.Duration(guestAgentTimeout))
log.Printf("[DEBUG][initConnInfo] retrying for at most %v minutes before giving up", guestAgentTimeout)
log.Printf("[DEBUG][initConnInfo] retries will end at %s", guestAgentWaitEnd)
logger.Debug().Int("vmid", vmr.VmId()).Msgf("retrying for at most %v minutes before giving up", guestAgentTimeout)
logger.Debug().Int("vmid", vmr.VmId()).Msgf("retries will end at %s", guestAgentWaitEnd)
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("retrying for at most %v minutes before giving up", guestAgentTimeout)
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("retries will end at %s", guestAgentWaitEnd)
IPs, agentDiags := getPrimaryIP(ctx, config.CloudInit, config.Networks, vmr, client, guestAgentWaitEnd, d.Get(schemaAdditionalWait).(int), d.Get(schemaAgentTimeout).(int), ciAgentEnabled, d.Get(schemaSkipIPv4).(bool), d.Get(schemaSkipIPv6).(bool), hasCiDisk)
if len(agentDiags) > 0 {
diags = append(diags, agentDiags...)
Expand All @@ -1592,7 +1587,7 @@ func initConnInfo(ctx context.Context, d *schema.ResourceData, client *pveSDK.Cl

sshPort := "22"
log.Printf("[DEBUG][initConnInfo] this is the vm configuration: %s %s", sshHost, sshPort)
logger.Debug().Int("vmid", vmr.VmId()).Msgf("this is the vm configuration: %s %s", sshHost, sshPort)
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("this is the vm configuration: %s %s", sshHost, sshPort)

// Optional convenience attributes for provisioners
_ = d.Set("default_ipv4_address", IPs.IPv4)
Expand Down Expand Up @@ -1620,7 +1615,7 @@ func getPrimaryIP(ctx context.Context, cloudInit *pveSDK.CloudInit, networks pve
if hasCiDisk { // Check if we have a Cloud-Init disk, cloud-init setting won't have any effect if without it.
if cloudInit != nil { // Check if we have a Cloud-Init configuration
log.Print("[INFO][getPrimaryIP] vm has a cloud-init configuration")
logger.Debug().Int("vmid", vmr.VmId()).Msgf(" vm has a cloud-init configuration")
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf(" vm has a cloud-init configuration")
var cicustom bool
if cloudInit.Custom != nil && cloudInit.Custom.Network != nil {
cicustom = true
Expand Down Expand Up @@ -1664,11 +1659,11 @@ func getPrimaryIP(ctx context.Context, cloudInit *pveSDK.CloudInit, networks pve
return primaryIPs{}, diag.FromErr(err)
}
log.Printf("[INFO][getPrimaryIP] check ip result error %s", err.Error())
logger.Debug().Int("vmid", vmr.VmId()).Msgf("check ip result error %s", err.Error())
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("check ip result error %s", err.Error())
} else { // vm is running and reachable
if len(interfaces) > 0 { // agent returned some information
log.Printf("[INFO][getPrimaryIP] QEMU Agent interfaces found: %v", interfaces)
logger.Debug().Int("vmid", vmr.VmId()).Msgf("QEMU Agent interfaces found: %v", interfaces)
logger.Debug().Int(vmID.Root, vmr.VmId()).Msgf("QEMU Agent interfaces found: %v", interfaces)
conn = conn.parsePrimaryIPs(interfaces, primaryMacAddress)
if conn.hasRequiredIP() {
return conn.IPs, diag.Diagnostics{}
Expand Down
21 changes: 0 additions & 21 deletions proxmox/validators.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,27 +59,6 @@ func MacAddressValidator() schema.SchemaValidateDiagFunc {
}
}

// VMIDValidator returns a schema validation function that ensures a guest ID
// is either 0 (meaning "use the next available ID") or within Proxmox's
// valid guest-ID range.
func VMIDValidator() schema.SchemaValidateDiagFunc {
	// Named constants instead of locals called min/max, which shadow the
	// Go 1.21 builtins of the same name.
	const (
		minID = 100
		maxID = 999999999
	)
	return func(i interface{}, k cty.Path) diag.Diagnostics {
		val, ok := i.(int)
		if !ok {
			return diag.Errorf("expected type of %v to be int", k)
		}
		if val != 0 && (val < minID || val > maxID) {
			return diag.Errorf("proxmox %s must be in the range (%d - %d) or 0 for next available ID, got %d", k, minID, maxID, val)
		}
		return nil
	}
}

func BIOSValidator() schema.SchemaValidateDiagFunc {
return validation.ToDiagFunc(validation.StringInSlice([]string{
"ovmf",
Expand Down

0 comments on commit d5a0a13

Please sign in to comment.