diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..e4a46e53 --- /dev/null +++ b/.gitignore @@ -0,0 +1,16 @@ +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log + +# Ignore any .tfvars files that are generated automatically for each Terraform run. Most +# .tfvars files are managed as part of configuration and so should be included in +# version control. +# +# example.tfvars + diff --git a/ovirt/data_source_disk.go b/ovirt/data_source_disk.go index 395c8402..ebf3339e 100644 --- a/ovirt/data_source_disk.go +++ b/ovirt/data_source_disk.go @@ -8,10 +8,9 @@ package ovirt import ( "fmt" - "strconv" - "github.com/EMSL-MSC/ovirtapi" "github.com/hashicorp/terraform/helper/schema" + ovirtsdk4 "gopkg.in/imjoey/go-ovirt.v4" ) func dataSourceDisk() *schema.Resource { @@ -47,24 +46,25 @@ func dataSourceDisk() *schema.Resource { } func dataSourceDiskRead(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - disks, err := con.GetAllDisks() + conn := meta.(*ovirtsdk4.Connection) + + listResp, err := conn.SystemService().DisksService(). 
+ List().Search(fmt.Sprintf("name=%s", d.Get("name"))).Send() if err != nil { - d.SetId("") return err } - for _, disk := range disks { - if disk.Name == d.Get("name") { - d.Set("size", disk.ProvisionedSize) - d.Set("format", disk.Format) - d.Set("storage_domain_id", disk.StorageDomains.StorageDomain[0].ID) - shareable, _ := strconv.ParseBool(disk.Shareable) - d.Set("shareable", shareable) - sparse, _ := strconv.ParseBool(disk.Sparse) - d.Set("sparse", sparse) - return nil - } + + disks, ok := listResp.Disks() + if !ok || len(disks.Slice()) == 0 { + d.SetId("") + return nil } - return fmt.Errorf("Disk %s not found", d.Get("name")) + disk := disks.Slice()[0] + d.Set("size", disk.MustProvisionedSize()) + d.Set("format", disk.MustFormat()) + d.Set("storage_domain_id", disk.MustStorageDomains().Slice()[0].MustId()) + d.Set("shareable", disk.MustShareable()) + d.Set("sparse", disk.MustSparse()) + return nil } diff --git a/ovirt/provider.go b/ovirt/provider.go index 63eae4ec..a1eae37b 100644 --- a/ovirt/provider.go +++ b/ovirt/provider.go @@ -7,9 +7,9 @@ package ovirt import ( - "github.com/EMSL-MSC/ovirtapi" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + ovirtsdk4 "gopkg.in/imjoey/go-ovirt.v4" ) // Provider returns oVirt provider configuration @@ -45,5 +45,10 @@ func Provider() terraform.ResourceProvider { } func ConfigureProvider(d *schema.ResourceData) (interface{}, error) { - return ovirtapi.NewConnection(d.Get("url").(string), d.Get("username").(string), d.Get("password").(string), false) + return ovirtsdk4.NewConnectionBuilder(). + URL(d.Get("url").(string)). + Username(d.Get("username").(string)). + Password(d.Get("password").(string)). + Insecure(true). 
+ Build() } diff --git a/ovirt/resource_disk.go b/ovirt/resource_disk.go index 1c3a6920..ad198f9e 100644 --- a/ovirt/resource_disk.go +++ b/ovirt/resource_disk.go @@ -7,10 +7,8 @@ package ovirt import ( - "strconv" - - "github.com/EMSL-MSC/ovirtapi" "github.com/hashicorp/terraform/helper/schema" + ovirtsdk4 "gopkg.in/imjoey/go-ovirt.v4" ) func resourceDisk() *schema.Resource { @@ -49,65 +47,84 @@ func resourceDisk() *schema.Resource { Optional: true, ForceNew: true, }, + // "qcow_version" is the only field supporting Disk-Update }, } } func resourceDiskCreate(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) + conn := meta.(*ovirtsdk4.Connection) - newDisk := con.NewDisk() - err := resourceDiskModify(d, newDisk) + diskBuilder := ovirtsdk4.NewDiskBuilder(). + Name(d.Get("name").(string)). + Format(ovirtsdk4.DiskFormat(d.Get("format").(string))). + ProvisionedSize(int64(d.Get("size").(int))). + StorageDomainsOfAny( + ovirtsdk4.NewStorageDomainBuilder(). + Id(d.Get("storage_domain_id").(string)). 
+ MustBuild()) + if shareable, ok := d.GetOkExists("shareable"); ok { + diskBuilder.Shareable(shareable.(bool)) + } + if sparse, ok := d.GetOkExists("sparse"); ok { + diskBuilder.Sparse(sparse.(bool)) + } + disk, err := diskBuilder.Build() if err != nil { - newDisk.Delete() return err } - d.SetId(newDisk.ID) - return nil -} -func resourceDiskModify(d *schema.ResourceData, disk *ovirtapi.Disk) error { - disk.ProvisionedSize = d.Get("size").(int) - disk.Format = d.Get("format").(string) - disk.Name = d.Get("name").(string) - storageDomains := ovirtapi.StorageDomains{} - storageDomains.StorageDomain = append(storageDomains.StorageDomain, ovirtapi.Link{ - ID: d.Get("storage_domain_id").(string), - }) - disk.StorageDomains = &storageDomains - if d.Get("shareable").(bool) { - disk.Shareable = "true" - } - if d.Get("sparse").(bool) { - disk.Sparse = "true" + addResp, err := conn.SystemService().DisksService().Add().Disk(disk).Send() + if err != nil { + return err } - return disk.Save() + + d.SetId(addResp.MustDisk().MustId()) + return nil } func resourceDiskRead(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - disk, err := con.GetDisk(d.Id()) + conn := meta.(*ovirtsdk4.Connection) + getDiskResp, err := conn.SystemService().DisksService(). 
+ DiskService(d.Id()).Get().Send() if err != nil { + return err + } + + disk, ok := getDiskResp.Disk() + if !ok { d.SetId("") return nil } - d.Set("name", disk.Name) - d.Set("size", disk.ProvisionedSize) - d.Set("format", disk.Format) - d.Set("storage_domain_id", disk.StorageDomains.StorageDomain[0].ID) - shareable, _ := strconv.ParseBool(disk.Shareable) - d.Set("shareable", shareable) - sparse, _ := strconv.ParseBool(disk.Sparse) - d.Set("sparse", sparse) + d.Set("name", disk.MustName()) + d.Set("size", disk.MustProvisionedSize()) + d.Set("format", disk.MustFormat()) + + if sds, ok := disk.StorageDomains(); ok { + if len(sds.Slice()) > 0 { + d.Set("storage_domain_id", sds.Slice()[0].MustId()) + } + } + + if shareable, ok := disk.Shareable(); ok { + d.Set("shareable", shareable) + } + + if sparse, ok := disk.Sparse(); ok { + d.Set("sparse", sparse) + } + return nil } func resourceDiskDelete(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - disk, err := con.GetDisk(d.Id()) + conn := meta.(*ovirtsdk4.Connection) + + _, err := conn.SystemService().DisksService(). 
+ DiskService(d.Id()).Remove().Send() if err != nil { - return nil + return err } - return disk.Delete() + return nil } diff --git a/ovirt/resource_vm.go b/ovirt/resource_vm.go index 5c2a5e0f..20e502cd 100644 --- a/ovirt/resource_vm.go +++ b/ovirt/resource_vm.go @@ -8,11 +8,11 @@ package ovirt import ( "fmt" - "strconv" "time" - "github.com/EMSL-MSC/ovirtapi" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" + ovirtsdk4 "gopkg.in/imjoey/go-ovirt.v4" ) func resourceVM() *schema.Resource { @@ -144,138 +144,202 @@ func resourceVM() *schema.Resource { } func resourceVMCreate(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - newVM := con.NewVM() - newVM.Name = d.Get("name").(string) - - cluster := con.NewCluster() - cluster.Name = d.Get("cluster").(string) - newVM.Cluster = cluster - - template := con.NewTemplate() - template.Name = d.Get("template").(string) - newVM.Template = template - newVM.CPU = &ovirtapi.CPU{ - Topology: &ovirtapi.CPUTopology{ - Cores: d.Get("cores").(int), - Sockets: d.Get("sockets").(int), - Threads: d.Get("threads").(int), - }, + conn := meta.(*ovirtsdk4.Connection) + vmsService := conn.SystemService().VmsService() + + cluster, err := ovirtsdk4.NewClusterBuilder(). + Name(d.Get("cluster").(string)).Build() + if err != nil { + return err + } + + cpuCore, err := ovirtsdk4.NewCoreBuilder(). + Socket(int64(d.Get("sockets").(int))).Build() + if err != nil { + return err + } + + template, err := ovirtsdk4.NewTemplateBuilder(). + Name(d.Get("template").(string)).Build() + if err != nil { + return err + } + + cpu, err := ovirtsdk4.NewCpuBuilder(). + CoresOfAny(cpuCore).Build() + if err != nil { + return err } - newVM.Initialization = &ovirtapi.Initialization{} - newVM.Initialization.AuthorizedSSHKeys = d.Get("authorized_ssh_key").(string) + initialBuilder := ovirtsdk4.NewInitializationBuilder(). 
+ AuthorizedSshKeys(d.Get("authorized_ssh_key").(string)) numNetworks := d.Get("network_interface.#").(int) - NICConfigurations := make([]ovirtapi.NICConfiguration, numNetworks) for i := 0; i < numNetworks; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - _ = prefix - NICConfigurations[i] = ovirtapi.NICConfiguration{ - IP: &ovirtapi.IP{ - Address: d.Get(prefix + ".ip_address").(string), - Netmask: d.Get(prefix + ".subnet_mask").(string), - Gateway: d.Get(prefix + ".gateway").(string), - }, - BootProtocol: d.Get(prefix + ".boot_proto").(string), - OnBoot: strconv.FormatBool(d.Get(prefix + ".on_boot").(bool)), - Name: d.Get(prefix + ".label").(string), - } - if i == 0 { - d.SetConnInfo(map[string]string{ - "host": d.Get(prefix + ".ip_address").(string), - }) - } + + ncBuilder := ovirtsdk4.NewNicConfigurationBuilder(). + Name(d.Get(prefix + ".label").(string)). + IpBuilder( + ovirtsdk4.NewIpBuilder(). + Address(d.Get(prefix + ".ip_address").(string)). + Netmask(d.Get(prefix + ".subnet_mask").(string)). + Gateway(d.Get(prefix + ".gateway").(string))). + BootProtocol(ovirtsdk4.BootProtocol(d.Get(prefix + ".boot_proto").(string))). + OnBoot(d.Get(prefix + ".on_boot").(bool)) + initialBuilder.NicConfigurationsBuilderOfAny(*ncBuilder) } - newVM.Initialization.NICConfigurations = &ovirtapi.NICConfigurations{NICConfiguration: NICConfigurations} - err := newVM.Save() + initialize, err := initialBuilder.Build() if err != nil { return err } - d.SetId(newVM.ID) - for newVM.Status != "down" { - time.Sleep(time.Second) - newVM.Update() + resp, err := vmsService.Add(). + Vm( + ovirtsdk4.NewVmBuilder(). + Name(d.Get("name").(string)). + Cluster(cluster). + Template(template). + Cpu(cpu). + Initialization(initialize). + MustBuild()). 
+ Send() + + if err != nil { + return err + } + newVM, ok := resp.Vm() + if ok { + d.SetId(newVM.MustId()) } + vmService := conn.SystemService().VmsService().VmService(newVM.MustId()) + attachmentSet := d.Get("attached_disks").(*schema.Set) for _, v := range attachmentSet.List() { attachment := v.(map[string]interface{}) - disk, err := con.GetDisk(attachment["disk_id"].(string)) + + diskService := conn.SystemService().DisksService(). + DiskService(attachment["disk_id"].(string)) + + var disk *ovirtsdk4.Disk + + err = resource.Retry(30*time.Second, func() *resource.RetryError { + getDiskResp, err := diskService.Get().Send() + if err != nil { + return resource.RetryableError(err) + } + disk = getDiskResp.MustDisk() + + if disk.MustStatus() == ovirtsdk4.DISKSTATUS_LOCKED { + return resource.RetryableError(fmt.Errorf("disk is locked, wait for next check")) + } + return nil + }) + if err != nil { return err } - diskAttachment := ovirtapi.DiskAttachment{ - Disk: disk, - Active: strconv.FormatBool(attachment["active"].(bool)), - Bootable: strconv.FormatBool(attachment["bootable"].(bool)), - Interface: attachment["interface"].(string), - LogicalName: attachment["logical_name"].(string), - PassDiscard: strconv.FormatBool(attachment["pass_discard"].(bool)), - ReadOnly: strconv.FormatBool(attachment["read_only"].(bool)), - UsesSCSIReservation: strconv.FormatBool(attachment["use_scsi_reservation"].(bool)), - } - _, err = newVM.AddLinkObject("diskattachments", diskAttachment, nil) + + _, err = vmService.DiskAttachmentsService().Add(). + Attachment( + ovirtsdk4.NewDiskAttachmentBuilder(). + Disk(disk). + Interface(ovirtsdk4.DiskInterface(attachment["interface"].(string))). + Bootable(attachment["bootable"].(bool)). + Active(attachment["active"].(bool)). + LogicalName(attachment["logical_name"].(string)). + PassDiscard(attachment["pass_discard"].(bool)). + ReadOnly(attachment["read_only"].(bool)). + UsesScsiReservation(attachment["use_scsi_reservation"].(bool)). + MustBuild()). 
+ Send() if err != nil { return err } + } - err = newVM.Start("", "", "", "true", "", nil) + _, err = vmService.Start().Send() if err != nil { - newVM.Delete() return err } + return nil } func resourceVMUpdate(d *schema.ResourceData, meta interface{}) error { return nil } + func resourceVMRead(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - vm, err := con.GetVM(d.Id()) + conn := meta.(*ovirtsdk4.Connection) + getVmresp, err := conn.SystemService().VmsService(). + VmService(d.Id()).Get().Send() if err != nil { - d.SetId("") - return nil + return err } - d.Set("name", vm.Name) - cluster, err := con.GetCluster(vm.Cluster.ID) - if err != nil { + vm, ok := getVmresp.Vm() + + if !ok { d.SetId("") return nil } - d.Set("cluster", cluster.Name) + d.Set("name", vm.MustName()) + d.Set("cores", vm.MustCpu().MustTopology().MustCores()) + d.Set("sockets", vm.MustCpu().MustTopology().MustSockets()) + d.Set("threads", vm.MustCpu().MustTopology().MustThreads()) + d.Set("authorized_ssh_key", vm.MustInitialization().MustAuthorizedSshKeys()) - template, err := con.GetTemplate(vm.Template.ID) - if err != nil { - d.SetId("") - return nil + // Use `conn.FollowLink` function to fetch cluster and template instance from a vm. + // See: https://github.com/imjoey/go-ovirt/blob/master/examples/follow_vm_links.go. 
+ cluster, _ := conn.FollowLink(vm.MustCluster()) + if cluster, ok := cluster.(*ovirtsdk4.Cluster); ok { + d.Set("cluster", cluster.MustName()) } - d.Set("template", template.Name) - d.Set("cores", vm.CPU.Topology.Cores) - d.Set("sockets", vm.CPU.Topology.Sockets) - d.Set("threads", vm.CPU.Topology.Threads) - d.Set("authorized_ssh_key", vm.Initialization.AuthorizedSSHKeys) + template, _ := conn.FollowLink(vm.MustTemplate()) + if template, ok := template.(*ovirtsdk4.Template); ok { + d.Set("template", template.MustName()) + } + return nil } func resourceVMDelete(d *schema.ResourceData, meta interface{}) error { - con := meta.(*ovirtapi.Connection) - vm, err := con.GetVM(d.Id()) - if err != nil { + conn := meta.(*ovirtsdk4.Connection) + + vmService := conn.SystemService().VmsService().VmService(d.Id()) + + return resource.Retry(3*time.Minute, func() *resource.RetryError { + getVMResp, err := vmService.Get().Send() + if err != nil { + return resource.RetryableError(err) + } + + vm, ok := getVMResp.Vm() + if !ok { + d.SetId("") + return nil + } + + if vm.MustStatus() != ovirtsdk4.VMSTATUS_DOWN { + _, err := vmService.Shutdown().Send() + if err != nil { + return resource.RetryableError(fmt.Errorf("Stop instance timeout and got an error: %v", err)) + } + } + // + _, err = vmService.Remove(). + DetachOnly(true). // DetachOnly detaches the disks instead of removing them + Send() + if err != nil { + return resource.RetryableError(fmt.Errorf("Delete instance timeout and got an error: %v", err)) + } + return nil - } - if vm.Status != "down" { - vm.Stop("false") - } - for vm.Status != "down" { - time.Sleep(time.Second) - vm.Update() - } - return vm.Delete() + + }) }