From 44db9b7df877ddc9d928bd96b86925dac9f79230 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 9 Jan 2025 14:35:53 +0000 Subject: [PATCH 01/51] api: Add Pure Storage driver API extension Signed-off-by: Din Music --- doc/api-extensions.md | 12 ++++++++++++ shared/version/api.go | 1 + 2 files changed, 13 insertions(+) diff --git a/doc/api-extensions.md b/doc/api-extensions.md index 271b9463cb76..e9bbcde72065 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -2587,3 +2587,15 @@ The following new pool level configuration keys have been added: The following configuration keys have been added for volumes backed by PowerFlex: 1. {config:option}`storage-powerflex-volume-conf:block.type` + +## `storage_driver_pure` + +Adds a new `pure` storage driver which allows the consumption of storage volumes from a Pure Storage storage array using either iSCSI or NVMe/TCP. + +The following pool level configuration keys have been added: + +1. {config:option}`storage-pure-pool-conf:pure.gateway` +1. {config:option}`storage-pure-pool-conf:pure.gateway.verify` +1. {config:option}`storage-pure-pool-conf:pure.api.token` +1. {config:option}`storage-pure-pool-conf:pure.mode` +1. {config:option}`storage-pure-pool-conf:pure.target` diff --git a/shared/version/api.go b/shared/version/api.go index 4fc3ce7e5f99..1507c0583f07 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -435,6 +435,7 @@ var APIExtensions = []string{ "entities_with_entitlements", "profiles_all_projects", "storage_driver_powerflex", + "storage_driver_pure", } // APIExtensionsCount returns the number of available API extensions. From 7a1027132553f290ae6a48f3aada97ac0f9aad34 Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 27 Sep 2024 15:37:34 +0000 Subject: [PATCH 02/51] lxd/storage/drivers/pure: Initial scaffolding for storage driver pure Empty functions that satisfy storage pool interface. Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 119 +++++++++++++++ lxd/storage/drivers/driver_pure_util.go | 14 ++ lxd/storage/drivers/driver_pure_volumes.go | 164 +++++++++++++++++++++ 3 files changed, 297 insertions(+) create mode 100644 lxd/storage/drivers/driver_pure.go create mode 100644 lxd/storage/drivers/driver_pure_util.go create mode 100644 lxd/storage/drivers/driver_pure_volumes.go diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go new file mode 100644 index 000000000000..d581ff4c6698 --- /dev/null +++ b/lxd/storage/drivers/driver_pure.go @@ -0,0 +1,119 @@ +package drivers + +import ( + "github.com/canonical/lxd/lxd/migration" + "github.com/canonical/lxd/lxd/operations" + "github.com/canonical/lxd/shared/api" +) + +// pureLoaded indicates whether load() function was already called for the Pure Storage driver. +var pureLoaded = false + +// pureVersion indicates Pure Storage version. +var pureVersion = "" + +type pure struct { + common + + // Holds the low level HTTP client for the Pure Storage API. + // Use pure.client() to retrieve the client struct. + httpClient *pureClient +} + +// load is used initialize the driver. It should be used only once. +func (d *pure) load() error { + // Done if previously loaded. + if pureLoaded { + return nil + } + + pureLoaded = true + return nil +} + +// client returns the drivers Pure Storage client. A new client is created only if it does not already exist. 
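Client code can feature-detect the new driver through this extension. A minimal sketch using the LXD Go client (connection details are illustrative, assuming the default local unix socket):

    package main

    import (
    	"fmt"

    	lxd "github.com/canonical/lxd/client"
    )

    func main() {
    	// Connect over the local unix socket (default path).
    	c, err := lxd.ConnectLXDUnix("", nil)
    	if err != nil {
    		panic(err)
    	}

    	// HasExtension reports whether the server advertises the extension.
    	fmt.Println("pure driver supported:", c.HasExtension("storage_driver_pure"))
    }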
+func (d *pure) client() *pureClient { + if d.httpClient == nil { + d.httpClient = newPureClient(d) + } + + return d.httpClient +} + +// isRemote returns true indicating this driver uses remote storage. +func (d *pure) isRemote() bool { + return true +} + +// Info returns info about the driver and its environment. +func (d *pure) Info() Info { + return Info{ + Name: "pure", + Version: pureVersion, + DefaultBlockSize: d.defaultBlockVolumeSize(), + DefaultVMBlockFilesystemSize: d.defaultVMBlockFilesystemSize(), + OptimizedImages: false, + PreservesInodes: false, + Remote: d.isRemote(), + VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeVM, VolumeTypeContainer, VolumeTypeImage}, + BlockBacking: true, + RunningCopyFreeze: true, + DirectIO: true, + IOUring: true, + MountedRoot: false, + } +} + +// FillConfig populates the storage pool's configuration file with the default values. +func (d *pure) FillConfig() error { + return nil +} + +// Validate checks that all provided keys are supported and there is no conflicting or missing configuration. +func (d *pure) Validate(config map[string]string) error { + return nil +} + +// Create is called during pool creation and is effectively using an empty driver struct. +// WARNING: The Create() function cannot rely on any of the struct attributes being set. +func (d *pure) Create() error { + err := d.FillConfig() + if err != nil { + return err + } + + return nil +} + +// Update applies any driver changes required from a configuration change. +func (d *pure) Update(changedConfig map[string]string) error { + return nil +} + +// Delete removes the storage pool (Pure Storage pod). +func (d *pure) Delete(op *operations.Operation) error { + return nil +} + +// Mount mounts the storage pool. +func (d *pure) Mount() (bool, error) { + // Nothing to do here. + return true, nil +} + +// Unmount unmounts the storage pool. +func (d *pure) Unmount() (bool, error) { + // Nothing to do here. + return true, nil +} + +// GetResources returns the pool resource usage information. +func (d *pure) GetResources() (*api.ResourcesStoragePool, error) { + res := &api.ResourcesStoragePool{} + return res, nil +} + +// MigrationTypes returns the type of transfer methods to be used when doing migrations between pools in preference order. +func (d *pure) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type { + return []migration.Type{} +} diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go new file mode 100644 index 000000000000..fc523864206c --- /dev/null +++ b/lxd/storage/drivers/driver_pure_util.go @@ -0,0 +1,14 @@ +package drivers + +// pureClient holds the Pure Storage HTTP client and an access token. +type pureClient struct { + driver *pure + accessToken string +} + +// newPureClient creates a new instance of the HTTP Pure Storage client. 
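Note that client() above performs an unsynchronized lazy initialization; if a single driver instance can be reached by concurrent goroutines, a sync.Once guard is one possible variant. A sketch only, not part of this patch; it assumes the pure struct gains a clientOnce sync.Once field and the "sync" import:

    // Hypothetical variant of client() guarded by sync.Once.
    func (d *pure) client() *pureClient {
    	d.clientOnce.Do(func() {
    		d.httpClient = newPureClient(d)
    	})

    	return d.httpClient
    }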
+func newPureClient(driver *pure) *pureClient { + return &pureClient{ + driver: driver, + } +} diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go new file mode 100644 index 000000000000..faac54092ba8 --- /dev/null +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -0,0 +1,164 @@ +package drivers + +import ( + "io" + + "github.com/canonical/lxd/lxd/backup" + "github.com/canonical/lxd/lxd/instancewriter" + "github.com/canonical/lxd/lxd/migration" + "github.com/canonical/lxd/lxd/operations" + "github.com/canonical/lxd/shared/revert" +) + +// commonVolumeRules returns validation rules which are common for pool and volume. +func (d *pure) commonVolumeRules() map[string]func(value string) error { + return map[string]func(value string) error{} +} + +// CreateVolume creates an empty volume and can optionally fill it by executing the supplied filler function. +func (d *pure) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error { + return nil +} + +// CreateVolumeFromBackup re-creates a volume from its exported state. +func (d *pure) CreateVolumeFromBackup(vol VolumeCopy, srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (VolumePostHook, revert.Hook, error) { + return genericVFSBackupUnpack(d, d.state.OS, vol, srcBackup.Snapshots, srcData, op) +} + +// CreateVolumeFromCopy provides same-pool volume copying functionality. +func (d *pure) CreateVolumeFromCopy(vol VolumeCopy, srcVol VolumeCopy, allowInconsistent bool, op *operations.Operation) error { + return nil +} + +// CreateVolumeFromMigration creates a volume being sent via a migration. +func (d *pure) CreateVolumeFromMigration(vol VolumeCopy, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error { + _, err := genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op) + return err +} + +// RefreshVolume updates an existing volume to match the state of another. +func (d *pure) RefreshVolume(vol VolumeCopy, srcVol VolumeCopy, refreshSnapshots []string, allowInconsistent bool, op *operations.Operation) error { + _, err := genericVFSCopyVolume(d, nil, vol, srcVol, refreshSnapshots, true, allowInconsistent, op) + return err +} + +// DeleteVolume deletes a volume of the storage device. +// If any snapshots of the volume remain then this function will return an error. +func (d *pure) DeleteVolume(vol Volume, op *operations.Operation) error { + return nil +} + +// HasVolume indicates whether a specific volume exists on the storage pool. +func (d *pure) HasVolume(vol Volume) (bool, error) { + return true, nil +} + +// FillVolumeConfig populate volume with default config. +func (d *pure) FillVolumeConfig(vol Volume) error { + return nil +} + +// ValidateVolume validates the supplied volume config. +func (d *pure) ValidateVolume(vol Volume, removeUnknownKeys bool) error { + commonRules := d.commonVolumeRules() + return d.validateVolume(vol, commonRules, removeUnknownKeys) +} + +// UpdateVolume applies config changes to the volume. +func (d *pure) UpdateVolume(vol Volume, changedConfig map[string]string) error { + return nil +} + +// GetVolumeUsage returns the disk space used by the volume. +func (d *pure) GetVolumeUsage(vol Volume) (int64, error) { + return 0, ErrNotSupported +} + +// SetVolumeQuota applies a size limit on volume. +// Does nothing if supplied with an empty/zero size. 
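Pure Storage sizes are handled as multiples of 512 bytes throughout this series (see the `size` validation rules added later), so quota values typically get rounded up before being applied. A small helper sketch (the name roundUp512 is hypothetical):

    // roundUp512 rounds n up to the next multiple of 512 bytes.
    func roundUp512(n int64) int64 {
    	return (n + 511) &^ 511
    }

    // For example, roundUp512(1000) == 1024 and roundUp512(1024) == 1024.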
+func (d *pure) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, op *operations.Operation) error { + return nil +} + +// GetVolumeDiskPath returns the location of a root disk block device. +func (d *pure) GetVolumeDiskPath(vol Volume) (string, error) { + return "", ErrNotSupported +} + +// ListVolumes returns a list of LXD volumes in storage pool. +func (d *pure) ListVolumes() ([]Volume, error) { + return []Volume{}, nil +} + +// MountVolume mounts a volume and increments ref counter. Please call UnmountVolume() when done with the volume. +func (d *pure) MountVolume(vol Volume, op *operations.Operation) error { + return nil +} + +// UnmountVolume simulates unmounting a volume. +// keepBlockDev indicates if backing block device should not be unmapped if volume is unmounted. +func (d *pure) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) { + return false, nil +} + +// RenameVolume renames a volume and its snapshots. +func (d *pure) RenameVolume(vol Volume, newVolName string, op *operations.Operation) error { + // Renaming a volume won't change an actual name of the Pure Storage volume. + return nil +} + +// RestoreVolume restores a volume from a snapshot. +func (d *pure) RestoreVolume(vol Volume, snapVol Volume, op *operations.Operation) error { + return nil +} + +// MigrateVolume sends a volume for migration. +func (d *pure) MigrateVolume(vol VolumeCopy, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error { + // When performing a cluster member move don't do anything on the source member. + if volSrcArgs.ClusterMove { + return nil + } + + return genericVFSMigrateVolume(d, d.state, vol, conn, volSrcArgs, op) +} + +// BackupVolume creates an exported version of a volume. +func (d *pure) BackupVolume(vol VolumeCopy, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots []string, op *operations.Operation) error { + return genericVFSBackupVolume(d, vol, tarWriter, snapshots, op) +} + +// CreateVolumeSnapshot creates a snapshot of a volume. +func (d *pure) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error { + return nil +} + +// DeleteVolumeSnapshot removes a snapshot from the storage device. +func (d *pure) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error { + return nil +} + +// MountVolumeSnapshot simulates mounting a volume snapshot. +func (d *pure) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) error { + return d.MountVolume(snapVol, op) +} + +// UnmountVolumeSnapshot simulates unmounting a volume snapshot. +func (d *pure) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) { + return d.UnmountVolume(snapVol, false, op) +} + +// VolumeSnapshots returns a list of snapshots for the volume (in no particular order). +func (d *pure) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) { + return []string{}, nil +} + +// CheckVolumeSnapshots checks that the volume's snapshots, according to the storage driver, match those provided. +func (d *pure) CheckVolumeSnapshots(vol Volume, snapVols []Volume, op *operations.Operation) error { + return nil +} + +// RenameVolumeSnapshot renames a volume snapshot. +func (d *pure) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error { + // Renaming a volume snapshot won't change an actual name of the Pure Storage volume snapshot. 
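+	// The Pure Storage volume name is derived from the volume's volatile.uuid
+	// (see getVolumeName, added later in this series), which a rename does not
+	// change, so no remote operation is needed.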
+ return nil +} From 53cac8d9cc7cf4cee1c3d082e6b51115c19956fd Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 4 Dec 2024 14:23:05 +0000 Subject: [PATCH 03/51] lxd/storage/drivers/driver_types: Add driver option PopulateParentVolumeUUID Option PopulateParentVolumeUUID ensures the volumes get parent UUID always populated. In case of Pure Storage we always need to know the volume UUID as snapshots are linked to parent volume (snapshots are not independent volumes). Signed-off-by: Din Music --- lxd/storage/drivers/driver_types.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/storage/drivers/driver_types.go b/lxd/storage/drivers/driver_types.go index cd842308d25c..8654eef08106 100644 --- a/lxd/storage/drivers/driver_types.go +++ b/lxd/storage/drivers/driver_types.go @@ -19,6 +19,7 @@ type Info struct { DirectIO bool // Whether the driver supports direct I/O. IOUring bool // Whether the driver supports io_uring. MountedRoot bool // Whether the pool directory itself is a mount. + PopulateParentVolumeUUID bool // Whether the volume should have parent UUID populated before any action. } // VolumeFiller provides a struct for filling a volume. From 44d05adfe5fc1e40c1d620efe783a114c65c1da4 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 4 Dec 2024 14:25:56 +0000 Subject: [PATCH 04/51] lxd/storage/backend_lxd: Ensure parent volume UUID is populated when required by driver Signed-off-by: Din Music --- lxd/storage/backend_lxd.go | 118 +++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 639be8bcf18b..e2538724bb4e 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -1155,6 +1155,16 @@ func (b *lxdBackend) CreateInstanceFromCopy(inst instance.Instance, src instance srcVolStorageName := project.Instance(src.Project().Name, src.Name()) srcVol := b.GetVolume(volType, contentType, srcVolStorageName, srcConfig.Volume.Config) + // Set the parent volume's UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(srcVol, src.Project().Name) + if err != nil { + return err + } + + srcVol.SetParentUUID(parentUUID) + } + volCopy := drivers.NewVolumeCopy(vol, targetSnapshots...) srcVolCopy := drivers.NewVolumeCopy(srcVol, sourceSnapshots...) @@ -2016,6 +2026,16 @@ func (b *lxdBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint // will cause a non matching configuration which will always fall back to non optimized storage. vol := b.GetNewVolume(volType, contentType, volStorageName, volumeConfig) + // Set the parent volume UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, inst.Project().Name) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + // Validate config and create database entry for new storage volume. err = VolumeDBCreate(b, inst.Project().Name, inst.Name(), "", volType, false, vol.Config(), inst.CreationDate(), time.Time{}, contentType, true, false) if err != nil { @@ -2366,6 +2386,16 @@ func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io targetSnapshots = append(targetSnapshots, b.GetVolume(volType, contentType, snapshotStorageName, snap.Config)) } + // Set the parent volume's UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, projectName) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + volCopy := drivers.NewVolumeCopy(vol, targetSnapshots...) 
err = b.driver.CreateVolumeFromMigration(volCopy, conn, args, &preFiller, op) @@ -3078,6 +3108,16 @@ func (b *lxdBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCl _ = filesystem.SyncFS(inst.RootfsPath()) } + // Set the parent volume UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, inst.Project().Name) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + volCopy := drivers.NewVolumeCopy(vol, sourceSnapshots...) err = b.driver.MigrateVolume(volCopy, conn, args, op) @@ -3710,6 +3750,16 @@ func (b *lxdBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operatio vol := b.GetVolume(volType, contentType, snapVolName, dbVol.Config) + // Set the parent volume UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, inst.Project().Name) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + volExists, err := b.driver.HasVolume(vol) if err != nil { return err @@ -3911,6 +3961,17 @@ func (b *lxdBackend) MountInstanceSnapshot(inst instance.Instance, op *operation return nil, err } + // Set the parent volume UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, inst.Project().Name) + if err != nil { + return nil, err + } + + vol.SetParentUUID(parentUUID) + } + + // Mount the snapshot. err = b.driver.MountVolumeSnapshot(vol, op) if err != nil { return nil, err @@ -3960,6 +4021,17 @@ func (b *lxdBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operati return err } + // Set the parent volume UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, inst.Project().Name) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + + // Unmount volume. _, err = b.driver.UnmountVolumeSnapshot(vol, op) return err @@ -5726,6 +5798,15 @@ func (b *lxdBackend) CreateCustomVolumeFromMigration(projectName string, conn io return err } + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, projectName) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + revert := revert.New() defer revert.Fail() @@ -6616,6 +6697,16 @@ func (b *lxdBackend) DeleteCustomVolumeSnapshot(projectName, volName string, op vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, volume.Config) + // Set the parent volume's UUID. + if b.driver.Info().PopulateParentVolumeUUID { + parentUUID, err := b.getParentVolumeUUID(vol, projectName) + if err != nil { + return err + } + + vol.SetParentUUID(parentUUID) + } + // Delete the snapshot from the storage device. // Must come before DB VolumeDBDelete so that the volume ID is still available. volExists, err := b.driver.HasVolume(vol) @@ -7798,3 +7889,30 @@ func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData revert.Success() return nil } + +// getParentVolumeUUID returns the UUID of the parent's volume. +// If the volume has no parent, an empty string is returned. +func (b *lxdBackend) getParentVolumeUUID(vol drivers.Volume, projectName string) (string, error) { + parentName, _, isSnapshot := api.GetParentAndSnapshotName(vol.Name()) + if !isSnapshot { + // Volume has no parent. + return "", nil + } + + // Ensure the parent name does not contain a project prefix. + _, parentName = project.StorageVolumeParts(parentName) + + // Load storage volume from the database. 
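+	// The parent's database record carries the "volatile.uuid" key in its
+	// config, which uniquely identifies the parent volume on the array.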
+	parentDBVol, err := VolumeDBGet(b, projectName, parentName, vol.Type())
+	if err != nil {
+		return "", fmt.Errorf("Failed to extract parent UUID from snapshot %q in project %q: %w", vol.Name(), projectName, err)
+	}
+
+	// Extract parent volume UUID.
+	parentUUID := parentDBVol.Config["volatile.uuid"]
+	if parentUUID == "" {
+		return "", fmt.Errorf("Parent volume %q of snapshot %q in project %q does not have UUID set", parentName, vol.Name(), projectName)
+	}
+
+	return parentUUID, nil
+}

From b6b8876f6a725d87a79e1fce6c8d6f492b8229ad Mon Sep 17 00:00:00 2001
From: Din Music 
Date: Fri, 6 Dec 2024 09:51:17 +0000
Subject: [PATCH 05/51] lxd/storage/drivers: Configure PopulateParentVolumeUUID
 for all drivers

Signed-off-by: Din Music 
---
 lxd/storage/drivers/driver_btrfs.go      |  1 +
 lxd/storage/drivers/driver_ceph.go       |  1 +
 lxd/storage/drivers/driver_cephfs.go     |  1 +
 lxd/storage/drivers/driver_cephobject.go | 25 ++++++++++++------------
 lxd/storage/drivers/driver_dir.go        |  1 +
 lxd/storage/drivers/driver_lvm.go        |  1 +
 lxd/storage/drivers/driver_mock.go       |  1 +
 lxd/storage/drivers/driver_powerflex.go  |  1 +
 lxd/storage/drivers/driver_pure.go       |  1 +
 lxd/storage/drivers/driver_zfs.go        |  1 +
 10 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go
index 3219a4b73fcb..7bc7de44fe49 100644
--- a/lxd/storage/drivers/driver_btrfs.go
+++ b/lxd/storage/drivers/driver_btrfs.go
@@ -106,6 +106,7 @@ func (d *btrfs) Info() Info {
 		IOUring:                  true,
 		MountedRoot:              true,
 		Buckets:                  true,
+		PopulateParentVolumeUUID: false,
 	}
 }

diff --git a/lxd/storage/drivers/driver_ceph.go b/lxd/storage/drivers/driver_ceph.go
index c58320d8b056..d4ab1547c3f1 100644
--- a/lxd/storage/drivers/driver_ceph.go
+++ b/lxd/storage/drivers/driver_ceph.go
@@ -92,6 +92,7 @@ func (d *ceph) Info() Info {
 		DirectIO:                 true,
 		IOUring:                  true,
 		MountedRoot:              false,
+		PopulateParentVolumeUUID: false,
 	}
 }

diff --git a/lxd/storage/drivers/driver_cephfs.go b/lxd/storage/drivers/driver_cephfs.go
index d9c23391fe66..77a3034c7afc 100644
--- a/lxd/storage/drivers/driver_cephfs.go
+++ b/lxd/storage/drivers/driver_cephfs.go
@@ -91,6 +91,7 @@ func (d *cephfs) Info() Info {
 		RunningCopyFreeze:        false,
 		DirectIO:                 true,
 		MountedRoot:              true,
+		PopulateParentVolumeUUID: false,
 	}
 }

diff --git a/lxd/storage/drivers/driver_cephobject.go b/lxd/storage/drivers/driver_cephobject.go
index a4c5acf78752..43990289f05e 100644
--- a/lxd/storage/drivers/driver_cephobject.go
+++ b/lxd/storage/drivers/driver_cephobject.go
@@ -78,18 +78,19 @@ func (d *cephobject) isRemote() bool {
 // Info returns the pool driver information.
func (d *cephobject) Info() Info { return Info{ - Name: "cephobject", - Version: cephobjectVersion, - OptimizedImages: false, - PreservesInodes: false, - Remote: d.isRemote(), - Buckets: true, - VolumeTypes: []VolumeType{}, - VolumeMultiNode: false, - BlockBacking: false, - RunningCopyFreeze: false, - DirectIO: false, - MountedRoot: false, + Name: "cephobject", + Version: cephobjectVersion, + OptimizedImages: false, + PreservesInodes: false, + Remote: d.isRemote(), + Buckets: true, + VolumeTypes: []VolumeType{}, + VolumeMultiNode: false, + BlockBacking: false, + RunningCopyFreeze: false, + DirectIO: false, + MountedRoot: false, + PopulateParentVolumeUUID: false, } } diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go index c5f3b70e1b77..0d7108804c78 100644 --- a/lxd/storage/drivers/driver_dir.go +++ b/lxd/storage/drivers/driver_dir.go @@ -49,6 +49,7 @@ func (d *dir) Info() Info { IOUring: true, MountedRoot: true, Buckets: true, + PopulateParentVolumeUUID: false, } } diff --git a/lxd/storage/drivers/driver_lvm.go b/lxd/storage/drivers/driver_lvm.go index be000ed95797..eb4e662274e2 100644 --- a/lxd/storage/drivers/driver_lvm.go +++ b/lxd/storage/drivers/driver_lvm.go @@ -98,6 +98,7 @@ func (d *lvm) Info() Info { IOUring: true, MountedRoot: false, Buckets: true, + PopulateParentVolumeUUID: false, } } diff --git a/lxd/storage/drivers/driver_mock.go b/lxd/storage/drivers/driver_mock.go index cf00dc4e4063..f49fdfc377d1 100644 --- a/lxd/storage/drivers/driver_mock.go +++ b/lxd/storage/drivers/driver_mock.go @@ -35,6 +35,7 @@ func (d *mock) Info() Info { RunningCopyFreeze: true, DirectIO: true, MountedRoot: true, + PopulateParentVolumeUUID: false, } } diff --git a/lxd/storage/drivers/driver_powerflex.go b/lxd/storage/drivers/driver_powerflex.go index 1c0482f701fe..7d8cfb9f9c85 100644 --- a/lxd/storage/drivers/driver_powerflex.go +++ b/lxd/storage/drivers/driver_powerflex.go @@ -103,6 +103,7 @@ func (d *powerflex) Info() Info { DirectIO: true, IOUring: true, MountedRoot: false, + PopulateParentVolumeUUID: false, } } diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index d581ff4c6698..97d156937cc3 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -61,6 +61,7 @@ func (d *pure) Info() Info { DirectIO: true, IOUring: true, MountedRoot: false, + PopulateParentVolumeUUID: true, } } diff --git a/lxd/storage/drivers/driver_zfs.go b/lxd/storage/drivers/driver_zfs.go index 7920e42a902c..bdc26cecccfd 100644 --- a/lxd/storage/drivers/driver_zfs.go +++ b/lxd/storage/drivers/driver_zfs.go @@ -130,6 +130,7 @@ func (d *zfs) Info() Info { DirectIO: zfsDirectIO, MountedRoot: false, Buckets: true, + PopulateParentVolumeUUID: false, } return info From 668ff1c2bd35e1621326936da30200bceffc855f Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 12:35:16 +0000 Subject: [PATCH 06/51] lxd/storage/drivers/load: Add pure as storage driver Signed-off-by: Din Music --- lxd/storage/drivers/load.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/storage/drivers/load.go b/lxd/storage/drivers/load.go index 198b11f5d084..187945a04e50 100644 --- a/lxd/storage/drivers/load.go +++ b/lxd/storage/drivers/load.go @@ -13,6 +13,7 @@ var drivers = map[string]func() driver{ "dir": func() driver { return &dir{} }, "lvm": func() driver { return &lvm{} }, "powerflex": func() driver { return &powerflex{} }, + "pure": func() driver { return &pure{} }, "zfs": func() driver { return &zfs{} }, } From 
2313ef36101c51b8d543d267bbf31504fdd2159b Mon Sep 17 00:00:00 2001 From: Din Music Date: Mon, 30 Sep 2024 11:19:01 +0000 Subject: [PATCH 07/51] lxd/storage/drivers/pure: Add basic pool and volume configuration validation Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 36 ++++++++ lxd/storage/drivers/driver_pure_volumes.go | 95 +++++++++++++++++++++- 2 files changed, 130 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index 97d156937cc3..9d5da1c359a4 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -4,6 +4,7 @@ import ( "github.com/canonical/lxd/lxd/migration" "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/validate" ) // pureLoaded indicates whether load() function was already called for the Pure Storage driver. @@ -72,6 +73,41 @@ func (d *pure) FillConfig() error { // Validate checks that all provided keys are supported and there is no conflicting or missing configuration. func (d *pure) Validate(config map[string]string) error { + rules := map[string]func(value string) error{ + "size": validate.Optional(validate.IsSize), + // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.api.token) + // + // --- + // type: string + // shortdesc: API token for Pure Storage gateway authentication + "pure.api.token": validate.Optional(), + // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.gateway) + // + // --- + // type: string + // shortdesc: Address of the Pure Storage gateway + "pure.gateway": validate.Optional(validate.IsRequestURL), + // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.gateway.verify) + // + // --- + // type: bool + // defaultdesc: `true` + // shortdesc: Whether to verify the Pure Storage gateway's certificate + "pure.gateway.verify": validate.Optional(validate.IsBool), + // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=volume.size) + // Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB. + // --- + // type: string + // defaultdesc: `10GiB` + // shortdesc: Size/quota of the storage volume + "volume.size": validate.Optional(validate.IsMultipleOfUnit("512B")), + } + + err := d.validatePool(config, rules, d.commonVolumeRules()) + if err != nil { + return err + } + return nil } diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index faac54092ba8..9840ee347747 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -1,6 +1,7 @@ package drivers import ( + "fmt" "io" "github.com/canonical/lxd/lxd/backup" @@ -8,11 +9,38 @@ import ( "github.com/canonical/lxd/lxd/migration" "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/shared/revert" + "github.com/canonical/lxd/shared/units" + "github.com/canonical/lxd/shared/validate" ) // commonVolumeRules returns validation rules which are common for pool and volume. func (d *pure) commonVolumeRules() map[string]func(value string) error { - return map[string]func(value string) error{} + return map[string]func(value string) error{ + // lxdmeta:generate(entities=storage-pure; group=volume-conf; key=block.filesystem) + // Valid options are: `btrfs`, `ext4`, `xfs` + // If not set, `ext4` is assumed. 
+ // --- + // type: string + // condition: block-based volume with content type `filesystem` + // defaultdesc: same as `volume.block.filesystem` + // shortdesc: File system of the storage volume + "block.filesystem": validate.Optional(validate.IsOneOf(blockBackedAllowedFilesystems...)), + // lxdmeta:generate(entities=storage-pure; group=volume-conf; key=block.mount_options) + // + // --- + // type: string + // condition: block-based volume with content type `filesystem` + // defaultdesc: same as `volume.block.mount_options` + // shortdesc: Mount options for block-backed file system volumes + "block.mount_options": validate.IsAny, + // lxdmeta:generate(entities=storage-pure; group=volume-conf; key=size) + // Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB. + // --- + // type: string + // defaultdesc: same as `volume.size` + // shortdesc: Size/quota of the storage volume + "size": validate.Optional(validate.IsMultipleOfUnit("512B")), + } } // CreateVolume creates an empty volume and can optionally fill it by executing the supplied filler function. @@ -55,12 +83,77 @@ func (d *pure) HasVolume(vol Volume) (bool, error) { // FillVolumeConfig populate volume with default config. func (d *pure) FillVolumeConfig(vol Volume) error { + // Copy volume.* configuration options from pool. + // Exclude 'block.filesystem' and 'block.mount_options' + // as these ones are handled below in this function and depend on the volume's type. + err := d.fillVolumeConfig(&vol, "block.filesystem", "block.mount_options") + if err != nil { + return err + } + + // Only validate filesystem config keys for filesystem volumes or VM block volumes (which have an + // associated filesystem volume). + if vol.ContentType() == ContentTypeFS || vol.IsVMBlock() { + // VM volumes will always use the default filesystem. + if vol.IsVMBlock() { + vol.config["block.filesystem"] = DefaultFilesystem + } else { + // Inherit filesystem from pool if not set. + if vol.config["block.filesystem"] == "" { + vol.config["block.filesystem"] = d.config["volume.block.filesystem"] + } + + // Default filesystem if neither volume nor pool specify an override. + if vol.config["block.filesystem"] == "" { + // Unchangeable volume property: Set unconditionally. + vol.config["block.filesystem"] = DefaultFilesystem + } + } + + // Inherit filesystem mount options from pool if not set. + if vol.config["block.mount_options"] == "" { + vol.config["block.mount_options"] = d.config["volume.block.mount_options"] + } + + // Default filesystem mount options if neither volume nor pool specify an override. + if vol.config["block.mount_options"] == "" { + // Unchangeable volume property: Set unconditionally. + vol.config["block.mount_options"] = "discard" + } + } + return nil } // ValidateVolume validates the supplied volume config. func (d *pure) ValidateVolume(vol Volume, removeUnknownKeys bool) error { + // When creating volumes from ISO images, round its size to the next multiple of 512B. + if vol.ContentType() == ContentTypeISO { + sizeBytes, err := units.ParseByteSizeString(vol.ConfigSize()) + if err != nil { + return err + } + + // If the remainder when dividing by 512 is greater than 0, round the size up + // to the next multiple of 512. + remainder := sizeBytes % 512 + if remainder > 0 { + sizeBytes = (sizeBytes/512 + 1) * 512 + vol.SetConfigSize(fmt.Sprintf("%d", sizeBytes)) + } + } + commonRules := d.commonVolumeRules() + + // Disallow block.* settings for regular custom block volumes. 
These settings only make sense + // when using custom filesystem volumes. LXD will create the filesystem for these volumes, + // and use the mount options. When attaching a regular block volume to a VM, these are not + // mounted by LXD and therefore don't need these config keys. + if vol.volType == VolumeTypeCustom && vol.contentType == ContentTypeBlock { + delete(commonRules, "block.filesystem") + delete(commonRules, "block.mount_options") + } + return d.validateVolume(vol, commonRules, removeUnknownKeys) } From 332f95598fe825c3fb584b6035426a3d4cf0fbd4 Mon Sep 17 00:00:00 2001 From: Din Music Date: Mon, 30 Sep 2024 19:59:07 +0000 Subject: [PATCH 08/51] lxd/storage/drivers/pure: Add login and request wrapper Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 3 + lxd/storage/drivers/driver_pure_util.go | 247 ++++++++++++++++++++++++ 2 files changed, 250 insertions(+) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index 9d5da1c359a4..7f0bf8691ca9 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -19,6 +19,9 @@ type pure struct { // Holds the low level HTTP client for the Pure Storage API. // Use pure.client() to retrieve the client struct. httpClient *pureClient + + // apiVersion indicates the Pure Storage API version. + apiVersion string } // load is used initialize the driver. It should be used only once. diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index fc523864206c..04f99978c036 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -1,5 +1,49 @@ package drivers +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "slices" + "strings" + + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/api" +) + +// pureAPIVersion is the Pure Storage API version used by LXD. +// The 2.21 version is the first version that supports NVMe/TCP. +const pureAPIVersion = "2.21" + +// pureError represents an error responses from Pure Storage API. +type pureError struct { + // List of errors returned by the Pure Storage API. + Errors []struct { + Context string `json:"context"` + Message string `json:"message"` + } `json:"errors"` + + // StatusCode is not part of the response body but is used + // to store the HTTP status code. + StatusCode int `json:"-"` +} + +// Error returns the first error message from the Pure Storage API error. +func (p *pureError) Error() string { + if p == nil || len(p.Errors) == 0 { + return "" + } + + // Return the first error message without the trailing dot. + return strings.TrimSuffix(p.Errors[0].Message, ".") +} + // pureClient holds the Pure Storage HTTP client and an access token. type pureClient struct { driver *pure @@ -12,3 +56,206 @@ func newPureClient(driver *pure) *pureClient { driver: driver, } } + +// createBodyReader creates a reader for the given request body contents. +func (p *pureClient) createBodyReader(contents map[string]any) (io.Reader, error) { + body := &bytes.Buffer{} + + err := json.NewEncoder(body).Encode(contents) + if err != nil { + return nil, fmt.Errorf("Failed to write request body: %w", err) + } + + return body, nil +} + +// request issues a HTTP request against the Pure Storage gateway. 
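For reference, the pureError type above decodes gateway error payloads shaped as in the following sketch (the message text is invented for illustration; encoding/json and fmt are already imported in this file):

    body := []byte(`{"errors": [{"message": "Volume does not exist."}]}`)

    perr := &pureError{}
    if json.Unmarshal(body, perr) == nil {
    	// Prints "Volume does not exist"; Error() trims the trailing dot.
    	fmt.Println(perr.Error())
    }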
+func (p *pureClient) request(method string, url url.URL, reqBody io.Reader, reqHeaders map[string]string, respBody any, respHeaders map[string]string) error { + // Extract scheme and host from the gateway URL. + scheme, host, found := strings.Cut(p.driver.config["pure.gateway"], "://") + if !found { + return fmt.Errorf("Invalid Pure Storage gateway URL: %q", p.driver.config["pure.gateway"]) + } + + // Set request URL scheme and host. + url.Scheme = scheme + url.Host = host + + // Prefixes the given path with the API version in the format "/api//". + // If the path is "/api/api_version", the API version is not included as this path + // is used to retrieve supported API versions. + if url.Path != "/api/api_version" { + // If API version is not known yet, retrieve and cache it first. + if p.driver.apiVersion == "" { + apiVersions, err := p.getAPIVersions() + if err != nil { + return fmt.Errorf("Failed to retrieve supported Pure Storage API versions: %w", err) + } + + // Ensure the required API version is supported by Pure Storage array. + if !slices.Contains(apiVersions, pureAPIVersion) { + return fmt.Errorf("Required API version %q is not supported by Pure Storage array", pureAPIVersion) + } + + // Set API version to the driver to avoid checking the API version + // for each subsequent request. + p.driver.apiVersion = pureAPIVersion + } + + // Prefix current path with the API version. + url.Path = path.Join("api", p.driver.apiVersion, url.Path) + } + + req, err := http.NewRequest(method, url.String(), reqBody) + if err != nil { + return fmt.Errorf("Failed to create request: %w", err) + } + + // Set custom request headers. + for k, v := range reqHeaders { + req.Header.Add(k, v) + } + + req.Header.Add("Accept", "application/json") + if reqBody != nil { + req.Header.Add("Content-Type", "application/json") + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: shared.IsFalse(p.driver.config["pure.gateway.verify"]), + }, + }, + } + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to send request: %w", err) + } + + defer resp.Body.Close() + + // The unauthorized error is reported when an invalid (or expired) access token is provided. + // Wrap unauthorized requests into an API status error to allow easier checking for expired + // token in the requestAuthenticated function. + if resp.StatusCode == http.StatusUnauthorized { + return api.StatusErrorf(http.StatusUnauthorized, "Unauthorized request") + } + + // Overwrite the response data type if an error is detected. + if resp.StatusCode != http.StatusOK { + respBody = &pureError{} + } + + // Extract the response body if requested. + if respBody != nil { + err = json.NewDecoder(resp.Body).Decode(respBody) + if err != nil { + return fmt.Errorf("Failed to read response body from %q: %w", url.String(), err) + } + } + + // Extract the response headers if requested. + if respHeaders != nil { + for k, v := range resp.Header { + respHeaders[k] = strings.Join(v, ",") + } + } + + // Return the formatted error from the body + pureErr, ok := respBody.(*pureError) + if ok { + pureErr.StatusCode = resp.StatusCode + return pureErr + } + + return nil +} + +// requestAuthenticated issues an authenticated HTTP request against the Pure Storage gateway. +// In case the access token is expired, the function will try to obtain a new one. 
+func (p *pureClient) requestAuthenticated(method string, url url.URL, reqBody io.Reader, respBody any) error { + // If request fails with an unauthorized error, the request will be retried after + // requesting a new access token. + retries := 1 + + for { + // Ensure we are logged into the Pure Storage. + err := p.login() + if err != nil { + return err + } + + // Set access token as request header. + reqHeaders := map[string]string{ + "X-Auth-Token": p.accessToken, + } + + // Initiate request. + err = p.request(method, url, reqBody, reqHeaders, respBody, nil) + if err != nil { + if api.StatusErrorCheck(err, http.StatusUnauthorized) && retries > 0 { + // Access token seems to be expired. + // Reset the token and try one more time. + p.accessToken = "" + retries-- + continue + } + + // Either the error is not of type unauthorized or the maximum number of + // retries has been exceeded. + return err + } + + return nil + } +} + +// getAPIVersion returns the list of API versions that are supported by the Pure Storage. +func (p *pureClient) getAPIVersions() ([]string, error) { + var resp struct { + APIVersions []string `json:"version"` + } + + url := api.NewURL().Path("api", "api_version") + err := p.request(http.MethodGet, url.URL, nil, nil, &resp, nil) + if err != nil { + return nil, fmt.Errorf("Failed to retrieve available API versions from Pure Storage: %w", err) + } + + if len(resp.APIVersions) == 0 { + return nil, fmt.Errorf("Pure Storage does not support any API versions") + } + + return resp.APIVersions, nil +} + +// login initiates an authentication request against the Pure Storage using the API token. If successful, +// an access token is retrieved and stored within a client. The access token is then used for further +// authentication. +func (p *pureClient) login() error { + if p.accessToken != "" { + // Token has been already obtained. + return nil + } + + reqHeaders := map[string]string{ + "api-token": p.driver.config["pure.api.token"], + } + + respHeaders := make(map[string]string) + + url := api.NewURL().Path("login") + err := p.request(http.MethodPost, url.URL, nil, reqHeaders, nil, respHeaders) + if err != nil { + return fmt.Errorf("Failed to login: %w", err) + } + + p.accessToken = respHeaders["X-Auth-Token"] + if p.accessToken == "" { + return errors.New("Failed to obtain access token") + } + + return nil +} From 26320b3e2b72ca8e5e904fe25fff975a1e8d31d5 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 10:55:42 +0000 Subject: [PATCH 09/51] lxd/storage/drivers/pure: Create and delete storage pool Adds support for creating and deleting Pure Storage storage pools (pods). 
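For example, once a gateway address and API token are available, a pool
can be created with (pool name and values are placeholders):

    lxc storage create pool1 pure \
        pure.gateway=https://<gateway> \
        pure.api.token=<token>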
Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 49 ++++++- lxd/storage/drivers/driver_pure_util.go | 173 ++++++++++++++++++++++++ 2 files changed, 221 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index 7f0bf8691ca9..d44ac02fe806 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -1,9 +1,15 @@ package drivers import ( + "fmt" + "net/http" + "github.com/canonical/lxd/lxd/migration" "github.com/canonical/lxd/lxd/operations" + "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/revert" + "github.com/canonical/lxd/shared/units" "github.com/canonical/lxd/shared/validate" ) @@ -122,6 +128,35 @@ func (d *pure) Create() error { return err } + revert := revert.New() + defer revert.Fail() + + // Validate required Pure Storage configuration keys and return an error if they are + // not set. Since those keys are not cluster member specific, the general validation + // rules allow empty strings in order to create the pending storage pools. + if d.config["pure.gateway"] == "" { + return fmt.Errorf("The pure.gateway cannot be empty") + } + + if d.config["pure.api.token"] == "" { + return fmt.Errorf("The pure.api.token cannot be empty") + } + + poolSizeBytes, err := units.ParseByteSizeString(d.config["size"]) + if err != nil { + return fmt.Errorf("Failed to parse storage size: %w", err) + } + + // Create the storage pool. + err = d.client().createStoragePool(d.name, poolSizeBytes) + if err != nil { + return err + } + + revert.Add(func() { _ = d.client().deleteStoragePool(d.name) }) + + revert.Success() + return nil } @@ -132,7 +167,19 @@ func (d *pure) Update(changedConfig map[string]string) error { // Delete removes the storage pool (Pure Storage pod). func (d *pure) Delete(op *operations.Operation) error { - return nil + // First delete the storage pool on Pure Storage. + err := d.client().deleteStoragePool(d.name) + if err != nil && !api.StatusErrorCheck(err, http.StatusNotFound) { + return err + } + + // If the user completely destroyed it, call it done. + if !shared.PathExists(GetPoolMountPath(d.name)) { + return nil + } + + // On delete, wipe everything in the directory. + return wipeDirectory(GetPoolMountPath(d.name)) } // Mount mounts the storage pool. diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index 04f99978c036..b8caa9023f10 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -15,6 +15,7 @@ import ( "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/logger" ) // pureAPIVersion is the Pure Storage API version used by LXD. @@ -44,6 +45,59 @@ func (p *pureError) Error() string { return strings.TrimSuffix(p.Errors[0].Message, ".") } +// isPureErrorOf checks if the given error is of type pureError, has the specified status code, +// and its error messages contain any of the provided substrings. Note that the error message +// comparison is case-insensitive. +func isPureErrorOf(err error, statusCode int, substrings ...string) bool { + perr, ok := err.(*pureError) + if !ok { + return false + } + + if perr.StatusCode != statusCode { + return false + } + + if len(substrings) == 0 { + // Error matches the given status code and no substrings are provided. + return true + } + + // Check if any error message contains a provided substring. 
+ // Perform case-insensitive matching by converting both the + // error message and the substring to lowercase. + for _, err := range perr.Errors { + errMsg := strings.ToLower(err.Message) + + for _, substring := range substrings { + if strings.Contains(errMsg, strings.ToLower(substring)) { + return true + } + } + } + + return false +} + +// pureIsNotFoundError returns true if the error is of type pureError, its status code is 400 (bad request), +// and the error message contains a substring indicating the resource was not found. +func isPureErrorNotFound(err error) bool { + return isPureErrorOf(err, http.StatusBadRequest, "Not found", "Does not exist", "No such volume or snapshot") +} + +// pureResponse wraps the response from the Pure Storage API. In most cases, the response +// contains a list of items, even if only one item is returned. +type pureResponse[T any] struct { + Items []T `json:"items"` +} + +// pureStoragePool represents a storage pool (pod) in Pure Storage. +type pureStoragePool struct { + ID string `json:"id"` + Name string `json:"name"` + IsDestroyed bool `json:"destroyed"` +} + // pureClient holds the Pure Storage HTTP client and an access token. type pureClient struct { driver *pure @@ -259,3 +313,122 @@ func (p *pureClient) login() error { return nil } + +// getStoragePool returns the storage pool with the given name. +func (p *pureClient) getStoragePool(poolName string) (*pureStoragePool, error) { + var resp pureResponse[pureStoragePool] + + url := api.NewURL().Path("pods").WithQuery("names", poolName) + err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp) + if err != nil { + if isPureErrorNotFound(err) { + return nil, api.StatusErrorf(http.StatusNotFound, "Storage pool %q not found", poolName) + } + + return nil, fmt.Errorf("Failed to get storage pool %q: %w", poolName, err) + } + + if len(resp.Items) == 0 { + return nil, api.StatusErrorf(http.StatusNotFound, "Storage pool %q not found", poolName) + } + + return &resp.Items[0], nil +} + +// createStoragePool creates a storage pool (Pure Storage pod). +func (p *pureClient) createStoragePool(poolName string, size int64) error { + reqBody := make(map[string]any) + if size > 0 { + reqBody["quota_limit"] = size + } + + pool, err := p.getStoragePool(poolName) + if err == nil && pool.IsDestroyed { + // Storage pool exists in destroyed state, therefore, restore it. + reqBody["destroyed"] = false + + req, err := p.createBodyReader(reqBody) + if err != nil { + return err + } + + url := api.NewURL().Path("pods").WithQuery("names", poolName) + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to restore storage pool %q: %w", poolName, err) + } + + logger.Info("Storage pool has been restored", logger.Ctx{"pool": poolName}) + return nil + } + + req, err := p.createBodyReader(reqBody) + if err != nil { + return err + } + + // Storage pool does not exist in destroyed state, therefore, try to create a new one. + url := api.NewURL().Path("pods").WithQuery("names", poolName) + err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to create storage pool %q: %w", poolName, err) + } + + return nil +} + +// deleteStoragePool deletes a storage pool (Pure Storage pod). +func (p *pureClient) deleteStoragePool(poolName string) error { + pool, err := p.getStoragePool(poolName) + if err != nil { + if api.StatusErrorCheck(err, http.StatusNotFound) { + // Storage pool has been already removed. 
+ return nil + } + + return err + } + + // To delete the storage pool, we need to destroy it first by setting the destroyed property to true. + // In addition, we want to destroy all of its contents to allow the pool to be deleted. + // If the pool is already destroyed, we can skip this step. + if !pool.IsDestroyed { + req, err := p.createBodyReader(map[string]any{ + "destroyed": true, + }) + if err != nil { + return err + } + + url := api.NewURL().Path("pods").WithQuery("names", poolName).WithQuery("destroy_contents", "true") + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + if isPureErrorNotFound(err) { + return nil + } + + return fmt.Errorf("Failed to destroy storage pool %q: %w", poolName, err) + } + } + + // Eradicate the storage pool by permanently deleting it along all of its contents. + url := api.NewURL().Path("pods").WithQuery("names", poolName).WithQuery("eradicate_contents", "true") + err = p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil) + if err != nil { + if isPureErrorNotFound(err) { + return nil + } + + if isPureErrorOf(err, http.StatusBadRequest, "Cannot eradicate pod") { + // Eradication failed, therefore the pool remains in the destroyed state. + // However, we still consider it as deleted because Pure Storage SafeMode + // may be enabled, which prevents immediate eradication of the pool. + logger.Warn("Storage pool is left in destroyed state", logger.Ctx{"pool": poolName, "err": err}) + return nil + } + + return fmt.Errorf("Failed to delete storage pool %q: %w", poolName, err) + } + + return nil +} From 4c9c61196525c80b6b667ca0e19dfd361c23ebfc Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 11:28:13 +0000 Subject: [PATCH 10/51] lxd/storage/drivers/pure: Add utils for handling Pure Storage hosts Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_util.go | 101 ++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index b8caa9023f10..b40f69d57b30 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -98,6 +98,12 @@ type pureStoragePool struct { IsDestroyed bool `json:"destroyed"` } +// pureHost represents a host in Pure Storage. +type pureHost struct { + Name string `json:"name"` + ConnectionCount int `json:"connection_count"` +} + // pureClient holds the Pure Storage HTTP client and an access token. type pureClient struct { driver *pure @@ -432,3 +438,98 @@ func (p *pureClient) deleteStoragePool(poolName string) error { return nil } + +// getHosts retrieves an existing Pure Storage host. +func (p *pureClient) getHosts() ([]pureHost, error) { + var resp pureResponse[pureHost] + + url := api.NewURL().Path("hosts") + err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp) + if err != nil { + return nil, fmt.Errorf("Failed to get hosts: %w", err) + } + + return resp.Items, nil +} + +// createHost creates a new host that can be associated with specific volumes. 
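createHost below maps an existing host to a conflict error; callers that only need the host to exist can treat that conflict as success, as in this sketch (ensureHost is a hypothetical wrapper, not part of the patch):

    // ensureHost creates the host unless it already exists.
    func (p *pureClient) ensureHost(hostName string) error {
    	err := p.createHost(hostName)
    	if err != nil && !api.StatusErrorCheck(err, http.StatusConflict) {
    		return err
    	}

    	return nil
    }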
+func (p *pureClient) createHost(hostName string) error {
+	req, err := p.createBodyReader(map[string]any{})
+	if err != nil {
+		return err
+	}
+
+	url := api.NewURL().Path("hosts").WithQuery("names", hostName)
+	err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+	if err != nil {
+		if isPureErrorOf(err, http.StatusBadRequest, "Host already exists.") {
+			return api.StatusErrorf(http.StatusConflict, "Host %q already exists", hostName)
+		}
+
+		return fmt.Errorf("Failed to create host %q: %w", hostName, err)
+	}
+
+	return nil
+}
+
+// updateHost updates an existing host.
+func (p *pureClient) updateHost(hostName string) error {
+	req, err := p.createBodyReader(map[string]any{})
+	if err != nil {
+		return err
+	}
+
+	// Update the host by patching its existing entry.
+	url := api.NewURL().Path("hosts").WithQuery("names", hostName)
+	err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to update host %q: %w", hostName, err)
+	}
+
+	return nil
+}
+
+// deleteHost deletes an existing host.
+func (p *pureClient) deleteHost(hostName string) error {
+	url := api.NewURL().Path("hosts").WithQuery("names", hostName)
+	err := p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to delete host %q: %w", hostName, err)
+	}
+
+	return nil
+}
+
+// connectHostToVolume creates a connection between a host and volume. It returns true if the connection
+// was created, and false if it already existed.
+func (p *pureClient) connectHostToVolume(poolName string, volName string, hostName string) (bool, error) {
+	url := api.NewURL().Path("connections").WithQuery("host_names", hostName).WithQuery("volume_names", poolName+"::"+volName)
+
+	err := p.requestAuthenticated(http.MethodPost, url.URL, nil, nil)
+	if err != nil {
+		if isPureErrorOf(err, http.StatusBadRequest, "Connection already exists.") {
+			// Do not error out if the connection already exists.
+			return false, nil
+		}
+
+		return false, fmt.Errorf("Failed to connect volume %q to host %q: %w", volName, hostName, err)
+	}
+
+	return true, nil
+}
+
+// disconnectHostFromVolume deletes a connection between a host and volume.
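connectHostToVolume above reports whether the connection was newly created, which pairs naturally with a reverter during volume attach; a sketch, assuming poolName, volName, and hostName are in scope and revert is the shared/revert package:

    reverter := revert.New()
    defer reverter.Fail()

    created, err := p.connectHostToVolume(poolName, volName, hostName)
    if err != nil {
    	return err
    }

    if created {
    	// Undo the mapping on failure only if this call created it.
    	reverter.Add(func() { _ = p.disconnectHostFromVolume(poolName, volName, hostName) })
    }

    // ... discover and mount the block device here ...

    reverter.Success()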
+func (p *pureClient) disconnectHostFromVolume(poolName string, volName string, hostName string) error { + url := api.NewURL().Path("connections").WithQuery("host_names", hostName).WithQuery("volume_names", poolName+"::"+volName) + + err := p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil) + if err != nil { + if isPureErrorNotFound(err) { + return api.StatusErrorf(http.StatusNotFound, "Connection between host %q and volume %q not found", volName, hostName) + } + + return fmt.Errorf("Failed to disconnect volume %q from host %q: %w", volName, hostName, err) + } + + return nil +} From bfb237c9ede52d44dde4e49a823745931baa3ae8 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 12:04:36 +0000 Subject: [PATCH 11/51] lxd/storage/drivers/pure: Function to resolve Pure Storage volume name Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_util.go | 54 +++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index b40f69d57b30..332027673e51 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -13,6 +13,8 @@ import ( "slices" "strings" + "github.com/google/uuid" + "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" @@ -22,6 +24,28 @@ import ( // The 2.21 version is the first version that supports NVMe/TCP. const pureAPIVersion = "2.21" +// pureVolTypePrefixes maps volume type to storage volume name prefix. +// Use smallest possible prefixes since Pure Storage volume names are limited to 63 characters. +var pureVolTypePrefixes = map[VolumeType]string{ + VolumeTypeContainer: "c", + VolumeTypeVM: "v", + VolumeTypeImage: "i", + VolumeTypeCustom: "u", +} + +// pureContentTypeSuffixes maps volume's content type to storage volume name suffix. +var pureContentTypeSuffixes = map[ContentType]string{ + // Suffix used for block content type volumes. + ContentTypeBlock: "b", + + // Suffix used for ISO content type volumes. + ContentTypeISO: "i", +} + +// pureSnapshotPrefix is a prefix used for Pure Storage snapshots to avoid name conflicts +// when creating temporary volume from the snapshot. +var pureSnapshotPrefix = "s" + // pureError represents an error responses from Pure Storage API. type pureError struct { // List of errors returned by the Pure Storage API. @@ -533,3 +557,33 @@ func (p *pureClient) disconnectHostFromVolume(poolName string, volName string, h return nil } + +// getVolumeName returns the fully qualified name derived from the volume's UUID. +func (d *pure) getVolumeName(vol Volume) (string, error) { + volUUID, err := uuid.Parse(vol.config["volatile.uuid"]) + if err != nil { + return "", fmt.Errorf(`Failed parsing "volatile.uuid" from volume %q: %w`, vol.name, err) + } + + // Remove hypens from the UUID to create a volume name. + volName := strings.ReplaceAll(volUUID.String(), "-", "") + + // Search for the volume type prefix, and if found, prepend it to the volume name. + volumeTypePrefix, ok := pureVolTypePrefixes[vol.volType] + if ok { + volName = fmt.Sprintf("%s-%s", volumeTypePrefix, volName) + } + + // Search for the content type suffix, and if found, append it to the volume name. + contentTypeSuffix, ok := pureContentTypeSuffixes[vol.contentType] + if ok { + volName = fmt.Sprintf("%s-%s", volName, contentTypeSuffix) + } + + // If volume is snapshot, prepend snapshot prefix to its name. 
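+	// For example, a snapshot of a VM block volume resolves to "sv-<uuid>-b".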
+	if vol.IsSnapshot() {
+		volName = fmt.Sprintf("%s%s", pureSnapshotPrefix, volName)
+	}
+
+	return volName, nil
+}

From bc70f8e6856ef2e4082af4fcc89fc7c6231c1091 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 11 Dec 2024 11:28:20 +0000
Subject: [PATCH 12/51] lxd/storage/drivers/pure: Test volume name generation

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util_test.go | 117 +++++++++++++++++++
 1 file changed, 117 insertions(+)
 create mode 100644 lxd/storage/drivers/driver_pure_util_test.go

diff --git a/lxd/storage/drivers/driver_pure_util_test.go b/lxd/storage/drivers/driver_pure_util_test.go
new file mode 100644
index 000000000000..3fabf570a356
--- /dev/null
+++ b/lxd/storage/drivers/driver_pure_util_test.go
@@ -0,0 +1,117 @@
+package drivers
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_pure_getVolumeName(t *testing.T) {
+	// newTestVol creates a new Volume with the given UUID, VolumeType and ContentType.
+	newTestVol := func(volName string, volType VolumeType, contentType ContentType, uuid string) Volume {
+		config := map[string]string{
+			"volatile.uuid": uuid,
+		}
+
+		return NewVolume(nil, "testpool", volType, contentType, volName, config, nil)
+	}
+
+	tests := []struct {
+		Name        string
+		Volume      Volume
+		WantVolName string
+		WantError   string
+	}{
+		{
+			Name:      "Incorrect UUID length",
+			Volume:    newTestVol("vol-err-1", VolumeTypeContainer, ContentTypeFS, "uuid"),
+			WantError: "invalid UUID length: 4",
+		},
+		{
+			Name:      "Invalid UUID format",
+			Volume:    newTestVol("vol-err-2", VolumeTypeContainer, ContentTypeFS, "abcdefgh-1234-abcd-1234-abcdefgh"),
+			WantError: "invalid UUID format",
+		},
+		{
+			Name:        "Container FS",
+			Volume:      newTestVol("c-fs", VolumeTypeContainer, ContentTypeFS, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "c-a5289556c903409a8aa04af18a46738d",
+		},
+		{
+			Name:        "VM FS",
+			Volume:      newTestVol("vm-fs", VolumeTypeVM, ContentTypeFS, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "v-a5289556c903409a8aa04af18a46738d",
+		},
+		{
+			Name:        "VM Block",
+			Volume:      newTestVol("vm-block", VolumeTypeVM, ContentTypeBlock, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "v-a5289556c903409a8aa04af18a46738d-b",
+		},
+		{
+			Name:        "Image FS",
+			Volume:      newTestVol("img-fs", VolumeTypeImage, ContentTypeFS, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "i-a5289556c903409a8aa04af18a46738d",
+		},
+		{
+			Name:        "Image Block",
+			Volume:      newTestVol("img-block", VolumeTypeImage, ContentTypeBlock, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "i-a5289556c903409a8aa04af18a46738d-b",
+		},
+		{
+			Name:        "Custom FS",
+			Volume:      newTestVol("custom-fs", VolumeTypeCustom, ContentTypeFS, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "u-a5289556c903409a8aa04af18a46738d",
+		},
+		{
+			Name:        "Custom Block",
+			Volume:      newTestVol("custom-block", VolumeTypeCustom, ContentTypeBlock, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "u-a5289556c903409a8aa04af18a46738d-b",
+		},
+		{
+			Name:        "Custom ISO",
+			Volume:      newTestVol("custom-iso", VolumeTypeCustom, ContentTypeISO, "a5289556-c903-409a-8aa0-4af18a46738d"),
+			WantVolName: "u-a5289556c903409a8aa04af18a46738d-i",
+		},
+		{
+			Name:        "Snapshot Container FS",
+			Volume:      newTestVol("c-fs/snap0", VolumeTypeContainer, ContentTypeFS, "fd87f109-767d-4f2f-ae18-66c34276f351"),
+			WantVolName: "sc-fd87f109767d4f2fae1866c34276f351",
+		},
+		{
+			Name:   "Snapshot VM FS",
+			Volume: newTestVol("vm-fs/snap0", VolumeTypeVM, ContentTypeFS, "fd87f109-767d-4f2f-ae18-66c34276f351"),
+			WantVolName: "sv-fd87f109767d4f2fae1866c34276f351",
+		},
+		{
+			Name:        "Snapshot VM Block",
+			Volume:      newTestVol("vm-block/snap0", VolumeTypeVM, ContentTypeBlock, "fd87f109-767d-4f2f-ae18-66c34276f351"),
+			WantVolName: "sv-fd87f109767d4f2fae1866c34276f351-b",
+		},
+		{
+			Name:        "Snapshot Custom Block",
+			Volume:      newTestVol("custom-block/snap0", VolumeTypeCustom, ContentTypeBlock, "fd87f109-767d-4f2f-ae18-66c34276f351"),
+			WantVolName: "su-fd87f109767d4f2fae1866c34276f351-b",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.Name, func(t *testing.T) {
+			d := &pure{}
+
+			volName, err := d.getVolumeName(test.Volume)
+			if err != nil {
+				if test.WantError != "" {
+					assert.ErrorContains(t, err, test.WantError)
+				} else {
+					t.Errorf("pure.getVolumeName() unexpected error: %v", err)
+				}
+			} else {
+				if test.WantError != "" {
+					t.Errorf("pure.getVolumeName() expected error %q, but got none", test.WantError)
+				} else {
+					assert.Equal(t, test.WantVolName, volName)
+				}
+			}
+		})
+	}
+}

From 04606824c1cfa128cb3c806b4234dfccab37a225 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Thu, 9 Jan 2025 10:28:06 +0000
Subject: [PATCH 13/51] lxd/storage/drivers/pure: Add util function to retrieve network interfaces

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go | 29 +++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index 332027673e51..6ed70a2eca49 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -115,6 +115,14 @@ type pureResponse[T any] struct {
 	Items []T `json:"items"`
 }
 
+// pureNetworkInterface represents a network interface in Pure Storage.
+type pureNetworkInterface struct {
+	Name     string `json:"name"`
+	Ethernet struct {
+		Address string `json:"address,omitempty"`
+	} `json:"eth,omitempty"`
+}
+
 // pureStoragePool represents a storage pool (pod) in Pure Storage.
 type pureStoragePool struct {
 	ID          string `json:"id"`
@@ -344,6 +352,27 @@ func (p *pureClient) login() error {
 	return nil
 }
 
+// getNetworkInterfaces retrieves valid Pure Storage network interfaces,
+// meaning interfaces that are enabled and have an IP address configured.
+// The result can be filtered by a specific service name, where an empty
+// string represents no filtering.
+func (p *pureClient) getNetworkInterfaces(service string) ([]pureNetworkInterface, error) {
+	var resp pureResponse[pureNetworkInterface]
+
+	// Retrieve enabled network interfaces that have an IP address configured.
+	url := api.NewURL().Path("network-interfaces").WithQuery("filter", "enabled='true'").WithQuery("filter", "eth.address")
+	if service != "" {
+		url = url.WithQuery("filter", "services='"+service+"'")
+	}
+
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to retrieve Pure Storage network interfaces: %w", err)
+	}
+
+	return resp.Items, nil
+}
+
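For reference, the payload this helper decodes maps onto pureResponse[pureNetworkInterface]. A minimal standalone sketch with an abridged, invented payload (the interface name and address are placeholders inferred from the JSON tags above):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Local mirrors of the pureResponse and pureNetworkInterface shapes above.
    type networkInterface struct {
    	Name     string `json:"name"`
    	Ethernet struct {
    		Address string `json:"address,omitempty"`
    	} `json:"eth,omitempty"`
    }

    type response struct {
    	Items []networkInterface `json:"items"`
    }

    func main() {
    	// Abridged, invented payload matching the JSON tags above.
    	payload := `{"items": [{"name": "ct0.eth4", "eth": {"address": "10.21.30.100"}}]}`

    	var resp response
    	err := json.Unmarshal([]byte(payload), &resp)
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(resp.Items[0].Name, resp.Items[0].Ethernet.Address) // ct0.eth4 10.21.30.100
    }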
 // getStoragePool returns the storage pool with the given name.
 func (p *pureClient) getStoragePool(poolName string) (*pureStoragePool, error) {
 	var resp pureResponse[pureStoragePool]

From dc9992b5b22fc0ebb3de7926dd84ec9516e52d1d Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 18 Dec 2024 18:07:32 +0000
Subject: [PATCH 14/51] lxd/storage/connectors: Add iSCSI connector

Signed-off-by: Din Music
---
 lxd/storage/connectors/connector.go       |   8 +
 lxd/storage/connectors/connector_iscsi.go | 281 ++++++++++++++++++++++
 2 files changed, 289 insertions(+)
 create mode 100644 lxd/storage/connectors/connector_iscsi.go

diff --git a/lxd/storage/connectors/connector.go b/lxd/storage/connectors/connector.go
index 5225c4749469..d2cab54a1ccd 100644
--- a/lxd/storage/connectors/connector.go
+++ b/lxd/storage/connectors/connector.go
@@ -16,6 +16,9 @@ const (
 
 	// TypeSDC represents Dell SDC storage connector.
 	TypeSDC string = "sdc"
+
+	// TypeISCSI represents an iSCSI storage connector.
+	TypeISCSI string = "iscsi"
 )
 
 // session represents a connector session that is established with a target.
@@ -64,6 +67,11 @@ func NewConnector(connectorType string, serverUUID string) (Connector, error) {
 			common: common,
 		}, nil
 
+	case TypeISCSI:
+		return &connectorISCSI{
+			common: common,
+		}, nil
+
 	default:
 		// Return common connector if the type is unknown. This removes
 		// the need to check for nil or handle the error in the caller.
diff --git a/lxd/storage/connectors/connector_iscsi.go b/lxd/storage/connectors/connector_iscsi.go
new file mode 100644
index 000000000000..c0e48415dc84
--- /dev/null
+++ b/lxd/storage/connectors/connector_iscsi.go
@@ -0,0 +1,281 @@
+package connectors
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"github.com/canonical/lxd/lxd/util"
+	"github.com/canonical/lxd/shared"
+	"github.com/canonical/lxd/shared/revert"
+)
+
+const (
+	// Status code 15 (ISCSI_ERR_SESS_EXISTS) indicates that
+	// the connection is already established.
+	iscsiErrCodeSessionExists = 15
+
+	// Status code 21 (ISCSI_ERR_NO_OBJS_FOUND) indicates that
+	// no matching record, target, session, or portal was found
+	// to execute the operation on.
+	iscsiErrCodeNotFound = 21
+)
+
+var _ Connector = &connectorISCSI{}
+
+type connectorISCSI struct {
+	common
+
+	iqn string
+}
+
+// Type returns the type of the connector.
+func (c *connectorISCSI) Type() string {
+	return TypeISCSI
+}
+
+// Version returns the version of the iSCSI CLI (iscsiadm).
+func (c *connectorISCSI) Version() (string, error) {
+	// Detect and record the version of the iSCSI CLI.
+	// It will fail if iscsiadm is not installed on the host.
+	out, err := shared.RunCommandContext(context.Background(), "iscsiadm", "--version")
+	if err != nil {
+		return "", fmt.Errorf("Failed to get iscsiadm version: %w", err)
+	}
+
+	fields := strings.Split(strings.TrimSpace(out), " ")
+	if strings.HasPrefix(out, "iscsiadm version ") && len(fields) > 2 {
+		version := fmt.Sprintf("%s (iscsiadm)", fields[2])
+		return version, nil
+	}
+
+	return "", fmt.Errorf("Failed to get iscsiadm version: Unexpected output %q", out)
+}
+
+// LoadModules loads the iSCSI kernel modules.
+// Returns an error if the modules cannot be loaded.
+func (c *connectorISCSI) LoadModules() error {
+	return util.LoadModule("iscsi_tcp")
+}
+
+// QualifiedName returns the unique iSCSI Qualified Name (IQN) of the host.
+func (c *connectorISCSI) QualifiedName() (string, error) {
+	if c.iqn != "" {
+		return c.iqn, nil
+	}
+
+	// Get the unique iSCSI Qualified Name (IQN) of the host. The iscsiadm
+	// tool does not allow providing the IQN directly, so we need to extract it
+	// from the /etc/iscsi/initiatorname.iscsi file on the host.
+	filename := shared.HostPath("/etc/iscsi/initiatorname.iscsi")
+	if !shared.PathExists(filename) {
+		return "", fmt.Errorf("Failed to extract host IQN: File %q does not exist", filename)
+	}
+
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+
+	// Find the IQN line in the file.
+	lines := strings.Split(string(content), "\n")
+	for _, line := range lines {
+		iqn, ok := strings.CutPrefix(line, "InitiatorName=")
+		if ok {
+			c.iqn = iqn
+			return iqn, nil
+		}
+	}
+
+	return "", fmt.Errorf(`Failed to extract host IQN: File %q does not contain "InitiatorName"`, filename)
+}
+
+// discoverTargets discovers the available iSCSI targets on a given address.
+func (c *connectorISCSI) discoverTargets(ctx context.Context, targetAddr string) error {
+	// Discover the available iSCSI targets on a given address.
+	_, _, err := shared.RunCommandSplit(ctx, nil, nil, "iscsiadm", "--mode", "discovery", "--type", "sendtargets", "--portal", targetAddr)
+	if err != nil {
+		return fmt.Errorf("Failed to discover available iSCSI targets on %q: %w", targetAddr, err)
+	}
+
+	return nil
+}
+
+// Connect establishes a connection with the target on the given address.
+func (c *connectorISCSI) Connect(ctx context.Context, targetQN string, targetAddresses ...string) (revert.Hook, error) {
+	// Connects to the provided target address. If the connection is already established,
+	// the session is rescanned to detect new volumes.
+	connectFunc := func(ctx context.Context, s *session, targetAddr string) error {
+		if s != nil && slices.Contains(s.addresses, targetAddr) {
+			// If connection with the target address is already established,
+			// rescan the session to ensure new volumes are detected.
+			_, err := shared.RunCommandContext(ctx, "iscsiadm", "--mode", "session", "--sid", s.id, "--rescan")
+			if err != nil {
+				return err
+			}

+			return nil
+		}
+
+		// Otherwise, connect to the target address.
+		err := c.discoverTargets(ctx, targetAddr)
+		if err != nil {
+			return err
+		}
+
+		// Attempt to log in to the iSCSI target.
+		_, stderr, err := shared.RunCommandSplit(ctx, nil, nil, "iscsiadm", "--mode", "node", "--targetname", targetQN, "--portal", targetAddr, "--login")
+		if err != nil {
+			exitCode, _ := shared.ExitStatus(err)
+			if exitCode == iscsiErrCodeSessionExists {
+				// Nothing to do. Status code indicates that the connection
+				// is already established.
+				return nil
+			}
+
+			return fmt.Errorf("Failed to connect to target %q on %q via iSCSI: %w", targetQN, targetAddr, err)
+		}
+
+		if stderr != "" {
+			return fmt.Errorf("Failed to connect to target %q on %q via iSCSI: %s", targetQN, targetAddr, stderr)
+		}
+
+		return nil
+	}
+
+	return connect(ctx, c, targetQN, targetAddresses, connectFunc)
+}
+
+// ConnectAll establishes a connection with all targets available on the given address.
+func (c *connectorISCSI) ConnectAll(ctx context.Context, targetAddr string) error {
+	return fmt.Errorf("ConnectAll not implemented")
+}
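
The generic connect helper invoked at the end of Connect is shared connector plumbing that predates this patch and is not shown here. Roughly, it resolves any existing session and then tries connectFunc against every target address, succeeding if at least one address connects. A simplified sequential sketch of that contract (an assumption only; the real helper also manages revert hooks and concurrency):

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    )

    // session is a stand-in for the connectors package session type.
    type session struct {
    	id        string
    	targetQN  string
    	addresses []string
    }

    // connectSketch approximates the contract of the package's connect helper:
    // try every target address and succeed if at least one of them connects.
    func connectSketch(ctx context.Context, s *session, addresses []string, connectFunc func(ctx context.Context, s *session, addr string) error) (func(), error) {
    	connected := false
    	var lastErr error

    	for _, addr := range addresses {
    		err := connectFunc(ctx, s, addr)
    		if err != nil {
    			lastErr = err
    			continue
    		}

    		connected = true
    	}

    	if !connected {
    		return nil, errors.Join(errors.New("No target address is reachable"), lastErr)
    	}

    	// The real helper returns a revert hook; this sketch returns a no-op.
    	return func() {}, nil
    }

    func main() {
    	_, err := connectSketch(context.Background(), nil, []string{"10.0.0.1"}, func(ctx context.Context, s *session, addr string) error {
    		fmt.Println("connecting to", addr)
    		return nil
    	})
    	if err != nil {
    		panic(err)
    	}
    }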
+
+// Disconnect terminates a connection with the target.
+func (c *connectorISCSI) Disconnect(targetQN string) error {
+	// Find an existing iSCSI session.
+	session, err := c.findSession(targetQN)
+	if err != nil {
+		return err
+	}
+
+	// Disconnect from the iSCSI target if there is an existing session.
+	if session != nil {
+		// Do not pass a cancelable context as the operation is relatively short
+		// and most importantly we do not want to "partially" disconnect from
+		// the target - potentially leaving some unclosed sessions.
+		_, err = shared.RunCommandContext(context.Background(), "iscsiadm", "--mode", "node", "--targetname", targetQN, "--logout")
+		if err != nil {
+			exitCode, _ := shared.ExitStatus(err)
+			if exitCode == iscsiErrCodeNotFound {
+				// Nothing to do. Status code indicates that the session
+				// was not found. This just prevents an error in case the
+				// disconnect is called multiple times on the same target.
+				return nil
+			}
+
+			return fmt.Errorf("Failed disconnecting from iSCSI target %q: %w", targetQN, err)
+		}
+	}
+
+	return nil
+}
+
+// DisconnectAll terminates all connections with all targets.
+func (c *connectorISCSI) DisconnectAll() error {
+	return fmt.Errorf("DisconnectAll not implemented")
+}
+
+// SessionID returns the ID of an existing session.
+func (c *connectorISCSI) SessionID(targetQN string) (string, error) {
+	return "", fmt.Errorf("SessionID not implemented")
+}
+
+// findSession returns an active iSCSI session that matches the given targetQN.
+// If the session is not found, a nil session is returned.
+//
+// This function first checks for an existing session matching the provided
+// target IQN in "/sys/class/iscsi_session". If the session is found, it
+// retrieves the addresses of the active connections from
+// "/sys/class/iscsi_connection".
+func (c *connectorISCSI) findSession(targetQN string) (*session, error) {
+	// Base path for iSCSI sessions.
+	sessionBasePath := "/sys/class/iscsi_session"
+
+	// Retrieve list of existing iSCSI sessions.
+	sessions, err := os.ReadDir(sessionBasePath)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// No active sessions.
+			return nil, nil
+		}
+
+		return nil, fmt.Errorf("Failed getting a list of existing iSCSI sessions: %w", err)
+	}
+
+	sessionID := ""
+	for _, session := range sessions {
+		// Get the target IQN of the iSCSI session.
+		iqnBytes, err := os.ReadFile(filepath.Join(sessionBasePath, session.Name(), "targetname"))
+		if err != nil {
+			return nil, fmt.Errorf("Failed getting the target IQN for session %q: %w", session.Name(), err)
+		}
+
+		sessionIQN := strings.TrimSpace(string(iqnBytes))
+		if targetQN == sessionIQN {
+			// Session found.
+			sessionID = strings.TrimPrefix(session.Name(), "session")
+			break
+		}
+	}
+
+	if sessionID == "" {
+		// No active session found.
+		return nil, nil
+	}
+
+	session := &session{
+		id:       sessionID,
+		targetQN: targetQN,
+	}
+
+	connBasePath := "/sys/class/iscsi_connection"
+
+	// Retrieve list of active connections for the session.
+	conns, err := os.ReadDir(connBasePath)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// No active connections.
+			return session, nil
+		}
+
+		return nil, fmt.Errorf("Failed getting a list of existing iSCSI connections: %w", err)
+	}
+
+	// Iterate over active connections that correspond to the found session
+	// and extract their addresses.
+	connID := "connection" + sessionID
+	for _, conn := range conns {
+		if !strings.HasPrefix(conn.Name(), connID) {
+			// Connection does not belong to the session.
+			continue
+		}
+
+		// Get address of an active iSCSI connection.
+		addrPath := filepath.Join(connBasePath, conn.Name(), "address")
+		addrBytes, err := os.ReadFile(addrPath)
+		if err != nil {
+			// In case of an error when reading the address, simply skip this address.
+ // We detect addresses just to reduce the number of connection attempts. + continue + } + + addr := strings.TrimSpace(string(addrBytes)) + session.addresses = append(session.addresses, addr) + } + + return session, nil +} From 095001623ac08f7cf049914a4bda276c0f8215b0 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 12:00:57 +0000 Subject: [PATCH 15/51] lxd/storage/drivers/pure: Add iSCSI support and volume mapping utils Attempt to connect to all target addresses. Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 71 ++++ lxd/storage/drivers/driver_pure_util.go | 459 +++++++++++++++++++++++- 2 files changed, 523 insertions(+), 7 deletions(-) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index d44ac02fe806..d34ba7d2ee38 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -3,9 +3,11 @@ package drivers import ( "fmt" "net/http" + "strings" "github.com/canonical/lxd/lxd/migration" "github.com/canonical/lxd/lxd/operations" + "github.com/canonical/lxd/lxd/storage/connectors" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/revert" @@ -19,9 +21,18 @@ var pureLoaded = false // pureVersion indicates Pure Storage version. var pureVersion = "" +// pureSupportedConnectors represents a list of storage connectors that can be used with Pure Storage. +var pureSupportedConnectors = []string{ + connectors.TypeISCSI, +} + type pure struct { common + // Holds the low level connector for the Pure Storage driver. + // Use pure.connector() to retrieve the initialized connector. + storageConnector connectors.Connector + // Holds the low level HTTP client for the Pure Storage API. // Use pure.client() to retrieve the client struct. httpClient *pureClient @@ -37,10 +48,36 @@ func (d *pure) load() error { return nil } + versions := connectors.GetSupportedVersions(pureSupportedConnectors) + pureVersion = strings.Join(versions, " / ") pureLoaded = true + + // Load the kernel modules of the respective connector, ignoring those that cannot be loaded. + // Support for a specific connector is checked during pool creation. However, this + // ensures that the kernel modules are loaded, even if the host has been rebooted. + connector, err := d.connector() + if err == nil { + _ = connector.LoadModules() + } + return nil } +// connector retrieves an initialized storage connector based on the configured +// Pure Storage mode. The connector is cached in the driver struct. +func (d *pure) connector() (connectors.Connector, error) { + if d.storageConnector == nil { + connector, err := connectors.NewConnector(d.config["pure.mode"], d.state.ServerUUID) + if err != nil { + return nil, err + } + + d.storageConnector = connector + } + + return d.storageConnector, nil +} + // client returns the drivers Pure Storage client. A new client is created only if it does not already exist. func (d *pure) client() *pureClient { if d.httpClient == nil { @@ -103,6 +140,13 @@ func (d *pure) Validate(config map[string]string) error { // defaultdesc: `true` // shortdesc: Whether to verify the Pure Storage gateway's certificate "pure.gateway.verify": validate.Optional(validate.IsBool), + // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.mode) + // The mode to use to map Pure Storage volumes to the local server. 
+ // --- + // type: string + // defaultdesc: the discovered mode + // shortdesc: How volumes are mapped to the local server + "pure.mode": validate.Optional(validate.IsOneOf(pureSupportedConnectors...)), // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=volume.size) // Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB. // --- @@ -117,6 +161,33 @@ func (d *pure) Validate(config map[string]string) error { return err } + newMode := config["pure.mode"] + oldMode := d.config["pure.mode"] + + // Ensure pure.mode cannot be changed to avoid leaving volume mappings + // and prevent disturbing running instances. + if oldMode != "" && oldMode != newMode { + return fmt.Errorf("Pure Storage mode cannot be changed") + } + + // Check if the selected Pure Storage mode is supported on this node. + // Also when forming the storage pool on a LXD cluster, the mode + // that got discovered on the creating machine needs to be validated + // on the other cluster members too. This can be done here since Validate + // gets executed on every cluster member when receiving the cluster + // notification to finally create the pool. + if newMode != "" { + connector, err := connectors.NewConnector(newMode, "") + if err != nil { + return fmt.Errorf("Pure Storage mode %q is not supported: %w", newMode, err) + } + + err = connector.LoadModules() + if err != nil { + return fmt.Errorf("Pure Storage mode %q is not supported due to missing kernel modules: %w", newMode, err) + } + } + return nil } diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index 6ed70a2eca49..f93ef135e5a0 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -2,6 +2,7 @@ package drivers import ( "bytes" + "context" "crypto/tls" "encoding/json" "errors" @@ -9,21 +10,33 @@ import ( "io" "net/http" "net/url" + "os" "path" + "path/filepath" "slices" "strings" + "time" "github.com/google/uuid" + "github.com/canonical/lxd/lxd/storage/block" + "github.com/canonical/lxd/lxd/storage/connectors" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" + "github.com/canonical/lxd/shared/revert" ) // pureAPIVersion is the Pure Storage API version used by LXD. // The 2.21 version is the first version that supports NVMe/TCP. const pureAPIVersion = "2.21" +// pureServiceNameMapping maps Pure Storage mode in LXD to the corresponding Pure Storage +// service name. +var pureServiceNameMapping = map[string]string{ + connectors.TypeISCSI: "iscsi", +} + // pureVolTypePrefixes maps volume type to storage volume name prefix. // Use smallest possible prefixes since Pure Storage volume names are limited to 63 characters. var pureVolTypePrefixes = map[VolumeType]string{ @@ -130,10 +143,25 @@ type pureStoragePool struct { IsDestroyed bool `json:"destroyed"` } +// pureVolume represents a volume in Pure Storage. +type pureVolume struct { + ID string `json:"id"` + Name string `json:"name"` + IsDestroyed bool `json:"destroyed"` +} + // pureHost represents a host in Pure Storage. type pureHost struct { - Name string `json:"name"` - ConnectionCount int `json:"connection_count"` + Name string `json:"name"` + IQNs []string `json:"iqns"` + ConnectionCount int `json:"connection_count"` +} + +// purePort represents a port in Pure Storage. 
+type purePort struct { + Name string `json:"name"` + IQN string `json:"iqn,omitempty"` + NQN string `json:"nqn,omitempty"` } // pureClient holds the Pure Storage HTTP client and an access token. @@ -505,9 +533,52 @@ func (p *pureClient) getHosts() ([]pureHost, error) { return resp.Items, nil } -// createHost creates a new host that can be associated with specific volumes. -func (p *pureClient) createHost(hostName string) error { - req, err := p.createBodyReader(map[string]any{}) +// getCurrentHost retrieves the Pure Storage host linked to the current LXD host. +// The Pure Storage host is considered a match if it includes the fully qualified +// name of the LXD host that is determined by the configured mode. +func (p *pureClient) getCurrentHost() (*pureHost, error) { + connector, err := p.driver.connector() + if err != nil { + return nil, err + } + + qn, err := connector.QualifiedName() + if err != nil { + return nil, err + } + + hosts, err := p.getHosts() + if err != nil { + return nil, err + } + + for _, host := range hosts { + if slices.Contains(host.IQNs, qn) { + return &host, nil + } + } + + return nil, api.StatusErrorf(http.StatusNotFound, "Host with qualified name %q not found", qn) +} + +// createHost creates a new host with provided initiator qualified names that can be associated +// with specific volumes. +func (p *pureClient) createHost(hostName string, qns []string) error { + body := make(map[string]any, 1) + + connector, err := p.driver.connector() + if err != nil { + return err + } + + switch connector.Type() { + case connectors.TypeISCSI: + body["iqns"] = qns + default: + return fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) + } + + req, err := p.createBodyReader(body) if err != nil { return err } @@ -526,8 +597,22 @@ func (p *pureClient) createHost(hostName string) error { } // updateHost updates an existing host. -func (p *pureClient) updateHost(hostName string) error { - req, err := p.createBodyReader(map[string]any{}) +func (p *pureClient) updateHost(hostName string, qns []string) error { + body := make(map[string]any, 1) + + connector, err := p.driver.connector() + if err != nil { + return err + } + + switch connector.Type() { + case connectors.TypeISCSI: + body["iqns"] = qns + default: + return fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) + } + + req, err := p.createBodyReader(body) if err != nil { return err } @@ -587,6 +672,366 @@ func (p *pureClient) disconnectHostFromVolume(poolName string, volName string, h return nil } +// getTarget retrieves the qualified name and addresses of Pure Storage target for the configured mode. +func (p *pureClient) getTarget() (targetQN string, targetAddrs []string, err error) { + connector, err := p.driver.connector() + if err != nil { + return "", nil, err + } + + mode := connector.Type() + + // Get Pure Storage service name based on the configured mode. + service, ok := pureServiceNameMapping[mode] + if !ok { + return "", nil, fmt.Errorf("Failed to determine service name for Pure Storage mode %q", mode) + } + + // Retrieve the list of Pure Storage network interfaces. 
+	interfaces, err := p.getNetworkInterfaces(service)
+	if err != nil {
+		return "", nil, err
+	}
+
+	if len(interfaces) == 0 {
+		return "", nil, api.StatusErrorf(http.StatusNotFound, "Enabled network interface with %q service not found", service)
+	}
+
+	targetAddrs = make([]string, 0, len(interfaces))
+	for _, iface := range interfaces {
+		targetAddrs = append(targetAddrs, iface.Ethernet.Address)
+	}
+
+	// Get the qualified name of the target by iterating over the available
+	// ports until one that reports a qualified name is found. All ports have
+	// the same IQN, but it may happen that the IQN is not reported for a
+	// specific port, for example, if the port is misconfigured.
+	var nq string
+	for _, iface := range interfaces {
+		var resp pureResponse[purePort]
+
+		url := api.NewURL().Path("ports").WithQuery("filter", "name='"+iface.Name+"'")
+		err = p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+		if err != nil {
+			return "", nil, fmt.Errorf("Failed to retrieve Pure Storage targets: %w", err)
+		}
+
+		if len(resp.Items) == 0 {
+			continue
+		}
+
+		port := resp.Items[0]
+
+		if mode == connectors.TypeISCSI {
+			nq = port.IQN
+		}
+
+		if nq != "" {
+			break
+		}
+	}
+
+	if nq == "" {
+		return "", nil, api.StatusErrorf(http.StatusNotFound, "Qualified name for %q target not found", mode)
+	}
+
+	return nq, targetAddrs, nil
+}
+
+// ensureHost returns the name of the host that is configured with the given qualified name.
+// If such a host does not exist, a new one is created, where the host's name equals the
+// server name with the mode included as a suffix, because Pure Storage does not allow
+// mixing IQNs, NQNs, and WWNs on a single host.
+func (d *pure) ensureHost() (hostName string, cleanup revert.Hook, err error) {
+	var hostname string
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	connector, err := d.connector()
+	if err != nil {
+		return "", nil, err
+	}
+
+	// Get the qualified name of the host.
+	qn, err := connector.QualifiedName()
+	if err != nil {
+		return "", nil, err
+	}
+
+	// Fetch an existing Pure Storage host.
+	host, err := d.client().getCurrentHost()
+	if err != nil {
+		if !api.StatusErrorCheck(err, http.StatusNotFound) {
+			return "", nil, err
+		}
+
+		// The Pure Storage host with a qualified name of the current LXD host does not exist.
+		// Therefore, create a new one and name it after the server name.
+		serverName, err := ResolveServerName(d.state.ServerName)
+		if err != nil {
+			return "", nil, err
+		}
+
+		// Append the mode to the server name because Pure Storage does not allow mixing
+		// NQNs, IQNs, and WWNs for a single host.
+		hostname = serverName + "-" + connector.Type()
+
+		err = d.client().createHost(hostname, []string{qn})
+		if err != nil {
+			if !api.StatusErrorCheck(err, http.StatusConflict) {
+				return "", nil, err
+			}
+
+			// The host with the given name already exists, update it instead.
+			err = d.client().updateHost(hostname, []string{qn})
+			if err != nil {
+				return "", nil, err
+			}
+		} else {
+			revert.Add(func() { _ = d.client().deleteHost(hostname) })
+		}
+	} else {
+		// A host with the given qualified name already exists, reuse it.
+		hostname = host.Name
+	}
+
+	cleanup = revert.Clone().Fail
+	revert.Success()
+	return hostname, cleanup, nil
+}
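
The mapVolume and unmapVolume functions below serialize their work through remoteVolumeMapLock, a helper shared by the remote storage drivers and not included in this patch. Conceptually it is a named lock keyed on the connector and driver type; a simplified standalone sketch of that idea (the lock-name format is illustrative, not the helper's actual implementation):

    package main

    import (
    	"fmt"
    	"sync"
    )

    var (
    	locksMu sync.Mutex
    	locks   = map[string]*sync.Mutex{}
    )

    // lockNamed emulates a named lock: all callers passing the same name are
    // serialized. The real helper is context-aware and shared between drivers.
    func lockNamed(name string) (unlock func()) {
    	locksMu.Lock()
    	mu, ok := locks[name]
    	if !ok {
    		mu = &sync.Mutex{}
    		locks[name] = mu
    	}
    	locksMu.Unlock()

    	mu.Lock()
    	return mu.Unlock
    }

    func main() {
    	// One lock per connector/driver pair; the name format is illustrative.
    	unlock := lockNamed(fmt.Sprintf("RemoteVolumeMap_%s_%s", "iscsi", "pure"))
    	defer unlock()

    	fmt.Println("mapping volume while holding the lock")
    }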
+
+// mapVolume maps the given volume onto this host.
+func (d *pure) mapVolume(vol Volume) (cleanup revert.Hook, err error) {
+	reverter := revert.New()
+	defer reverter.Fail()
+
+	connector, err := d.connector()
+	if err != nil {
+		return nil, err
+	}
+
+	volName, err := d.getVolumeName(vol)
+	if err != nil {
+		return nil, err
+	}
+
+	unlock, err := remoteVolumeMapLock(connector.Type(), "pure")
+	if err != nil {
+		return nil, err
+	}
+
+	defer unlock()
+
+	// Ensure the host exists and is configured with the correct QN.
+	hostname, cleanup, err := d.ensureHost()
+	if err != nil {
+		return nil, err
+	}
+
+	reverter.Add(cleanup)
+
+	// Ensure the volume is connected to the host.
+	connCreated, err := d.client().connectHostToVolume(vol.pool, volName, hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	if connCreated {
+		reverter.Add(func() { _ = d.client().disconnectHostFromVolume(vol.pool, volName, hostname) })
+	}
+
+	// Find the array's qualified name for the configured mode.
+	targetQN, targetAddrs, err := d.client().getTarget()
+	if err != nil {
+		return nil, err
+	}
+
+	// Connect to the array.
+	connReverter, err := connector.Connect(d.state.ShutdownCtx, targetQN, targetAddrs...)
+	if err != nil {
+		return nil, err
+	}
+
+	reverter.Add(connReverter)
+
+	// If connect succeeded it means we have at least one established connection.
+	// However, its reverter does not clean up the established connections or a newly
+	// created session. Therefore, if we created a mapping, add unmapVolume to the
+	// returned (outer) reverter. Unmap ensures the target is disconnected only when
+	// no other device is using it.
+	outerReverter := revert.New()
+	if connCreated {
+		outerReverter.Add(func() { _ = d.unmapVolume(vol) })
+	}
+
+	// Add connReverter to the outer reverter, as it will immediately stop
+	// any ongoing connection attempts. Note that it must be added after
+	// unmapVolume to ensure it is called first.
+	outerReverter.Add(connReverter)
+
+	reverter.Success()
+	return outerReverter.Fail, nil
+}
+
+// unmapVolume unmaps the given volume from this host.
+func (d *pure) unmapVolume(vol Volume) error {
+	connector, err := d.connector()
+	if err != nil {
+		return err
+	}
+
+	volName, err := d.getVolumeName(vol)
+	if err != nil {
+		return err
+	}
+
+	unlock, err := remoteVolumeMapLock(connector.Type(), "pure")
+	if err != nil {
+		return err
+	}
+
+	defer unlock()
+
+	host, err := d.client().getCurrentHost()
+	if err != nil {
+		return err
+	}
+
+	// Disconnect the volume from the host and ignore error if connection does not exist.
+	err = d.client().disconnectHostFromVolume(vol.pool, volName, host.Name)
+	if err != nil && !api.StatusErrorCheck(err, http.StatusNotFound) {
+		return err
+	}
+
+	volumePath, _, _ := d.getMappedDevPath(vol, false)
+	if volumePath != "" {
+		// When an iSCSI volume is disconnected from the host, the device remains on the system.
+		//
+		// To remove the device, we need to either log out of the session or remove the
+		// device manually. Logging out of the session is not desired as it would disconnect
+		// from all connected volumes. Therefore, we need to manually remove the device.
+		if connector.Type() == connectors.TypeISCSI {
+			// removeDevice removes device from the system if the device is removable.
+			removeDevice := func(devName string) error {
+				path := fmt.Sprintf("/sys/block/%s/device/delete", devName)
+				if shared.PathExists(path) {
+					// Delete device.
+ err := os.WriteFile(path, []byte("1"), 0400) + if err != nil { + return err + } + } + + return nil + } + + devName := filepath.Base(volumePath) + err := removeDevice(devName) + if err != nil { + return fmt.Errorf("Failed to unmap volume %q: Failed to remove device %q: %w", vol.name, devName, err) + } + } + + // Wait until the volume has disappeared. + ctx, cancel := context.WithTimeout(d.state.ShutdownCtx, 30*time.Second) + defer cancel() + + if !block.WaitDiskDeviceGone(ctx, volumePath) { + return fmt.Errorf("Timeout exceeded waiting for Pure Storage volume %q to disappear on path %q", vol.name, volumePath) + } + } + + // If this was the last volume being unmapped from this system, terminate iSCSI session + // and remove the host from Pure Storage. + if host.ConnectionCount <= 1 { + targetQN, _, err := d.client().getTarget() + if err != nil { + return err + } + + // Disconnect from the target. + err = connector.Disconnect(targetQN) + if err != nil { + return err + } + + // Remove the host from Pure Storage. + err = d.client().deleteHost(host.Name) + if err != nil { + return err + } + } + + return nil +} + +// getMappedDevPath returns the local device path for the given volume. +// Indicate with mapVolume if the volume should get mapped to the system if it isn't present. +func (d *pure) getMappedDevPath(vol Volume, mapVolume bool) (string, revert.Hook, error) { + revert := revert.New() + defer revert.Fail() + + connector, err := d.connector() + if err != nil { + return "", nil, err + } + + if mapVolume { + cleanup, err := d.mapVolume(vol) + if err != nil { + return "", nil, err + } + + revert.Add(cleanup) + } + + volName, err := d.getVolumeName(vol) + if err != nil { + return "", nil, err + } + + pureVol, err := d.client().getVolume(vol.pool, volName) + if err != nil { + return "", nil, err + } + + var diskPrefix string + var diskSuffix string + + switch connector.Type() { + case connectors.TypeISCSI: + diskPrefix = "scsi-" + diskSuffix = pureVol.Serial + default: + return "", nil, fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) + } + + // Filters devices by matching the device path with the lowercase disk suffix. + // Pure Storage reports serial numbers in uppercase, so the suffix is converted + // to lowercase. + diskPathFilter := func(devPath string) bool { + return strings.HasSuffix(devPath, strings.ToLower(diskSuffix)) + } + + var devicePath string + if mapVolume { + // Wait until the disk device is mapped to the host. + devicePath, err = block.WaitDiskDevicePath(d.state.ShutdownCtx, diskPrefix, diskPathFilter) + } else { + // Expect device to be already mapped. + devicePath, err = block.GetDiskDevicePath(diskPrefix, diskPathFilter) + } + + if err != nil { + return "", nil, fmt.Errorf("Failed to locate device for volume %q: %w", vol.name, err) + } + + cleanup := revert.Clone().Fail + revert.Success() + return devicePath, cleanup, nil +} + // getVolumeName returns the fully qualified name derived from the volume's UUID. 
 func (d *pure) getVolumeName(vol Volume) (string, error) {
 	volUUID, err := uuid.Parse(vol.config["volatile.uuid"])

From 685bbd48af119bccbca25fc65203dbb2880d00b7 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 17 Jan 2025 14:51:47 +0000
Subject: [PATCH 16/51] lxd/storage/drivers/pure: Ensure multipath device is properly removed

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go | 26 ++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index f93ef135e5a0..2eb2d940e936 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -927,9 +927,29 @@ func (d *pure) unmapVolume(vol Volume) error {
 			}
 
 			devName := filepath.Base(volumePath)
-			err := removeDevice(devName)
-			if err != nil {
-				return fmt.Errorf("Failed to unmap volume %q: Failed to remove device %q: %w", vol.name, devName, err)
+			if strings.HasPrefix(devName, "dm-") {
+				// The multipath device (/dev/dm-*) itself is not removable.
+				// Therefore, we remove its slaves instead.
+				slaves, err := filepath.Glob(fmt.Sprintf("/sys/block/%s/slaves/*", devName))
+				if err != nil {
+					return fmt.Errorf("Failed to unmap volume %q: Failed to list slaves for device %q: %w", vol.name, devName, err)
+				}
+
+				// Remove slave devices.
+				for _, slave := range slaves {
+					slaveDevName := filepath.Base(slave)
+
+					err := removeDevice(slaveDevName)
+					if err != nil {
+						return fmt.Errorf("Failed to unmap volume %q: Failed to remove slave device %q: %w", vol.name, slaveDevName, err)
+					}
+				}
+			} else {
+				// For a non-multipath device (/dev/sd*), remove the device itself.
+				err := removeDevice(devName)
+				if err != nil {
+					return fmt.Errorf("Failed to unmap volume %q: Failed to remove device %q: %w", vol.name, devName, err)
+				}
 			}
 		}
 	}

From 60c2ab03e660f78dbd4a7bce50ec9ce3b9c8379d Mon Sep 17 00:00:00 2001
From: Din Music
Date: Thu, 3 Oct 2024 14:45:29 +0000
Subject: [PATCH 17/51] lxd/storage/drivers/pure: Create, mount, and unmount volume

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go    |  72 +++++-
 lxd/storage/drivers/driver_pure_volumes.go | 241 ++++++++++++++++++++-
 2 files changed, 311 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index 2eb2d940e936..a542fe78c87b 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -147,6 +147,7 @@ type pureStoragePool struct {
 type pureVolume struct {
 	ID          string `json:"id"`
 	Name        string `json:"name"`
+	Serial      string `json:"serial"`
 	IsDestroyed bool   `json:"destroyed"`
 }
 
@@ -520,6 +521,75 @@ func (p *pureClient) deleteStoragePool(poolName string) error {
 	return nil
 }
 
+// getVolume returns the volume with the given name in the provided storage pool.
+func (p *pureClient) getVolume(poolName string, volName string) (*pureVolume, error) {
+	var resp pureResponse[pureVolume]
+
+	url := api.NewURL().Path("volumes").WithQuery("names", poolName+"::"+volName)
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		if isPureErrorNotFound(err) {
+			return nil, api.StatusErrorf(http.StatusNotFound, "Volume %q not found", volName)
+		}
+
+		return nil, fmt.Errorf("Failed to get volume %q: %w", volName, err)
+	}
+
+	if len(resp.Items) == 0 {
+		return nil, api.StatusErrorf(http.StatusNotFound, "Volume %q not found", volName)
+	}
+
+	return &resp.Items[0], nil
+}
+
+// createVolume creates a new volume in the given storage pool. The volume is created with
+// the supplied size in bytes.
+func (p *pureClient) createVolume(poolName string, volName string, sizeBytes int64) error {
+	req, err := p.createBodyReader(map[string]any{
+		"provisioned": sizeBytes,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Prevent default protection groups from being applied to the new volume, as they can
+	// prevent us from eradicating the volume once deleted.
+	url := api.NewURL().Path("volumes").WithQuery("names", poolName+"::"+volName).WithQuery("with_default_protection", "false")
+	err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to create volume %q in storage pool %q: %w", volName, poolName, err)
+	}
+
+	return nil
+}
+
+// deleteVolume deletes an existing volume in the given storage pool.
+func (p *pureClient) deleteVolume(poolName string, volName string) error {
+	req, err := p.createBodyReader(map[string]any{
+		"destroyed": true,
+	})
+	if err != nil {
+		return err
+	}
+
+	url := api.NewURL().Path("volumes").WithQuery("names", poolName+"::"+volName)
+
+	// To destroy the volume, we need to patch it by setting the destroyed to true.
+	err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to destroy volume %q in storage pool %q: %w", volName, poolName, err)
+	}
+
+	// Afterwards, we can eradicate the volume. If this operation fails, the volume will remain
+	// in the destroyed state.
+	err = p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to delete volume %q in storage pool %q: %w", volName, poolName, err)
+	}
+
+	return nil
+}
+
 // getHosts retrieves existing Pure Storage hosts.
 func (p *pureClient) getHosts() ([]pureHost, error) {
 	var resp pureResponse[pureHost]
@@ -962,7 +1032,7 @@ func (d *pure) unmapVolume(vol Volume) error {
 		}
 	}
 
-	// If this was the last volume being unmapped from this system, terminate iSCSI session
+	// If this was the last volume being unmapped from this system, disconnect the active session
 	// and remove the host from Pure Storage.
 	if host.ConnectionCount <= 1 {
 		targetQN, _, err := d.client().getTarget()
diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index 9840ee347747..93a175569f50 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -3,11 +3,17 @@ package drivers
 import (
 	"fmt"
 	"io"
+	"strings"
+
+	"golang.org/x/sys/unix"
 
 	"github.com/canonical/lxd/lxd/backup"
 	"github.com/canonical/lxd/lxd/instancewriter"
 	"github.com/canonical/lxd/lxd/migration"
 	"github.com/canonical/lxd/lxd/operations"
+	"github.com/canonical/lxd/lxd/storage/filesystem"
+	"github.com/canonical/lxd/shared"
+	"github.com/canonical/lxd/shared/logger"
 	"github.com/canonical/lxd/shared/revert"
 	"github.com/canonical/lxd/shared/units"
 	"github.com/canonical/lxd/shared/validate"
@@ -45,6 +51,116 @@ func (d *pure) commonVolumeRules() map[string]func(value string) error {
 
 // CreateVolume creates an empty volume and can optionally fill it by executing the supplied filler function.
func (d *pure) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error { + client := d.client() + + revert := revert.New() + defer revert.Fail() + + volName, err := d.getVolumeName(vol) + if err != nil { + return err + } + + sizeBytes, err := units.ParseByteSizeString(vol.ConfigSize()) + if err != nil { + return err + } + + // Create the volume. + err = client.createVolume(vol.pool, volName, sizeBytes) + if err != nil { + return err + } + + revert.Add(func() { _ = client.deleteVolume(vol.pool, volName) }) + + volumeFilesystem := vol.ConfigBlockFilesystem() + if vol.contentType == ContentTypeFS { + devPath, cleanup, err := d.getMappedDevPath(vol, true) + if err != nil { + return err + } + + revert.Add(cleanup) + + _, err = makeFSType(devPath, volumeFilesystem, nil) + if err != nil { + return err + } + } + + // For VMs, also create the filesystem volume. + if vol.IsVMBlock() { + fsVol := vol.NewVMBlockFilesystemVolume() + + err := d.CreateVolume(fsVol, nil, op) + if err != nil { + return err + } + + revert.Add(func() { _ = d.DeleteVolume(fsVol, op) }) + } + + err = vol.MountTask(func(mountPath string, op *operations.Operation) error { + // Run the volume filler function if supplied. + if filler != nil && filler.Fill != nil { + var err error + var devPath string + + if IsContentBlock(vol.contentType) { + // Get the device path. + devPath, err = d.GetVolumeDiskPath(vol) + if err != nil { + return err + } + } + + allowUnsafeResize := false + if vol.volType == VolumeTypeImage { + // Allow filler to resize initial image volume as needed. + // Some storage drivers don't normally allow image volumes to be resized due to + // them having read-only snapshots that cannot be resized. However when creating + // the initial image volume and filling it before the snapshot is taken resizing + // can be allowed and is required in order to support unpacking images larger than + // the default volume size. The filler function is still expected to obey any + // volume size restrictions configured on the pool. + // Unsafe resize is also needed to disable filesystem resize safety checks. + // This is safe because if for some reason an error occurs the volume will be + // discarded rather than leaving a corrupt filesystem. + allowUnsafeResize = true + } + + // Run the filler. + err = d.runFiller(vol, devPath, filler, allowUnsafeResize) + if err != nil { + return err + } + + // Move the GPT alt header to end of disk if needed. + if vol.IsVMBlock() { + err = d.moveGPTAltHeader(devPath) + if err != nil { + return err + } + } + } + + if vol.contentType == ContentTypeFS { + // Run EnsureMountPath again after mounting and filling to ensure the mount directory has + // the correct permissions set. + err = vol.EnsureMountPath() + if err != nil { + return err + } + } + + return nil + }, op) + if err != nil { + return err + } + + revert.Success() return nil } @@ -175,6 +291,11 @@ func (d *pure) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, o // GetVolumeDiskPath returns the location of a root disk block device. func (d *pure) GetVolumeDiskPath(vol Volume) (string, error) { + if vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) { + devPath, _, err := d.getMappedDevPath(vol, false) + return devPath, err + } + return "", ErrNotSupported } @@ -185,13 +306,131 @@ func (d *pure) ListVolumes() ([]Volume, error) { // MountVolume mounts a volume and increments ref counter. Please call UnmountVolume() when done with the volume. 
func (d *pure) MountVolume(vol Volume, op *operations.Operation) error { + unlock, err := vol.MountLock() + if err != nil { + return err + } + + defer unlock() + + revert := revert.New() + defer revert.Fail() + + // Activate Pure Storage volume if needed. + volDevPath, cleanup, err := d.getMappedDevPath(vol, true) + if err != nil { + return err + } + + revert.Add(cleanup) + + if vol.contentType == ContentTypeFS { + mountPath := vol.MountPath() + if !filesystem.IsMountPoint(mountPath) { + err = vol.EnsureMountPath() + if err != nil { + return err + } + + fsType := vol.ConfigBlockFilesystem() + + if vol.mountFilesystemProbe { + fsType, err = fsProbe(volDevPath) + if err != nil { + return fmt.Errorf("Failed probing filesystem: %w", err) + } + } + + mountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), ",")) + err = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions) + if err != nil { + return err + } + + d.logger.Debug("Mounted Pure Storage volume", logger.Ctx{"volName": vol.name, "dev": volDevPath, "path": mountPath, "options": mountOptions}) + } + } else if vol.contentType == ContentTypeBlock { + // For VMs, mount the filesystem volume. + if vol.IsVMBlock() { + fsVol := vol.NewVMBlockFilesystemVolume() + err := d.MountVolume(fsVol, op) + if err != nil { + return err + } + } + } + + vol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done. + revert.Success() return nil } // UnmountVolume simulates unmounting a volume. // keepBlockDev indicates if backing block device should not be unmapped if volume is unmounted. func (d *pure) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) { - return false, nil + unlock, err := vol.MountLock() + if err != nil { + return false, err + } + + defer unlock() + + ourUnmount := false + mountPath := vol.MountPath() + refCount := vol.MountRefCountDecrement() + + // Attempt to unmount the volume. + if vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) { + if refCount > 0 { + d.logger.Debug("Skipping unmount as in use", logger.Ctx{"volName": vol.name, "refCount": refCount}) + return false, ErrInUse + } + + err := TryUnmount(mountPath, unix.MNT_DETACH) + if err != nil { + return false, err + } + + // Attempt to unmap. + if !keepBlockDev { + err = d.unmapVolume(vol) + if err != nil { + return false, err + } + } + + ourUnmount = true + } else if vol.contentType == ContentTypeBlock { + // For VMs, unmount the filesystem volume. + if vol.IsVMBlock() { + fsVol := vol.NewVMBlockFilesystemVolume() + ourUnmount, err = d.UnmountVolume(fsVol, false, op) + if err != nil { + return false, err + } + } + + if !keepBlockDev { + // Check if device is currently mapped (but don't map if not). + devPath, _, _ := d.getMappedDevPath(vol, false) + if devPath != "" && shared.PathExists(devPath) { + if refCount > 0 { + d.logger.Debug("Skipping unmount as in use", logger.Ctx{"volName": vol.name, "refCount": refCount}) + return false, ErrInUse + } + + // Attempt to unmap. + err := d.unmapVolume(vol) + if err != nil { + return false, err + } + + ourUnmount = true + } + } + } + + return ourUnmount, nil } // RenameVolume renames a volume and its snapshots. 
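
Worth noting about the two functions above: MountVolume and UnmountVolume pair through the volume's reference counter, so callers are expected to bracket device access rather than unmounting unconditionally. A sketch of the expected pairing, assuming the drivers.Driver interface exposes the signatures shown above (withMountedVolume is a hypothetical helper, not part of the patch):

    // withMountedVolume is a sketch of the expected pairing: MountVolume
    // increments the volume's ref counter, and UnmountVolume decrements it,
    // returning ErrInUse while other users still hold the volume.
    func withMountedVolume(d Driver, vol Volume, op *operations.Operation, fn func() error) error {
    	err := d.MountVolume(vol, op)
    	if err != nil {
    		return err
    	}

    	// Always release our reference, even if fn fails; the volume is only
    	// unmounted and unmapped once the last reference is dropped.
    	defer func() { _, _ = d.UnmountVolume(vol, false, op) }()

    	return fn()
    }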
From 9d37bd3cb8070c1e42cf1266128d15fb06a9ef8d Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 5 Dec 2024 13:07:02 +0000 Subject: [PATCH 18/51] lxd/storage/drivers/pure: Add NVMe/TCP support Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 7 ++++++ lxd/storage/drivers/driver_pure_util.go | 31 ++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index d34ba7d2ee38..1bc354ee746b 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -24,6 +24,7 @@ var pureVersion = "" // pureSupportedConnectors represents a list of storage connectors that can be used with Pure Storage. var pureSupportedConnectors = []string{ connectors.TypeISCSI, + connectors.TypeNVME, } type pure struct { @@ -114,6 +115,11 @@ func (d *pure) Info() Info { // FillConfig populates the storage pool's configuration file with the default values. func (d *pure) FillConfig() error { + // Use NVMe by default. + if d.config["pure.mode"] == "" { + d.config["pure.mode"] = connectors.TypeNVME + } + return nil } @@ -142,6 +148,7 @@ func (d *pure) Validate(config map[string]string) error { "pure.gateway.verify": validate.Optional(validate.IsBool), // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.mode) // The mode to use to map Pure Storage volumes to the local server. + // Supported values are `iscsi` and `nvme`. // --- // type: string // defaultdesc: the discovered mode diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index a542fe78c87b..1f586f5c5f35 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -35,6 +35,7 @@ const pureAPIVersion = "2.21" // service name. var pureServiceNameMapping = map[string]string{ connectors.TypeISCSI: "iscsi", + connectors.TypeNVME: "nvme-tcp", } // pureVolTypePrefixes maps volume type to storage volume name prefix. 
@@ -155,6 +156,7 @@ type pureVolume struct { type pureHost struct { Name string `json:"name"` IQNs []string `json:"iqns"` + NQNs []string `json:"nqns"` ConnectionCount int `json:"connection_count"` } @@ -622,8 +624,14 @@ func (p *pureClient) getCurrentHost() (*pureHost, error) { return nil, err } + mode := connector.Type() + for _, host := range hosts { - if slices.Contains(host.IQNs, qn) { + if mode == connectors.TypeISCSI && slices.Contains(host.IQNs, qn) { + return &host, nil + } + + if mode == connectors.TypeNVME && slices.Contains(host.NQNs, qn) { return &host, nil } } @@ -644,6 +652,8 @@ func (p *pureClient) createHost(hostName string, qns []string) error { switch connector.Type() { case connectors.TypeISCSI: body["iqns"] = qns + case connectors.TypeNVME: + body["nqns"] = qns default: return fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) } @@ -678,6 +688,8 @@ func (p *pureClient) updateHost(hostName string, qns []string) error { switch connector.Type() { case connectors.TypeISCSI: body["iqns"] = qns + case connectors.TypeNVME: + body["nqns"] = qns default: return fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) } @@ -796,6 +808,10 @@ func (p *pureClient) getTarget() (targetQN string, targetAddrs []string, err err nq = port.IQN } + if mode == connectors.TypeNVME { + nq = port.NQN + } + if nq != "" { break } @@ -1093,6 +1109,19 @@ func (d *pure) getMappedDevPath(vol Volume, mapVolume bool) (string, revert.Hook case connectors.TypeISCSI: diskPrefix = "scsi-" diskSuffix = pureVol.Serial + case connectors.TypeNVME: + diskPrefix = "nvme-eui." + + // The serial number is used to identify the device. The last 10 characters + // of the serial number appear as a disk device suffix. This check ensures + // we do not panic if the reported serial number is too short for parsing. + if len(pureVol.Serial) <= 10 { + // Serial number is too short. + return "", nil, fmt.Errorf("Failed to locate device for volume %q: Invalid serial number %q", vol.name, pureVol.Serial) + } + + // Extract the last 10 characters of the serial number. + diskSuffix = pureVol.Serial[len(pureVol.Serial)-10:] default: return "", nil, fmt.Errorf("Unsupported Pure Storage mode %q", connector.Type()) } From ff78ff437cf467d01bb65c442caf9b5adf852356 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 12:31:16 +0000 Subject: [PATCH 19/51] lxd/storage/drivers/pure: Delete volume Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_volumes.go | 79 +++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index 93a175569f50..369b2c120330 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -3,6 +3,8 @@ package drivers import ( "fmt" "io" + "net/http" + "os" "strings" "golang.org/x/sys/unix" @@ -13,6 +15,7 @@ import ( "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/lxd/storage/filesystem" "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/revert" "github.com/canonical/lxd/shared/units" @@ -186,14 +189,86 @@ func (d *pure) RefreshVolume(vol VolumeCopy, srcVol VolumeCopy, refreshSnapshots return err } -// DeleteVolume deletes a volume of the storage device. -// If any snapshots of the volume remain then this function will return an error. +// DeleteVolume deletes the volume and all associated snapshots. 
func (d *pure) DeleteVolume(vol Volume, op *operations.Operation) error { + volExists, err := d.HasVolume(vol) + if err != nil { + return err + } + + if !volExists { + return nil + } + + volName, err := d.getVolumeName(vol) + if err != nil { + return err + } + + host, err := d.client().getCurrentHost() + if err != nil { + // If the host doesn't exist, continue with the deletion of + // the volume and do not try to delete the volume mapping as + // it cannot exist. + if !api.StatusErrorCheck(err, http.StatusNotFound) { + return err + } + } else { + // Delete the volume mapping with the host. + err = d.client().disconnectHostFromVolume(vol.pool, volName, host.Name) + if err != nil && !api.StatusErrorCheck(err, http.StatusNotFound) { + return err + } + } + + err = d.client().deleteVolume(vol.pool, volName) + if err != nil { + return err + } + + // For VMs, also delete the filesystem volume. + if vol.IsVMBlock() { + fsVol := vol.NewVMBlockFilesystemVolume() + + err := d.DeleteVolume(fsVol, op) + if err != nil { + return err + } + } + + mountPath := vol.MountPath() + + if vol.contentType == ContentTypeFS && shared.PathExists(mountPath) { + err := wipeDirectory(mountPath) + if err != nil { + return err + } + + err = os.Remove(mountPath) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("Failed to remove %q: %w", mountPath, err) + } + } + return nil } // HasVolume indicates whether a specific volume exists on the storage pool. func (d *pure) HasVolume(vol Volume) (bool, error) { + volName, err := d.getVolumeName(vol) + if err != nil { + return false, err + } + + _, err = d.client().getVolume(vol.pool, volName) + if err != nil { + if api.StatusErrorCheck(err, http.StatusNotFound) { + return false, nil + } + + return false, err + } + return true, nil } From c55b252449c5a6ea9f594c98c062629922ef67f3 Mon Sep 17 00:00:00 2001 From: Din Music Date: Mon, 3 Feb 2025 16:02:17 +0000 Subject: [PATCH 20/51] lxd/storage/drivers/volume: Add util function to get parent volume Add util function that returns a new (parent) volume with volatile.uuid set to the value of the current's volume parent UUID. Signed-off-by: Din Music --- lxd/storage/drivers/volume.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lxd/storage/drivers/volume.go b/lxd/storage/drivers/volume.go index a243a365504f..0d4ddf9f6d13 100644 --- a/lxd/storage/drivers/volume.go +++ b/lxd/storage/drivers/volume.go @@ -565,6 +565,16 @@ func (v *Volume) SetParentUUID(parentUUID string) { v.parentUUID = parentUUID } +// GetParent returns a parent volume that has volatile.uuid set to the current's volume parent UUID. +func (v *Volume) GetParent() Volume { + parentName, _, _ := api.GetParentAndSnapshotName(v.name) + parentVolConfig := map[string]string{ + "volatile.uuid": v.parentUUID, + } + + return NewVolume(v.driver, v.pool, v.volType, v.contentType, parentName, parentVolConfig, nil) +} + // Clone returns a copy of the volume. func (v Volume) Clone() Volume { // Copy the config map to avoid internal modifications affecting external state. 
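
To make the new helper concrete: for a snapshot volume such as c-fs/snap0, GetParent strips the snapshot suffix and carries over the propagated parent UUID, which is what the Pure Storage driver later feeds into getVolumeName. A usage sketch with placeholder names and UUIDs, assuming the package's usual accessors:

    func ExampleVolume_GetParent() {
    	// Hypothetical snapshot volume; pool, names, and UUIDs are placeholders.
    	config := map[string]string{"volatile.uuid": "11111111-1111-1111-1111-111111111111"}
    	snapVol := NewVolume(nil, "testpool", VolumeTypeContainer, ContentTypeFS, "c-fs/snap0", config, nil)

    	// Propagate the parent's UUID, then derive the parent volume.
    	snapVol.SetParentUUID("22222222-2222-2222-2222-222222222222")
    	parent := snapVol.GetParent()

    	// The parent is named after the part before the snapshot separator and
    	// carries the propagated UUID in its "volatile.uuid" config key.
    	fmt.Println(parent.Name())
    	// Output: c-fs
    }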
From 9f05ea1c836a59e95a983c4888d69ea6611d3554 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 9 Oct 2024 15:56:02 +0000
Subject: [PATCH 21/51] lxd/storage/drivers/pure: Create and delete volume
 snapshots

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go    |  90 ++++++++++++++
 lxd/storage/drivers/driver_pure_volumes.go | 131 +++++++++++++++++++++
 2 files changed, 221 insertions(+)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index 1f586f5c5f35..ccd1b0b2b3d8 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -592,6 +592,96 @@ func (p *pureClient) deleteVolume(poolName string, volName string) error {
 	return nil
 }
 
+// getVolumeSnapshots retrieves all existing snapshots for the given storage volume.
+func (p *pureClient) getVolumeSnapshots(poolName string, volName string) ([]pureVolume, error) {
+	var resp pureResponse[pureVolume]
+
+	url := api.NewURL().Path("volume-snapshots").WithQuery("source_names", poolName+"::"+volName)
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		if isPureErrorNotFound(err) {
+			return nil, api.StatusErrorf(http.StatusNotFound, "Volume %q not found", volName)
+		}
+
+		return nil, fmt.Errorf("Failed to retrieve snapshots for volume %q in storage pool %q: %w", volName, poolName, err)
+	}
+
+	return resp.Items, nil
+}
+
+// getVolumeSnapshot retrieves an existing snapshot for the given storage volume.
+func (p *pureClient) getVolumeSnapshot(poolName string, volName string, snapshotName string) (*pureVolume, error) {
+	var resp pureResponse[pureVolume]
+
+	url := api.NewURL().Path("volume-snapshots").WithQuery("names", poolName+"::"+volName+"."+snapshotName)
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		if isPureErrorNotFound(err) {
+			return nil, api.StatusErrorf(http.StatusNotFound, "Snapshot %q not found", snapshotName)
+		}
+
+		return nil, fmt.Errorf("Failed to retrieve snapshot %q for volume %q in storage pool %q: %w", snapshotName, volName, poolName, err)
+	}
+
+	if len(resp.Items) == 0 {
+		return nil, api.StatusErrorf(http.StatusNotFound, "Snapshot %q not found", snapshotName)
+	}
+
+	return &resp.Items[0], nil
+}
+
+// createVolumeSnapshot creates a new snapshot for the given storage volume.
+func (p *pureClient) createVolumeSnapshot(poolName string, volName string, snapshotName string) error {
+	req, err := p.createBodyReader(map[string]any{
+		"suffix": snapshotName,
+	})
+	if err != nil {
+		return err
+	}
+
+	url := api.NewURL().Path("volume-snapshots").WithQuery("source_names", poolName+"::"+volName)
+	err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to create snapshot %q for volume %q in storage pool %q: %w", snapshotName, volName, poolName, err)
+	}
+
+	return nil
+}
+
+// deleteVolumeSnapshot deletes an existing snapshot for the given storage volume.
+func (p *pureClient) deleteVolumeSnapshot(poolName string, volName string, snapshotName string) error {
+	snapshot, err := p.getVolumeSnapshot(poolName, volName, snapshotName)
+	if err != nil {
+		return err
+	}
+
+	if !snapshot.IsDestroyed {
+		// The snapshot must be destroyed before it can be deleted (eradicated).
+		req, err := p.createBodyReader(map[string]any{
+			"destroyed": true,
+		})
+		if err != nil {
+			return err
+		}
+
+		// Destroy the snapshot.
+ url := api.NewURL().Path("volume-snapshots").WithQuery("names", poolName+"::"+volName+"."+snapshotName) + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to destroy snapshot %q for volume %q in storage pool %q: %w", snapshotName, volName, poolName, err) + } + } + + // Delete (eradicate) snapshot. + url := api.NewURL().Path("volume-snapshots").WithQuery("names", poolName+"::"+volName+"."+snapshotName) + err = p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil) + if err != nil { + return fmt.Errorf("Failed to delete snapshot %q for volume %q in storage pool %q: %w", snapshotName, volName, poolName, err) + } + + return nil +} + // getHosts retrieves an existing Pure Storage host. func (p *pureClient) getHosts() ([]pureHost, error) { var resp pureResponse[pureHost] diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index 369b2c120330..7006cd91c0cb 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -260,6 +260,28 @@ func (d *pure) HasVolume(vol Volume) (bool, error) { return false, err } + // If volume represents a snapshot, also retrieve (encoded) volume name of the parent, + // and check if the snapshot exists. + if vol.IsSnapshot() { + parentVol := vol.GetParent() + parentVolName, err := d.getVolumeName(parentVol) + if err != nil { + return false, err + } + + _, err = d.client().getVolumeSnapshot(vol.pool, parentVolName, volName) + if err != nil { + if api.StatusErrorCheck(err, http.StatusNotFound) { + return false, nil + } + + return false, err + } + + return true, nil + } + + // Otherwise, check if the volume exists. _, err = d.client().getVolume(vol.pool, volName) if err != nil { if api.StatusErrorCheck(err, http.StatusNotFound) { @@ -536,11 +558,120 @@ func (d *pure) BackupVolume(vol VolumeCopy, tarWriter *instancewriter.InstanceTa // CreateVolumeSnapshot creates a snapshot of a volume. func (d *pure) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error { + revert := revert.New() + defer revert.Fail() + + parentName, _, _ := api.GetParentAndSnapshotName(snapVol.name) + sourcePath := GetVolumeMountPath(d.name, snapVol.volType, parentName) + + if filesystem.IsMountPoint(sourcePath) { + // Attempt to sync and freeze filesystem, but do not error if not able to freeze (as filesystem + // could still be busy), as we do not guarantee the consistency of a snapshot. This is costly but + // try to ensure that all cached data has been committed to disk. If we don't then the snapshot + // of the underlying filesystem can be inconsistent or, in the worst case, empty. + unfreezeFS, err := d.filesystemFreeze(sourcePath) + if err == nil { + defer func() { _ = unfreezeFS() }() + } + } + + // Create the parent directory. + err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName) + if err != nil { + return err + } + + err = snapVol.EnsureMountPath() + if err != nil { + return err + } + + parentVol := snapVol.GetParent() + parentVolName, err := d.getVolumeName(parentVol) + if err != nil { + return err + } + + snapVolName, err := d.getVolumeName(snapVol) + if err != nil { + return err + } + + err = d.client().createVolumeSnapshot(snapVol.pool, parentVolName, snapVolName) + if err != nil { + return err + } + + revert.Add(func() { _ = d.DeleteVolumeSnapshot(snapVol, op) }) + + // For VMs, create a snapshot of the filesystem volume too. 
+	if snapVol.IsVMBlock() {
+		fsVol := snapVol.NewVMBlockFilesystemVolume()
+
+		// Set the parent volume's UUID.
+		fsVol.SetParentUUID(snapVol.parentUUID)
+
+		err := d.CreateVolumeSnapshot(fsVol, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { _ = d.DeleteVolumeSnapshot(fsVol, op) })
+	}
+
+	revert.Success()
 	return nil
 }
 
 // DeleteVolumeSnapshot removes a snapshot from the storage device.
 func (d *pure) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
+	parentVol := snapVol.GetParent()
+	parentVolName, err := d.getVolumeName(parentVol)
+	if err != nil {
+		return err
+	}
+
+	snapVolName, err := d.getVolumeName(snapVol)
+	if err != nil {
+		return err
+	}
+
+	err = d.client().deleteVolumeSnapshot(snapVol.pool, parentVolName, snapVolName)
+	if err != nil {
+		return err
+	}
+
+	mountPath := snapVol.MountPath()
+
+	if snapVol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
+		err = wipeDirectory(mountPath)
+		if err != nil {
+			return err
+		}
+
+		err = os.Remove(mountPath)
+		if err != nil && !os.IsNotExist(err) {
+			return fmt.Errorf("Failed to remove %q: %w", mountPath, err)
+		}
+	}
+
+	// Remove the parent snapshot directory if this is the last snapshot being removed.
+	err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentVol.name)
+	if err != nil {
+		return err
+	}
+
+	// For VMs, delete the filesystem volume snapshot too.
+	if snapVol.IsVMBlock() {
+		fsVol := snapVol.NewVMBlockFilesystemVolume()
+		fsVol.SetParentUUID(snapVol.parentUUID)
+
+		err := d.DeleteVolumeSnapshot(fsVol, op)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 

From b9c5eb7daf5784b4d2b1ef5fd849c2d201a547fa Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 20 Nov 2024 13:09:47 +0000
Subject: [PATCH 22/51] lxd/storage/drivers/pure: Add utils for retrieving
 storage arrays

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go | 33 ++++++++++++++++++++---
 1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index ccd1b0b2b3d8..893de49ae36e 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -137,11 +137,24 @@ type pureNetworkInterface struct {
 	} `json:"eth,omitempty"`
 }
 
+// pureEntity represents a generic entity in Pure Storage.
+type pureEntity struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+}
+
+// pureStorageArray represents a storage array in Pure Storage.
+type pureStorageArray struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+}
+
 // pureStoragePool represents a storage pool (pod) in Pure Storage.
 type pureStoragePool struct {
-	ID          string `json:"id"`
-	Name        string `json:"name"`
-	IsDestroyed bool   `json:"destroyed"`
+	ID          string       `json:"id"`
+	Name        string       `json:"name"`
+	IsDestroyed bool         `json:"destroyed"`
+	Arrays      []pureEntity `json:"arrays"`
 }
 
 // pureVolume represents a volume in Pure Storage.
@@ -404,6 +417,20 @@ func (p *pureClient) getNetworkInterfaces(service string) ([]pureNetworkInterfac
 	return resp.Items, nil
 }
 
+// getStorageArrays returns the list of storage arrays.
+// If arrayNames are provided, only those are returned.
+func (p *pureClient) getStorageArrays(arrayNames ...string) ([]pureStorageArray, error) {
+	var resp pureResponse[pureStorageArray]
+
+	url := api.NewURL().Path("arrays").WithQuery("names", strings.Join(arrayNames, ","))
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get storage arrays: %w", err)
+	}
+
+	return resp.Items, nil
+}
+
 // getStoragePool returns the storage pool with the given name.
 func (p *pureClient) getStoragePool(poolName string) (*pureStoragePool, error) {
 	var resp pureResponse[pureStoragePool]

From 2224238eae17f43134d887348fb18ec3623d78a2 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 20 Nov 2024 13:06:06 +0000
Subject: [PATCH 23/51] lxd/storage/drivers/pure: Extract storage pool, volume,
 and array space information

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go | 31 ++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index 893de49ae36e..a1b566efaf1b 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -143,10 +143,26 @@ type pureEntity struct {
 	Name string `json:"name"`
 }
 
+// pureSpace represents the usage data of a Pure Storage resource.
+type pureSpace struct {
+	// Total reserved space.
+	// For volumes, this is the available space or quota.
+	// For storage pools, this is the total reserved space (not the quota).
+	TotalBytes int64 `json:"total_provisioned"`
+
+	// Amount of logically written data that a volume or a snapshot references.
+	// This value is compared against the quota; therefore, it should be used for
+	// showing the actual used space. However, the actual used space is most likely
+	// less than this value due to the data reduction that is done by Pure Storage.
+	UsedBytes int64 `json:"virtual"`
+}
+
 // pureStorageArray represents a storage array in Pure Storage.
 type pureStorageArray struct {
-	ID   string `json:"id"`
-	Name string `json:"name"`
+	ID       string    `json:"id"`
+	Name     string    `json:"name"`
+	Capacity int64     `json:"capacity"`
+	Space    pureSpace `json:"space"`
 }
 
 // pureStoragePool represents a storage pool (pod) in Pure Storage.
@@ -154,15 +170,18 @@ type pureStoragePool struct {
 	ID          string       `json:"id"`
 	Name        string       `json:"name"`
 	IsDestroyed bool         `json:"destroyed"`
+	Quota       int64        `json:"quota_limit"`
+	Space       pureSpace    `json:"space"`
 	Arrays      []pureEntity `json:"arrays"`
 }
 
 // pureVolume represents a volume in Pure Storage.
 type pureVolume struct {
-	ID          string `json:"id"`
-	Name        string `json:"name"`
-	Serial      string `json:"serial"`
-	IsDestroyed bool   `json:"destroyed"`
+	ID          string    `json:"id"`
+	Name        string    `json:"name"`
+	Serial      string    `json:"serial"`
+	IsDestroyed bool      `json:"destroyed"`
+	Space       pureSpace `json:"space"`
 }
 
 // pureHost represents a host in Pure Storage.
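As a side note on the space fields introduced above, the standalone sketch below shows how a response carrying them is expected to decode. The payload is hand-written with made-up numbers, not a captured Pure Storage API response; the struct definitions mirror the ones added in this patch:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors pureSpace from the patch above.
type pureSpace struct {
	TotalBytes int64 `json:"total_provisioned"`
	UsedBytes  int64 `json:"virtual"`
}

// Mirrors pureStorageArray from the patch above.
type pureStorageArray struct {
	ID       string    `json:"id"`
	Name     string    `json:"name"`
	Capacity int64     `json:"capacity"`
	Space    pureSpace `json:"space"`
}

func main() {
	// Illustrative payload only; the field names follow the struct tags above.
	payload := []byte(`{"items": [{"id": "a1", "name": "array-1", "capacity": 21990232555520, "space": {"total_provisioned": 10995116277760, "virtual": 2199023255552}}]}`)

	var resp struct {
		Items []pureStorageArray `json:"items"`
	}

	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}

	for _, array := range resp.Items {
		fmt.Printf("%s: capacity=%d used=%d\n", array.Name, array.Capacity, array.Space.UsedBytes)
	}
}
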
From b217d751b41d31962c70988d3dd3ce24d215fc5e Mon Sep 17 00:00:00 2001
From: Din Music
Date: Thu, 10 Oct 2024 12:40:42 +0000
Subject: [PATCH 24/51] lxd/storage/drivers/pure: Report resource usage of
 storage pools and volumes

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure.go | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go
index 1bc354ee746b..ef1b29ceb1f5 100644
--- a/lxd/storage/drivers/driver_pure.go
+++ b/lxd/storage/drivers/driver_pure.go
@@ -274,7 +274,34 @@ func (d *pure) Unmount() (bool, error) {
 
 // GetResources returns the pool resource usage information.
 func (d *pure) GetResources() (*api.ResourcesStoragePool, error) {
+	pool, err := d.client().getStoragePool(d.name)
+	if err != nil {
+		return nil, err
+	}
+
 	res := &api.ResourcesStoragePool{}
+
+	res.Space.Total = uint64(pool.Quota)
+	res.Space.Used = uint64(pool.Space.UsedBytes)
+
+	if pool.Quota == 0 {
+		// If quota is set to 0, it means that the storage pool is unbounded. Therefore,
+		// collect the total capacity of the arrays where the storage pool is provisioned.
+		arrayNames := make([]string, 0, len(pool.Arrays))
+		for _, array := range pool.Arrays {
+			arrayNames = append(arrayNames, array.Name)
+		}
+
+		arrays, err := d.client().getStorageArrays(arrayNames...)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, array := range arrays {
+			res.Space.Total += uint64(array.Capacity)
+		}
+	}
+
 	return res, nil
 }
 

From 5931f83fbf7521c77a485f3f760348e0a5f59598 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Thu, 10 Oct 2024 12:42:48 +0000
Subject: [PATCH 25/51] lxd/storage/drivers/pure: Restore volume snapshots

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_util.go    | 33 ++++++++
 lxd/storage/drivers/driver_pure_volumes.go | 89 +++++++++++++++++++++-
 2 files changed, 119 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index a1b566efaf1b..7872dc0caab8 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -638,6 +638,34 @@ func (p *pureClient) deleteVolume(poolName string, volName string) error {
 	return nil
 }
 
+// copyVolume copies a source volume into destination volume. If overwrite is set to true,
+// the destination volume will be overwritten if it already exists.
+func (p *pureClient) copyVolume(srcPoolName string, srcVolName string, dstPoolName string, dstVolName string, overwrite bool) error {
+	req, err := p.createBodyReader(map[string]any{
+		"source": map[string]string{
+			"name": srcPoolName + "::" + srcVolName,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	url := api.NewURL().Path("volumes").WithQuery("names", dstPoolName+"::"+dstVolName).WithQuery("overwrite", fmt.Sprint(overwrite))
+
+	if !overwrite {
+		// Disable default protection groups when creating a new volume to avoid potential issues
+		// when deleting the volume, because a protection group may prevent volume eradication.
+		url = url.WithQuery("with_default_protection", "false")
+	}
+
+	err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+	if err != nil {
+		return fmt.Errorf(`Failed to copy volume "%s/%s" to "%s/%s": %w`, srcPoolName, srcVolName, dstPoolName, dstVolName, err)
+	}
+
+	return nil
+}
+
 // getVolumeSnapshots retrieves all existing snapshots for the given storage volume.
func (p *pureClient) getVolumeSnapshots(poolName string, volName string) ([]pureVolume, error) { var resp pureResponse[pureVolume] @@ -728,6 +756,11 @@ func (p *pureClient) deleteVolumeSnapshot(poolName string, volName string, snaps return nil } +// restoreVolumeSnapshot restores the volume by copying the volume snapshot into its parent volume. +func (p *pureClient) restoreVolumeSnapshot(poolName string, volName string, snapshotName string) error { + return p.copyVolume(poolName, volName+"."+snapshotName, poolName, volName, true) +} + // getHosts retrieves an existing Pure Storage host. func (p *pureClient) getHosts() ([]pureHost, error) { var resp pureResponse[pureHost] diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index 7006cd91c0cb..f6ffed32bff8 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -5,6 +5,7 @@ import ( "io" "net/http" "os" + "slices" "strings" "golang.org/x/sys/unix" @@ -538,6 +539,44 @@ func (d *pure) RenameVolume(vol Volume, newVolName string, op *operations.Operat // RestoreVolume restores a volume from a snapshot. func (d *pure) RestoreVolume(vol Volume, snapVol Volume, op *operations.Operation) error { + ourUnmount, err := d.UnmountVolume(vol, false, op) + if err != nil { + return err + } + + if ourUnmount { + defer func() { _ = d.MountVolume(vol, op) }() + } + + volName, err := d.getVolumeName(vol) + if err != nil { + return err + } + + snapVolName, err := d.getVolumeName(snapVol) + if err != nil { + return err + } + + // Overwrite existing volume by copying the given snapshot content into it. + err = d.client().restoreVolumeSnapshot(vol.pool, volName, snapVolName) + if err != nil { + return err + } + + // For VMs, also restore the filesystem volume. + if vol.IsVMBlock() { + fsVol := vol.NewVMBlockFilesystemVolume() + + snapFSVol := snapVol.NewVMBlockFilesystemVolume() + snapFSVol.SetParentUUID(snapVol.parentUUID) + + err := d.RestoreVolume(fsVol, snapFSVol, op) + if err != nil { + return err + } + } + return nil } @@ -685,13 +724,57 @@ func (d *pure) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) ( return d.UnmountVolume(snapVol, false, op) } -// VolumeSnapshots returns a list of snapshots for the volume (in no particular order). +// VolumeSnapshots returns a list of Pure Storage snapshot names for the given volume (in no particular order). func (d *pure) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) { - return []string{}, nil + volName, err := d.getVolumeName(vol) + if err != nil { + return nil, err + } + + volumeSnapshots, err := d.client().getVolumeSnapshots(vol.pool, volName) + if err != nil { + if api.StatusErrorCheck(err, http.StatusNotFound) { + return nil, nil + } + + return nil, err + } + + snapshotNames := make([]string, 0, len(volumeSnapshots)) + for _, snapshot := range volumeSnapshots { + // Snapshot name contains storage pool and volume names as prefix. + // Storage pool is delimited with double colon (::) and volume with a dot. + _, volAndSnapName, _ := strings.Cut(snapshot.Name, "::") + _, snapshotName, _ := strings.Cut(volAndSnapName, ".") + + snapshotNames = append(snapshotNames, snapshotName) + } + + return snapshotNames, nil } -// CheckVolumeSnapshots checks that the volume's snapshots, according to the storage driver, match those provided. +// CheckVolumeSnapshots checks that the volume's snapshots, according to the storage driver, +// match those provided. 
Note that additional snapshots may exist within the Pure Storage pool +// if protection groups are configured outside of LXD. func (d *pure) CheckVolumeSnapshots(vol Volume, snapVols []Volume, op *operations.Operation) error { + // Get all of the volume's snapshots in base64 encoded format. + storageSnapshotNames, err := vol.driver.VolumeSnapshots(vol, op) + if err != nil { + return err + } + + // Check if the provided list of volume snapshots matches the ones from the storage. + for _, snap := range snapVols { + snapName, err := d.getVolumeName(snap) + if err != nil { + return err + } + + if !slices.Contains(storageSnapshotNames, snapName) { + return fmt.Errorf("Snapshot %q expected but not in storage", snapName) + } + } + return nil } From cf938f39df0198e7a279babe1fcafebefe88e4d5 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 13:23:43 +0000 Subject: [PATCH 26/51] lxd/storage/drivers/pure: Get volume usage from Pure Storage Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_volumes.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index f6ffed32bff8..f4b53a198913 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -378,7 +378,17 @@ func (d *pure) UpdateVolume(vol Volume, changedConfig map[string]string) error { // GetVolumeUsage returns the disk space used by the volume. func (d *pure) GetVolumeUsage(vol Volume) (int64, error) { - return 0, ErrNotSupported + volName, err := d.getVolumeName(vol) + if err != nil { + return -1, err + } + + pureVol, err := d.client().getVolume(vol.pool, volName) + if err != nil { + return -1, err + } + + return pureVol.Space.UsedBytes, nil } // SetVolumeQuota applies a size limit on volume. From f9d28da912f3b063c10736386a8749c7922906ff Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 10 Oct 2024 17:56:02 +0000 Subject: [PATCH 27/51] lxd/storage/drivers/pure: Set or update volume quota Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_util.go | 18 +++ lxd/storage/drivers/driver_pure_volumes.go | 139 ++++++++++++++++++++- 2 files changed, 156 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index 7872dc0caab8..b3f1afbc7ed8 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -638,6 +638,24 @@ func (p *pureClient) deleteVolume(poolName string, volName string) error { return nil } +// resizeVolume resizes an existing volume. This function does not resize any filesystem inside the volume. +func (p *pureClient) resizeVolume(poolName string, volName string, sizeBytes int64, truncate bool) error { + req, err := p.createBodyReader(map[string]any{ + "provisioned": sizeBytes, + }) + if err != nil { + return err + } + + url := api.NewURL().Path("volumes").WithQuery("names", poolName+"::"+volName).WithQuery("truncate", fmt.Sprint(truncate)) + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to resize volume %q in storage pool %q: %w", volName, poolName, err) + } + + return nil +} + // copyVolume copies a source volume into destination volume. If overwrite is set to true, // the destination volume will be overwritten if it already exists. 
 func (p *pureClient) copyVolume(srcPoolName string, srcVolName string, dstPoolName string, dstVolName string, overwrite bool) error {
diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index f4b53a198913..42ef6582c3ed 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -373,6 +373,14 @@ func (d *pure) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
 
 // UpdateVolume applies config changes to the volume.
 func (d *pure) UpdateVolume(vol Volume, changedConfig map[string]string) error {
+	newSize, sizeChanged := changedConfig["size"]
+	if sizeChanged {
+		err := d.SetVolumeQuota(vol, newSize, false, nil)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -392,8 +400,137 @@ func (d *pure) GetVolumeUsage(vol Volume) (int64, error) {
 }
 
 // SetVolumeQuota applies a size limit on volume.
-// Does nothing if supplied with an empty/zero size.
+// Does nothing if supplied with a non-positive size.
 func (d *pure) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, op *operations.Operation) error {
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Convert to bytes.
+	sizeBytes, err := units.ParseByteSizeString(size)
+	if err != nil {
+		return err
+	}
+
+	// Do nothing if size isn't specified.
+	if sizeBytes <= 0 {
+		return nil
+	}
+
+	volName, err := d.getVolumeName(vol)
+	if err != nil {
+		return err
+	}
+
+	// Get volume and retrieve current size.
+	pureVol, err := d.client().getVolume(vol.pool, volName)
+	if err != nil {
+		return err
+	}
+
+	oldSizeBytes := pureVol.Space.TotalBytes
+
+	// Do nothing if volume is already specified size (+/- 512 bytes).
+	if oldSizeBytes+512 > sizeBytes && oldSizeBytes-512 < sizeBytes {
+		return nil
+	}
+
+	inUse := vol.MountInUse()
+	truncate := sizeBytes < oldSizeBytes
+
+	// Resize filesystem if needed.
+	if vol.contentType == ContentTypeFS {
+		fsType := vol.ConfigBlockFilesystem()
+
+		if sizeBytes < oldSizeBytes {
+			if !filesystemTypeCanBeShrunk(fsType) {
+				return fmt.Errorf("Filesystem %q cannot be shrunk: %w", fsType, ErrCannotBeShrunk)
+			}
+
+			if inUse {
+				// We don't allow online shrinking of filesystem volumes.
+				// Returning this error ensures the disk is resized next
+				// time the instance is started.
+				return ErrInUse
+			}
+
+			devPath, cleanup, err := d.getMappedDevPath(vol, true)
+			if err != nil {
+				return err
+			}
+
+			revert.Add(cleanup)
+
+			// Shrink filesystem first.
+			err = shrinkFileSystem(fsType, devPath, vol, sizeBytes, allowUnsafeResize)
+			if err != nil {
+				return err
+			}
+
+			// Shrink the block device.
+			err = d.client().resizeVolume(vol.pool, volName, sizeBytes, truncate)
+			if err != nil {
+				return err
+			}
+		} else {
+			// Grow block device first.
+			err = d.client().resizeVolume(vol.pool, volName, sizeBytes, truncate)
+			if err != nil {
+				return err
+			}
+
+			devPath, cleanup, err := d.getMappedDevPath(vol, true)
+			if err != nil {
+				return err
+			}
+
+			revert.Add(cleanup)
+
+			// Grow the filesystem to fill the block device.
+			err = growFileSystem(fsType, devPath, vol)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		// Only perform pre-resize checks if we are not in "unsafe" mode.
+		// In unsafe mode we expect the caller to know what they are doing and understand the risks.
+		if !allowUnsafeResize {
+			if sizeBytes < oldSizeBytes {
+				return fmt.Errorf("Block volumes cannot be shrunk: %w", ErrCannotBeShrunk)
+			}
+
+			if inUse {
+				// We don't allow online resizing of block volumes.
+				// Returning this error ensures the disk is resized next
+				// time the instance is started.
+				return ErrInUse
+			}
+		}
+
+		// Resize block device.
+		err = d.client().resizeVolume(vol.pool, volName, sizeBytes, truncate)
+		if err != nil {
+			return err
+		}
+
+		// Move the VM GPT alt header to end of disk if needed (not needed in unsafe resize mode as it is
+		// expected the caller will do all necessary post resize actions themselves).
+		if vol.IsVMBlock() && !allowUnsafeResize {
+			devPath, cleanup, err := d.getMappedDevPath(vol, true)
+			if err != nil {
+				return err
+			}
+
+			revert.Add(cleanup)
+
+			err = d.moveGPTAltHeader(devPath)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	revert.Success()
 	return nil
 }
 

From 1984b9326b39e018348050b5ff828ab23bf1510e Mon Sep 17 00:00:00 2001
From: Din Music
Date: Tue, 15 Oct 2024 13:20:04 +0000
Subject: [PATCH 28/51] lxd/storage/drivers/pure: Allow volume copy with
 snapshots and optimized images

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure.go         |   2 +-
 lxd/storage/drivers/driver_pure_util.go    |   6 +
 lxd/storage/drivers/driver_pure_volumes.go | 165 ++++++++++++++++++++-
 3 files changed, 171 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go
index ef1b29ceb1f5..39c700706423 100644
--- a/lxd/storage/drivers/driver_pure.go
+++ b/lxd/storage/drivers/driver_pure.go
@@ -100,7 +100,7 @@ func (d *pure) Info() Info {
 		Version:                      pureVersion,
 		DefaultBlockSize:             d.defaultBlockVolumeSize(),
 		DefaultVMBlockFilesystemSize: d.defaultVMBlockFilesystemSize(),
-		OptimizedImages:              false,
+		OptimizedImages:              true,
 		PreservesInodes:              false,
 		Remote:                       d.isRemote(),
 		VolumeTypes:                  []VolumeType{VolumeTypeCustom, VolumeTypeVM, VolumeTypeContainer, VolumeTypeImage},
diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go
index b3f1afbc7ed8..ea65e572f3f8 100644
--- a/lxd/storage/drivers/driver_pure_util.go
+++ b/lxd/storage/drivers/driver_pure_util.go
@@ -779,6 +779,12 @@ func (p *pureClient) restoreVolumeSnapshot(poolName string, volName string, snap
 	return p.copyVolume(poolName, volName+"."+snapshotName, poolName, volName, true)
 }
 
+// copyVolumeSnapshot copies the volume snapshot into the destination volume. The destination
+// volume is overwritten if it already exists.
+func (p *pureClient) copyVolumeSnapshot(srcPoolName string, srcVolName string, srcSnapshotName string, dstPoolName string, dstVolName string) error {
+	return p.copyVolume(srcPoolName, fmt.Sprintf("%s.%s", srcVolName, srcSnapshotName), dstPoolName, dstVolName, true)
+}
+
 // getHosts retrieves an existing Pure Storage host.
 func (p *pureClient) getHosts() ([]pureHost, error) {
 	var resp pureResponse[pureHost]
diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index 42ef6582c3ed..10a8c959ecb5 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -175,6 +175,162 @@ func (d *pure) CreateVolumeFromBackup(vol VolumeCopy, srcBackup backup.Info, src
 
 // CreateVolumeFromCopy provides same-pool volume copying functionality.
 func (d *pure) CreateVolumeFromCopy(vol VolumeCopy, srcVol VolumeCopy, allowInconsistent bool, op *operations.Operation) error {
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Function to run once the volume is created, which will ensure appropriate permissions
+	// on the mount path inside the volume, and resize the volume to the specified size.
+	postCreateTasks := func(v Volume) error {
+		if vol.contentType == ContentTypeFS {
+			// Mount the volume and ensure the permissions are set correctly inside the mounted volume.
+			err := v.MountTask(func(_ string, _ *operations.Operation) error {
+				return v.EnsureMountPath()
+			}, op)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Resize volume to the size specified.
+		err := d.SetVolumeQuota(v, v.ConfigSize(), false, op)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// For VMs, also copy the filesystem volume.
+	if vol.IsVMBlock() {
+		// Ensure that the volume's snapshots are also replaced with their filesystem counterparts.
+		fsVolSnapshots := make([]Volume, 0, len(vol.Snapshots))
+		for _, snapshot := range vol.Snapshots {
+			fsVolSnapshots = append(fsVolSnapshots, snapshot.NewVMBlockFilesystemVolume())
+		}
+
+		srcFsVolSnapshots := make([]Volume, 0, len(srcVol.Snapshots))
+		for _, snapshot := range srcVol.Snapshots {
+			srcFsVolSnapshots = append(srcFsVolSnapshots, snapshot.NewVMBlockFilesystemVolume())
+		}
+
+		fsVol := NewVolumeCopy(vol.NewVMBlockFilesystemVolume(), fsVolSnapshots...)
+		srcFSVol := NewVolumeCopy(srcVol.NewVMBlockFilesystemVolume(), srcFsVolSnapshots...)
+
+		// Ensure parent UUID is retained for the filesystem volumes.
+		fsVol.SetParentUUID(vol.parentUUID)
+		srcFSVol.SetParentUUID(srcVol.parentUUID)
+
+		err := d.CreateVolumeFromCopy(fsVol, srcFSVol, false, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { _ = d.DeleteVolume(fsVol.Volume, op) })
+	}
+
+	poolName := vol.pool
+	srcPoolName := srcVol.pool
+
+	volName, err := d.getVolumeName(vol.Volume)
+	if err != nil {
+		return err
+	}
+
+	srcVolName, err := d.getVolumeName(srcVol.Volume)
+	if err != nil {
+		return err
+	}
+
+	// Since snapshots are first copied into destination volume from which a new snapshot is created,
+	// we need to also remove the destination volume if an error occurs during copying of snapshots.
+	deleteVolCopy := true
+
+	// Copy volume snapshots.
+	// Pure Storage does not allow copying snapshots along with the volume. Therefore, we
+	// copy the snapshots sequentially. Each snapshot is first copied into the destination
+	// volume, from which a new snapshot is created. The process is repeated until all
+	// snapshots are copied.
+	if !srcVol.IsSnapshot() {
+		for _, snapshot := range vol.Snapshots {
+			_, snapshotShortName, _ := api.GetParentAndSnapshotName(snapshot.name)
+
+			// Find the corresponding source snapshot.
+			var srcSnapshot *Volume
+			for _, srcSnap := range srcVol.Snapshots {
+				_, srcSnapshotShortName, _ := api.GetParentAndSnapshotName(srcSnap.name)
+				if snapshotShortName == srcSnapshotShortName {
+					srcSnapshot = &srcSnap
+					break
+				}
+			}
+
+			if srcSnapshot == nil {
+				return fmt.Errorf("Failed to copy snapshot %q: Source snapshot does not exist", snapshotShortName)
+			}
+
+			srcSnapshotName, err := d.getVolumeName(*srcSnapshot)
+			if err != nil {
+				return err
+			}
+
+			// Copy the snapshot.
+			err = d.client().copyVolumeSnapshot(srcPoolName, srcVolName, srcSnapshotName, poolName, volName)
+			if err != nil {
+				return fmt.Errorf("Failed copying snapshot %q: %w", snapshot.name, err)
+			}
+
+			if deleteVolCopy {
+				// If at least one snapshot is copied into destination volume, we need to remove
+				// that volume as well in case of an error.
+				revert.Add(func() { _ = d.DeleteVolume(vol.Volume, op) })
+				deleteVolCopy = false
+			}
+
+			// Set snapshot's parent UUID and retain source snapshot UUID.
+			snapshot.SetParentUUID(vol.config["volatile.uuid"])
+
+			// Create snapshot from a new volume (that was created from the source snapshot).
+			// However, do not create VM's filesystem volume snapshot, as filesystem volume is
+			// copied before block volume.
+			err = d.createVolumeSnapshot(snapshot, false, op)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Finally, copy the source volume (or snapshot) into the destination volume.
+	if srcVol.IsSnapshot() {
+		// Get the snapshot parent volume name.
+		srcParentVol := srcVol.Volume.GetParent()
+		srcParentVolName, err := d.getVolumeName(srcParentVol)
+		if err != nil {
+			return err
+		}
+
+		// Copy the source snapshot into the destination volume.
+		err = d.client().copyVolumeSnapshot(srcPoolName, srcParentVolName, srcVolName, poolName, volName)
+		if err != nil {
+			return err
+		}
+	} else {
+		err = d.client().copyVolume(srcPoolName, srcVolName, poolName, volName, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Add revert to delete the destination volume, if not already added.
+	if deleteVolCopy {
+		revert.Add(func() { _ = d.DeleteVolume(vol.Volume, op) })
+	}
+
+	err = postCreateTasks(vol.Volume)
+	if err != nil {
+		return err
+	}
+
+	revert.Success()
 	return nil
 }
 
@@ -744,6 +900,12 @@ func (d *pure) BackupVolume(vol VolumeCopy, tarWriter *instancewriter.InstanceTa
 
 // CreateVolumeSnapshot creates a snapshot of a volume.
 func (d *pure) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
+	return d.createVolumeSnapshot(snapVol, true, op)
+}
+
+// createVolumeSnapshot creates a snapshot of a volume. If snapshotVMfilesystem is false, no snapshot
+// is created for a VM's filesystem volume.
+func (d *pure) createVolumeSnapshot(snapVol Volume, snapshotVMfilesystem bool, op *operations.Operation) error {
 	revert := revert.New()
 	defer revert.Fail()
 
@@ -791,7 +953,8 @@ func (d *pure) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) er
 	revert.Add(func() { _ = d.DeleteVolumeSnapshot(snapVol, op) })
 
 	// For VMs, create a snapshot of the filesystem volume too.
-	if snapVol.IsVMBlock() {
+	// Skip if snapshotVMfilesystem is false to prevent overwriting separately copied volumes.
+	if snapVol.IsVMBlock() && snapshotVMfilesystem {
 		fsVol := snapVol.NewVMBlockFilesystemVolume()
 
 		// Set the parent volume's UUID.

From 050390e700340451683eb056058c4969b33e573e Mon Sep 17 00:00:00 2001
From: Din Music
Date: Wed, 16 Oct 2024 13:32:31 +0000
Subject: [PATCH 29/51] lxd/storage/drivers/pure: Volume refresh

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_volumes.go | 204 ++++++++++++++++++++-
 1 file changed, 202 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index 10a8c959ecb5..01e07312e0cb 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -342,8 +342,208 @@ func (d *pure) CreateVolumeFromMigration(vol VolumeCopy, conn io.ReadWriteCloser
 
 // RefreshVolume updates an existing volume to match the state of another.
 func (d *pure) RefreshVolume(vol VolumeCopy, srcVol VolumeCopy, refreshSnapshots []string, allowInconsistent bool, op *operations.Operation) error {
-	_, err := genericVFSCopyVolume(d, nil, vol, srcVol, refreshSnapshots, true, allowInconsistent, op)
-	return err
+	revert := revert.New()
+	defer revert.Fail()
+
+	// For VMs, also copy the filesystem volume.
+	if vol.IsVMBlock() {
+		// Ensure that the volume's snapshots are also replaced with their filesystem counterparts.
+ fsVolSnapshots := make([]Volume, 0, len(vol.Snapshots)) + for _, snapshot := range vol.Snapshots { + fsVolSnapshots = append(fsVolSnapshots, snapshot.NewVMBlockFilesystemVolume()) + } + + srcFsVolSnapshots := make([]Volume, 0, len(srcVol.Snapshots)) + for _, snapshot := range srcVol.Snapshots { + srcFsVolSnapshots = append(srcFsVolSnapshots, snapshot.NewVMBlockFilesystemVolume()) + } + + fsVol := NewVolumeCopy(vol.NewVMBlockFilesystemVolume(), fsVolSnapshots...) + srcFSVol := NewVolumeCopy(srcVol.NewVMBlockFilesystemVolume(), srcFsVolSnapshots...) + + cleanup, err := d.refreshVolume(fsVol, srcFSVol, refreshSnapshots, allowInconsistent, op) + if err != nil { + return err + } + + revert.Add(cleanup) + } + + cleanup, err := d.refreshVolume(vol, srcVol, refreshSnapshots, allowInconsistent, op) + if err != nil { + return err + } + + revert.Add(cleanup) + + revert.Success() + return nil +} + +// refreshVolume updates an existing volume to match the state of another. For VMs, this function +// refreshes either block or filesystem volume, depending on the volume type. Therefore, the caller +// needs to ensure it is called twice - once for each volume type. +func (d *pure) refreshVolume(vol VolumeCopy, srcVol VolumeCopy, refreshSnapshots []string, allowInconsistent bool, op *operations.Operation) (revert.Hook, error) { + revert := revert.New() + defer revert.Fail() + + // Function to run once the volume is created, which will ensure appropriate permissions + // on the mount path inside the volume, and resize the volume to specified size. + postCreateTasks := func(v Volume) error { + if vol.contentType == ContentTypeFS { + // Mount the volume and ensure the permissions are set correctly inside the mounted volume. + err := v.MountTask(func(_ string, _ *operations.Operation) error { + return v.EnsureMountPath() + }, op) + if err != nil { + return err + } + } + + // Resize volume to the size specified. + err := d.SetVolumeQuota(vol.Volume, vol.ConfigSize(), false, op) + if err != nil { + return err + } + + return nil + } + + srcPoolName := srcVol.pool + poolName := vol.pool + + srcVolName, err := d.getVolumeName(srcVol.Volume) + if err != nil { + return nil, err + } + + volName, err := d.getVolumeName(vol.Volume) + if err != nil { + return nil, err + } + + // Create new reverter snapshot, which is used to revert the original volume in case of + // an error. Snapshots are also required to be first copied into destination volume, + // from which a new snapshot is created to effectively copy a snapshot. If any error + // occurs, the destination volume has been already modified and needs reverting. + reverterSnapshotName := "lxd-reverter-snapshot" + + // Remove existing reverter snapshot. + err = d.client().deleteVolumeSnapshot(vol.pool, volName, reverterSnapshotName) + if err != nil && !api.StatusErrorCheck(err, http.StatusNotFound) { + return nil, err + } + + // Create new reverter snapshot. + err = d.client().createVolumeSnapshot(vol.pool, volName, reverterSnapshotName) + if err != nil { + return nil, err + } + + revert.Add(func() { + // Restore destination volume from reverter snapshot and remove the snapshot afterwards. + _ = d.client().restoreVolumeSnapshot(vol.pool, volName, reverterSnapshotName) + _ = d.client().deleteVolumeSnapshot(vol.pool, volName, reverterSnapshotName) + }) + + if !srcVol.IsSnapshot() && len(refreshSnapshots) > 0 { + var refreshedSnapshots []string + + // Refresh volume snapshots. + // Pure Storage does not allow copying snapshots along with the volume. 
Therefore,
+		// we copy the missing snapshots sequentially. Each snapshot is first copied into the
+		// destination volume, from which a new snapshot is created. The process is repeated
+		// until all of the missing snapshots are copied.
+		for _, snapshot := range vol.Snapshots {
+			// Remove volume name prefix from the snapshot name, and check whether it
+			// has to be refreshed.
+			_, snapshotShortName, _ := api.GetParentAndSnapshotName(snapshot.name)
+			if !slices.Contains(refreshSnapshots, snapshotShortName) {
+				// Skip snapshot if it doesn't have to be refreshed.
+				continue
+			}
+
+			// Find the corresponding source snapshot.
+			var srcSnapshot *Volume
+			for _, srcSnap := range srcVol.Snapshots {
+				_, srcSnapshotShortName, _ := api.GetParentAndSnapshotName(srcSnap.name)
+				if snapshotShortName == srcSnapshotShortName {
+					srcSnapshot = &srcSnap
+					break
+				}
+			}
+
+			if srcSnapshot == nil {
+				return nil, fmt.Errorf("Failed to refresh snapshot %q: Source snapshot does not exist", snapshotShortName)
+			}
+
+			srcSnapshotName, err := d.getVolumeName(*srcSnapshot)
+			if err != nil {
+				return nil, err
+			}
+
+			// Overwrite existing destination volume with snapshot.
+			err = d.client().copyVolumeSnapshot(srcPoolName, srcVolName, srcSnapshotName, poolName, volName)
+			if err != nil {
+				return nil, err
+			}
+
+			// Set snapshot's parent UUID.
+			snapshot.SetParentUUID(vol.config["volatile.uuid"])
+
+			// Create snapshot of a new volume. Do not copy VM's filesystem volume snapshot,
+			// as FS volumes are already copied by this point.
+			err = d.createVolumeSnapshot(snapshot, false, op)
+			if err != nil {
+				return nil, err
+			}
+
+			revert.Add(func() { _ = d.DeleteVolumeSnapshot(snapshot, op) })
+
+			// Append snapshot to the list of successfully refreshed snapshots.
+			refreshedSnapshots = append(refreshedSnapshots, snapshotShortName)
+		}
+
+		// Ensure all snapshots were successfully refreshed.
+		missing := shared.RemoveElementsFromSlice(refreshSnapshots, refreshedSnapshots...)
+		if len(missing) > 0 {
+			return nil, fmt.Errorf("Failed to refresh snapshots %v", missing)
+		}
+	}
+
+	// Finally, copy the source volume (or snapshot) into the destination volume.
+	if srcVol.IsSnapshot() {
+		// Find the snapshot parent volume.
+		srcParentVol := srcVol.Volume.GetParent()
+		srcParentVolName, err := d.getVolumeName(srcParentVol)
+		if err != nil {
+			return nil, err
+		}
+
+		// Copy the source snapshot into the destination volume.
+		err = d.client().copyVolumeSnapshot(srcPoolName, srcParentVolName, srcVolName, poolName, volName)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		err = d.client().copyVolume(srcPoolName, srcVolName, poolName, volName, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = postCreateTasks(vol.Volume)
+	if err != nil {
+		return nil, err
+	}
+
+	cleanup := revert.Clone().Fail
+	revert.Success()
+
+	// Remove temporary reverter snapshot.
+	_ = d.client().deleteVolumeSnapshot(vol.pool, volName, reverterSnapshotName)
+
+	return cleanup, nil
 }

From a4c92465a32644447c879ef7c76b8535c1f64531 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 18 Oct 2024 08:37:10 +0000
Subject: [PATCH 30/51] lxd/storage/drivers/pure: Mount/unmount volume snapshot

Pure Storage does not allow mounting snapshots directly; therefore, we
have to create a new volume from the snapshot before mounting it.

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_volumes.go | 101 ++++++++++++++++++++-
 1 file changed, 97 insertions(+), 4 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index 01e07312e0cb..a626b49c015a 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -1224,14 +1224,107 @@ func (d *pure) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) er
 	return nil
 }
 
-// MountVolumeSnapshot simulates mounting a volume snapshot.
+// MountVolumeSnapshot creates a new temporary volume from a volume snapshot to allow mounting it.
 func (d *pure) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
-	return d.MountVolume(snapVol, op)
+	revert := revert.New()
+	defer revert.Fail()
+
+	parentVol := snapVol.GetParent()
+
+	// Get the parent volume name.
+	parentVolName, err := d.getVolumeName(parentVol)
+	if err != nil {
+		return err
+	}
+
+	// Get the snapshot volume name.
+	snapVolName, err := d.getVolumeName(snapVol)
+	if err != nil {
+		return err
+	}
+
+	// A Pure Storage snapshot cannot be mounted. To mount a snapshot, a new volume
+	// has to be created from the snapshot.
+	err = d.client().copyVolumeSnapshot(snapVol.pool, parentVolName, snapVolName, snapVol.pool, snapVolName)
+	if err != nil {
+		return err
+	}
+
+	// Ensure the temporary snapshot volume is removed in case of an error.
+	revert.Add(func() { _ = d.client().deleteVolume(snapVol.pool, snapVolName) })
+
+	// For VMs, also create the temporary filesystem volume snapshot.
+	if snapVol.IsVMBlock() {
+		snapFsVol := snapVol.NewVMBlockFilesystemVolume()
+		snapFsVol.SetParentUUID(snapVol.parentUUID)
+
+		parentFsVol := snapFsVol.GetParent()
+
+		snapFsVolName, err := d.getVolumeName(snapFsVol)
+		if err != nil {
+			return err
+		}
+
+		parentFsVolName, err := d.getVolumeName(parentFsVol)
+		if err != nil {
+			return err
+		}
+
+		err = d.client().copyVolumeSnapshot(snapVol.pool, parentFsVolName, snapFsVolName, snapVol.pool, snapFsVolName)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { _ = d.client().deleteVolume(snapVol.pool, snapFsVolName) })
+	}
+
+	err = d.MountVolume(snapVol, op)
+	if err != nil {
+		return err
+	}
+
+	revert.Success()
+	return nil
 }
 
-// UnmountVolumeSnapshot simulates unmounting a volume snapshot.
+// UnmountVolumeSnapshot unmounts and deletes the volume that was temporarily created from a snapshot
+// to allow mounting it.
 func (d *pure) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
-	return d.UnmountVolume(snapVol, false, op)
+	ourUnmount, err := d.UnmountVolume(snapVol, false, op)
+	if err != nil {
+		return false, err
+	}
+
+	if !ourUnmount {
+		return false, nil
+	}
+
+	snapVolName, err := d.getVolumeName(snapVol)
+	if err != nil {
+		return true, err
+	}
+
+	// Cleanup temporary snapshot volume.
+	err = d.client().deleteVolume(snapVol.pool, snapVolName)
+	if err != nil {
+		return true, err
+	}
+
+	// For VMs, also cleanup the temporary volume for a filesystem snapshot.
+	if snapVol.IsVMBlock() {
+		snapFsVol := snapVol.NewVMBlockFilesystemVolume()
+		snapFsVolName, err := d.getVolumeName(snapFsVol)
+		if err != nil {
+			return true, err
+		}
+
+		err = d.client().deleteVolume(snapVol.pool, snapFsVolName)
+		if err != nil {
+			return true, err
+		}
+	}
+
+	return ourUnmount, nil
 }
 
 // VolumeSnapshots returns a list of Pure Storage snapshot names for the given volume (in no particular order).
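A quick standalone sketch of the naming convention the client helpers in these patches rely on when addressing volumes and snapshots: a pod (pool) and a volume are joined with a double colon, and a snapshot suffix is appended with a dot. The pool, volume, and snapshot names below are placeholders:

package main

import "fmt"

// qualifiedVolume builds the fully qualified Pure Storage volume name.
func qualifiedVolume(pool, vol string) string {
	return pool + "::" + vol
}

// qualifiedSnapshot builds the fully qualified Pure Storage snapshot name.
func qualifiedSnapshot(pool, vol, snap string) string {
	return qualifiedVolume(pool, vol) + "." + snap
}

func main() {
	fmt.Println(qualifiedVolume("pool1", "vol1"))            // pool1::vol1
	fmt.Println(qualifiedSnapshot("pool1", "vol1", "snap0")) // pool1::vol1.snap0
}
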
From bba1c1bd893f56edab76f5d78dd7224fb8ab68e5 Mon Sep 17 00:00:00 2001 From: Din Music Date: Tue, 22 Oct 2024 10:00:06 +0000 Subject: [PATCH 31/51] lxd/storage/drivers/pure: Volume migration Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 43 +++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index 39c700706423..f7273606ce31 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -307,5 +307,46 @@ func (d *pure) GetResources() (*api.ResourcesStoragePool, error) { // MigrationTypes returns the type of transfer methods to be used when doing migrations between pools in preference order. func (d *pure) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type { - return []migration.Type{} + var rsyncFeatures []string + + // Do not pass compression argument to rsync if the associated + // config key, that is rsync.compression, is set to false. + if shared.IsFalse(d.Config()["rsync.compression"]) { + rsyncFeatures = []string{"xattrs", "delete", "bidirectional"} + } else { + rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"} + } + + if refresh { + var transportType migration.MigrationFSType + + if IsContentBlock(contentType) { + transportType = migration.MigrationFSType_BLOCK_AND_RSYNC + } else { + transportType = migration.MigrationFSType_RSYNC + } + + return []migration.Type{ + { + FSType: transportType, + Features: rsyncFeatures, + }, + } + } + + if contentType == ContentTypeBlock { + return []migration.Type{ + { + FSType: migration.MigrationFSType_BLOCK_AND_RSYNC, + Features: rsyncFeatures, + }, + } + } + + return []migration.Type{ + { + FSType: migration.MigrationFSType_RSYNC, + Features: rsyncFeatures, + }, + } } From a8ca3091e2ad0b95f55b288e4a18f87935706dba Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 22 Nov 2024 10:56:10 +0000 Subject: [PATCH 32/51] lxd/storage/drivers/pure: Handle cluster member volume move Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_volumes.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go index a626b49c015a..5313f7ddad84 100644 --- a/lxd/storage/drivers/driver_pure_volumes.go +++ b/lxd/storage/drivers/driver_pure_volumes.go @@ -336,6 +336,24 @@ func (d *pure) CreateVolumeFromCopy(vol VolumeCopy, srcVol VolumeCopy, allowInco // CreateVolumeFromMigration creates a volume being sent via a migration. func (d *pure) CreateVolumeFromMigration(vol VolumeCopy, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error { + // When performing a cluster member move prepare the volumes on the target side. 
+ if volTargetArgs.ClusterMoveSourceName != "" { + err := vol.EnsureMountPath() + if err != nil { + return err + } + + if vol.IsVMBlock() { + fsVol := NewVolumeCopy(vol.NewVMBlockFilesystemVolume()) + err := d.CreateVolumeFromMigration(fsVol, conn, volTargetArgs, preFiller, op) + if err != nil { + return err + } + } + + return nil + } + _, err := genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op) return err } From d59b924f18295378aef2b6e8418e5a24c90febb3 Mon Sep 17 00:00:00 2001 From: Din Music Date: Wed, 20 Nov 2024 14:36:55 +0000 Subject: [PATCH 33/51] lxd/storage/drivers/pure: Allow changing storage pool quota Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure.go | 17 +++++++++++++++++ lxd/storage/drivers/driver_pure_util.go | 21 +++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go index f7273606ce31..59fb74ae5635 100644 --- a/lxd/storage/drivers/driver_pure.go +++ b/lxd/storage/drivers/driver_pure.go @@ -240,6 +240,23 @@ func (d *pure) Create() error { // Update applies any driver changes required from a configuration change. func (d *pure) Update(changedConfig map[string]string) error { + newPoolSizeBytes, err := units.ParseByteSizeString(changedConfig["size"]) + if err != nil { + return fmt.Errorf("Failed to parse storage size: %w", err) + } + + oldPoolSizeBytes, err := units.ParseByteSizeString(d.config["size"]) + if err != nil { + return fmt.Errorf("Failed to parse old storage size: %w", err) + } + + if newPoolSizeBytes != oldPoolSizeBytes { + err = d.client().updateStoragePool(d.name, newPoolSizeBytes) + if err != nil { + return err + } + } + return nil } diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index ea65e572f3f8..e2bb59dbb0e2 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -513,6 +513,27 @@ func (p *pureClient) createStoragePool(poolName string, size int64) error { return nil } +// updateStoragePool updates an existing storage pool (Pure Storage pod). +func (p *pureClient) updateStoragePool(poolName string, size int64) error { + reqBody := make(map[string]any) + if size > 0 { + reqBody["quota_limit"] = size + } + + req, err := p.createBodyReader(reqBody) + if err != nil { + return err + } + + url := api.NewURL().Path("pods").WithQuery("names", poolName) + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to update storage pool %q: %w", poolName, err) + } + + return nil +} + // deleteStoragePool deletes a storage pool (Pure Storage pod). 
func (p *pureClient) deleteStoragePool(poolName string) error { pool, err := p.getStoragePool(poolName) From a41a5d43a2ba77948c59d29f75162d5a1ca15dc2 Mon Sep 17 00:00:00 2001 From: Din Music Date: Sun, 22 Dec 2024 14:43:02 +0000 Subject: [PATCH 34/51] lxd/storage/drivers/pure: Delete default protection groups when storage pool is created Signed-off-by: Din Music --- lxd/storage/drivers/driver_pure_util.go | 165 ++++++++++++++++++++++-- 1 file changed, 156 insertions(+), 9 deletions(-) diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index e2bb59dbb0e2..f4a761b39474 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -165,6 +165,18 @@ type pureStorageArray struct { Space pureSpace `json:"space"` } +// pureProtectionGroup represents a protection group in Pure Storage. +type pureProtectionGroup struct { + Name string `json:"name"` + IsDestroyed bool `json:"destroyed"` +} + +// pureDefaultProtection represents a default protection in Pure Storage. +type pureDefaultProtection struct { + Name string `json:"name"` + Type string `json:"type"` +} + // pureStoragePool represents a storage pool (pod) in Pure Storage. type pureStoragePool struct { ID string `json:"id"` @@ -436,6 +448,129 @@ func (p *pureClient) getNetworkInterfaces(service string) ([]pureNetworkInterfac return resp.Items, nil } +// getProtectionGroup returns the protection group with the given name. +func (p *pureClient) getProtectionGroup(name string) (*pureProtectionGroup, error) { + var resp pureResponse[pureProtectionGroup] + + url := api.NewURL().Path("protection-groups").WithQuery("names", name) + err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp) + if err != nil { + if isPureErrorNotFound(err) { + return nil, api.StatusErrorf(http.StatusNotFound, "Protection group %q not found", name) + } + + return nil, fmt.Errorf("Failed to get protection group %q: %w", name, err) + } + + if len(resp.Items) == 0 { + return nil, api.StatusErrorf(http.StatusNotFound, "Protection group %q not found", name) + } + + return &resp.Items[0], nil +} + +// deleteProtectionGroup deletes the protection group with the given name. +func (p *pureClient) deleteProtectionGroup(name string) error { + pg, err := p.getProtectionGroup(name) + if err != nil { + if api.StatusErrorCheck(err, http.StatusNotFound) { + // Already removed. + return nil + } + + return err + } + + url := api.NewURL().Path("protection-groups").WithQuery("names", name) + + // Ensure the protection group is destroyed. + if !pg.IsDestroyed { + req, err := p.createBodyReader(map[string]any{ + "destroyed": true, + }) + if err != nil { + return err + } + + err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil) + if err != nil { + return fmt.Errorf("Failed to destroy protection group %q: %w", name, err) + } + } + + // Delete the protection group. + err = p.requestAuthenticated(http.MethodDelete, url.URL, nil, nil) + if err != nil { + return fmt.Errorf("Failed to delete protection group %q: %w", name, err) + } + + return nil +} + +// deleteStoragePoolDefaultProtections unsets default protections for the given +// storage pool and removes its default protection groups. 
+func (p *pureClient) deleteStoragePoolDefaultProtections(poolName string) error {
+	var resp pureResponse[struct {
+		Type               string                  `json:"type"`
+		DefaultProtections []pureDefaultProtection `json:"default_protections"`
+	}]
+
+	url := api.NewURL().Path("container-default-protections").WithQuery("names", poolName)
+
+	// Extract default protections for the given storage pool.
+	err := p.requestAuthenticated(http.MethodGet, url.URL, nil, &resp)
+	if err != nil {
+		if isPureErrorNotFound(err) {
+			// Default protections do not exist.
+			return nil
+		}
+
+		return fmt.Errorf("Failed to get default protections for storage pool %q: %w", poolName, err)
+	}
+
+	// Remove default protections and protection groups related to the storage pool.
+	for _, item := range resp.Items {
+		// Ensure protection applies to the storage pool.
+		if item.Type != "pod" {
+			continue
+		}
+
+		// To be able to delete default protection groups, they have to
+		// be removed from the list of default protections.
+		req, err := p.createBodyReader(map[string]any{
+			"default_protections": []pureDefaultProtection{},
+		})
+		if err != nil {
+			return err
+		}
+
+		err = p.requestAuthenticated(http.MethodPatch, url.URL, req, nil)
+		if err != nil {
+			if isPureErrorNotFound(err) {
+				// Default protection already removed.
+				continue
+			}
+
+			return fmt.Errorf("Failed to unset default protections for storage pool %q: %w", poolName, err)
+		}
+
+		// Iterate over default protections and extract protection group names.
+		for _, pg := range item.DefaultProtections {
+			if pg.Type != "protection_group" {
+				continue
+			}
+
+			// Remove protection groups.
+			err := p.deleteProtectionGroup(pg.Name)
+			if err != nil {
+				return fmt.Errorf("Failed to remove protection group %q for storage pool %q: %w", pg.Name, poolName, err)
+			}
+		}
+	}
+
+	return nil
+}
+
 // getStorageArrays returns the list of storage arrays.
 // If arrayNames are provided, only those are returned.
 func (p *pureClient) getStorageArrays(arrayNames ...string) ([]pureStorageArray, error) {
@@ -473,6 +608,9 @@ func (p *pureClient) getStoragePool(poolName string) (*pureStoragePool, error) {
 
 // createStoragePool creates a storage pool (Pure Storage pod).
 func (p *pureClient) createStoragePool(poolName string, size int64) error {
+	revert := revert.New()
+	defer revert.Fail()
+
 	reqBody := make(map[string]any)
 	if size > 0 {
 		reqBody["quota_limit"] = size
@@ -495,21 +633,30 @@ func (p *pureClient) createStoragePool(poolName string, size int64) error {
 		}
 
 		logger.Info("Storage pool has been restored", logger.Ctx{"pool": poolName})
-		return nil
-	}
+	} else {
+		// Storage pool does not exist in destroyed state, therefore, try to create a new one.
+		req, err := p.createBodyReader(reqBody)
+		if err != nil {
+			return err
+		}
 
-	req, err := p.createBodyReader(reqBody)
-	if err != nil {
-		return err
+		url := api.NewURL().Path("pods").WithQuery("names", poolName)
+		err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+		if err != nil {
+			return fmt.Errorf("Failed to create storage pool %q: %w", poolName, err)
+		}
 	}
 
-	// Storage pool does not exist in destroyed state, therefore, try to create a new one.
-	url := api.NewURL().Path("pods").WithQuery("names", poolName)
-	err = p.requestAuthenticated(http.MethodPost, url.URL, req, nil)
+	revert.Add(func() { _ = p.deleteStoragePool(poolName) })
+
+	// Delete default protection groups of the new storage pool to ensure
+	// there are no limitations when deleting the pool or volume.
+	err = p.deleteStoragePoolDefaultProtections(poolName)
 	if err != nil {
-		return fmt.Errorf("Failed to create storage pool %q: %w", poolName, err)
+		return err
 	}
 
+	revert.Success()
 	return nil
 }

From 2a9347298fddb78300a75a0e02507d16a6354480 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Thu, 30 Jan 2025 09:44:47 +0000
Subject: [PATCH 35/51] lxd/storage/drivers/pure: Wait for desired disk size after resize

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure_volumes.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/lxd/storage/drivers/driver_pure_volumes.go b/lxd/storage/drivers/driver_pure_volumes.go
index 5313f7ddad84..61e329cb2d6f 100644
--- a/lxd/storage/drivers/driver_pure_volumes.go
+++ b/lxd/storage/drivers/driver_pure_volumes.go
@@ -14,6 +14,7 @@ import (
 	"github.com/canonical/lxd/lxd/instancewriter"
 	"github.com/canonical/lxd/lxd/migration"
 	"github.com/canonical/lxd/lxd/operations"
+	"github.com/canonical/lxd/lxd/storage/block"
 	"github.com/canonical/lxd/lxd/storage/filesystem"
 	"github.com/canonical/lxd/shared"
 	"github.com/canonical/lxd/shared/api"
@@ -859,6 +860,14 @@ func (d *pure) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, o
 
 		revert.Add(cleanup)
 
+		// Ensure the block device is resized before growing the filesystem.
+		// This should succeed immediately, but if the volume was already mapped,
+		// it may take a moment for the size to be reflected on the host.
+		err = block.WaitDiskDeviceResize(d.state.ShutdownCtx, devPath, sizeBytes)
+		if err != nil {
+			return err
+		}
+
 		// Grow the filesystem to fill the block device.
 		err = growFileSystem(fsType, devPath, vol)
 		if err != nil {
@@ -897,6 +906,15 @@ func (d *pure) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, o
 
 		revert.Add(cleanup)
 
+		// Wait for the block device to be resized before moving the GPT alt header.
+		// This ensures that the GPT alt header is not moved before the actual
+		// size is reflected on the local host. Otherwise, the GPT alt header
+		// would be moved to the same location.
+		err = block.WaitDiskDeviceResize(d.state.ShutdownCtx, devPath, sizeBytes)
+		if err != nil {
+			return err
+		}
+
 		err = d.moveGPTAltHeader(devPath)
 		if err != nil {
 			return err

From 36c7cc9a28a82d0dde5df65499861217d0c55edc Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 31 Jan 2025 14:58:03 +0000
Subject: [PATCH 36/51] lxd/storage/drivers/pure: Allow setting custom target addresses

Signed-off-by: Din Music
---
 lxd/storage/drivers/driver_pure.go      |  9 +++++++++
 lxd/storage/drivers/driver_pure_util.go | 10 +++++++---
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_pure.go b/lxd/storage/drivers/driver_pure.go
index 59fb74ae5635..591b6608e6b8 100644
--- a/lxd/storage/drivers/driver_pure.go
+++ b/lxd/storage/drivers/driver_pure.go
@@ -146,6 +146,15 @@ func (d *pure) Validate(config map[string]string) error {
 		// defaultdesc: `true`
 		// shortdesc: Whether to verify the Pure Storage gateway's certificate
 		"pure.gateway.verify": validate.Optional(validate.IsBool),
+		// lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.target)
+		// A comma-separated list of target addresses. If empty, LXD discovers and
+		// connects to all available targets. Otherwise, it only connects to the
+		// specified addresses.
+		// ---
+		// type: string
+		// defaultdesc: the discovered targets
+		// shortdesc: List of target addresses
+ "pure.target": validate.Optional(validate.IsListOf(validate.IsNetworkAddress)), // lxdmeta:generate(entities=storage-pure; group=pool-conf; key=pure.mode) // The mode to use to map Pure Storage volumes to the local server. // Supported values are `iscsi` and `nvme`. diff --git a/lxd/storage/drivers/driver_pure_util.go b/lxd/storage/drivers/driver_pure_util.go index f4a761b39474..51930fe8e078 100644 --- a/lxd/storage/drivers/driver_pure_util.go +++ b/lxd/storage/drivers/driver_pure_util.go @@ -1140,9 +1140,13 @@ func (p *pureClient) getTarget() (targetQN string, targetAddrs []string, err err return "", nil, api.StatusErrorf(http.StatusNotFound, "Enabled network interface with %q service not found", service) } - targetAddrs = make([]string, 0, len(interfaces)) - for _, iface := range interfaces { - targetAddrs = append(targetAddrs, iface.Ethernet.Address) + // First check if target addresses are configured, otherwise, use the discovered ones. + targetAddrs = shared.SplitNTrimSpace(p.driver.config["pure.target"], ",", -1, true) + if len(targetAddrs) == 0 { + targetAddrs = make([]string, 0, len(interfaces)) + for _, iface := range interfaces { + targetAddrs = append(targetAddrs, iface.Ethernet.Address) + } } // Get the qualified name of the target by iterating over the available From 4b43b6311dd34d66080cdc199fd3460dfd989566 Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 31 Jan 2025 14:04:18 +0000 Subject: [PATCH 37/51] lxd/storage/backend_lxd: Ensure volatile uuid is set for regenerated optimized image Signed-off-by: Din Music --- lxd/storage/backend_lxd.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index e2538724bb4e..563c9ef8c925 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -4175,8 +4175,10 @@ func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) e } // Reset img volume variables as we just deleted the old one. + // Since the old volume has been removed, ensure the new volume + // is instantiated with its own UUID. imgDBVol = nil - imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, image.Fingerprint, nil) + imgVol = b.GetNewVolume(drivers.VolumeTypeImage, contentType, image.Fingerprint, nil) } else if err != nil { return err } else { From 2a0051a8977c9b5608c3bae9563e4a2e259f15d7 Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 6 Dec 2024 16:18:58 +0000 Subject: [PATCH 38/51] lxd/storage/utils: Add Pure Storage to common volume rules Signed-off-by: Din Music --- lxd/storage/utils.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go index 091d329d7f44..0810de5373fd 100644 --- a/lxd/storage/utils.go +++ b/lxd/storage/utils.go @@ -533,7 +533,7 @@ func poolAndVolumeCommonRules(vol *drivers.Volume) map[string]func(string) error // shortdesc: Size/quota of the storage bucket // scope: local "size": validate.Optional(validate.IsSize), - // lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex; group=volume-conf; key=snapshots.expiry) + // lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex,storage-pure; group=volume-conf; key=snapshots.expiry) // Specify an expression like `1M 2H 3d 4w 5m 6y`. 
 		// ---
 		// type: string
@@ -546,7 +546,7 @@ func poolAndVolumeCommonRules(vol *drivers.Volume) map[string]func(string) error
 			_, err := shared.GetExpiry(time.Time{}, value)
 			return err
 		},
-		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex; group=volume-conf; key=snapshots.schedule)
+		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex,storage-pure; group=volume-conf; key=snapshots.schedule)
 		// Specify either a cron expression (`<minute> <hour> <dom> <month> <dow>`), a comma-separated list of schedule aliases (`@hourly`, `@daily`, `@midnight`, `@weekly`, `@monthly`, `@annually`, `@yearly`), or leave empty to disable automatic snapshots (the default).
 		// ---
 		// type: string
@@ -555,7 +555,7 @@ func poolAndVolumeCommonRules(vol *drivers.Volume) map[string]func(string) error
 		// shortdesc: Schedule for automatic volume snapshots
 		// scope: global
 		"snapshots.schedule": validate.Optional(validate.IsCron([]string{"@hourly", "@daily", "@midnight", "@weekly", "@monthly", "@annually", "@yearly"})),
-		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex; group=volume-conf; key=snapshots.pattern)
+		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex,storage-pure; group=volume-conf; key=snapshots.pattern)
 		// You can specify a naming template that is used for scheduled snapshots and unnamed snapshots.
 		//
 		// {{snapshot_pattern_detail}}
@@ -606,7 +606,7 @@ func poolAndVolumeCommonRules(vol *drivers.Volume) map[string]func(string) error
 
 	// Those keys are only valid for volumes.
 	if vol != nil {
-		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex; group=volume-conf; key=volatile.uuid)
+		// lxdmeta:generate(entities=storage-btrfs,storage-cephfs,storage-ceph,storage-dir,storage-lvm,storage-zfs,storage-powerflex,storage-pure; group=volume-conf; key=volatile.uuid)
 		//
 		// ---
 		// type: string

From d573f8743dc34b3ef9589c0c10f8f30ae214ebb8 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Tue, 12 Nov 2024 15:10:27 +0000
Subject: [PATCH 39/51] test/backends: Helper functions for creating Pure Storage pools

Signed-off-by: Din Music
---
 test/backends/pure.sh | 64 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 test/backends/pure.sh

diff --git a/test/backends/pure.sh b/test/backends/pure.sh
new file mode 100644
index 000000000000..b6e4bcfc8b85
--- /dev/null
+++ b/test/backends/pure.sh
@@ -0,0 +1,64 @@
+pure_setup() {
+    local LXD_DIR
+
+    LXD_DIR=$1
+
+    echo "==> Setting up Pure Storage backend in ${LXD_DIR}"
+}
+
+# pure_configure creates a Pure Storage storage pool and configures the instance root disk
+# device in the default profile to use that storage pool.
+pure_configure() {
+    local LXD_DIR
+
+    LXD_DIR=$1
+
+    echo "==> Configuring Pure Storage backend in ${LXD_DIR}"
+
+    # Create a Pure Storage storage pool.
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")" pure \
+        pure.gateway="${PURE_GATEWAY}" \
+        pure.gateway.verify="${PURE_GATEWAY_VERIFY:-true}" \
+        pure.api.token="${PURE_API_TOKEN}" \
+        pure.mode="${PURE_MODE:-nvme}" \
+        volume.size=25MiB
+
+    # Add the storage pool to the default profile.
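+    # Instances created during the tests then use the Pure Storage pool by default.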
+    lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")"
+}
+
+# configure_pure_pool creates a new Pure Storage storage pool with the given name.
+# Additional arguments are appended to the lxc storage create command.
+# If there is anything on stdin, the content is passed through to the lxc storage create command as well.
+configure_pure_pool() {
+    poolName=$1
+    shift 1
+
+    if [ -p /dev/stdin ]; then
+        # Use the heredoc if there is input on stdin.
+        lxc storage create "${poolName}" pure \
+            pure.gateway="${PURE_GATEWAY}" \
+            pure.gateway.verify="${PURE_GATEWAY_VERIFY:-true}" \
+            pure.api.token="${PURE_API_TOKEN}" \
+            pure.mode="${PURE_MODE:-nvme}" \
+            "$@" < /dev/stdin
+    else
+        lxc storage create "${poolName}" pure \
+            pure.gateway="${PURE_GATEWAY}" \
+            pure.gateway.verify="${PURE_GATEWAY_VERIFY:-true}" \
+            pure.api.token="${PURE_API_TOKEN}" \
+            pure.mode="${PURE_MODE:-nvme}" \
+            "$@"
+    fi
+}
+
+pure_teardown() {
+    local LXD_DIR
+
+    LXD_DIR=$1
+
+    echo "==> Tearing down Pure Storage backend in ${LXD_DIR}"
+}

From f30463eb5a333721036dc2136004bb28806f420b Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 6 Dec 2024 15:24:37 +0000
Subject: [PATCH 40/51] test/includes/storage: Include Pure Storage driver if gateway and api token are set

Signed-off-by: Din Music
---
 test/includes/storage.sh | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/includes/storage.sh b/test/includes/storage.sh
index 9f3f001c8faa..37e412f4e024 100644
--- a/test/includes/storage.sh
+++ b/test/includes/storage.sh
@@ -39,6 +39,10 @@ available_storage_backends() {
 
     backends="dir" # always available
 
+    if [ -n "${PURE_GATEWAY:-}" ] && [ -n "${PURE_API_TOKEN:-}" ]; then
+        backends="$backends pure"
+    fi
+
     storage_backends="btrfs lvm zfs"
     if [ -n "${LXD_CEPH_CLUSTER:-}" ]; then
         storage_backends="${storage_backends} ceph"
@@ -182,4 +186,4 @@ delete_object_storage_pool() {
         # shellcheck disable=SC2154
         deconfigure_loop_device "${loop_file}" "${loop_device}"
     fi
-} \ No newline at end of file
+}

From 6b1a04bb1eea5b0c996a3a842fb1202f3d15a549 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 8 Nov 2024 10:56:29 +0000
Subject: [PATCH 41/51] test/storage_driver_pure: Add basic Pure Storage tests

Signed-off-by: Din Music
---
 test/main.sh                       |   1 +
 test/suites/storage_driver_pure.sh | 128 +++++++++++++++++++++++++++++
 2 files changed, 129 insertions(+)
 create mode 100644 test/suites/storage_driver_pure.sh

diff --git a/test/main.sh b/test/main.sh
index ddac1e0774ec..7faf3e26ce4f 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -395,6 +395,7 @@ if [ "${1:-"all"}" != "cluster" ]; then
     run_test test_storage_driver_cephfs "cephfs storage driver"
     run_test test_storage_driver_dir "dir storage driver"
     run_test test_storage_driver_zfs "zfs storage driver"
+    run_test test_storage_driver_pure "pure storage driver"
     run_test test_storage_buckets "storage buckets"
     run_test test_storage_volume_import "storage volume import"
     run_test test_storage_volume_initial_config "storage volume initial configuration"

diff --git a/test/suites/storage_driver_pure.sh b/test/suites/storage_driver_pure.sh
new file mode 100644
index 000000000000..bf021f4120ab
--- /dev/null
+++ b/test/suites/storage_driver_pure.sh
@@ -0,0 +1,128 @@
+test_storage_driver_pure() {
+  local LXD_STORAGE_DIR lxd_backend
+
+  lxd_backend=$(storage_backend "$LXD_DIR")
+  if [ "$lxd_backend" != "pure" ]; then
+    return
+  fi
+
+  LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX)
+  chmod +x "${LXD_STORAGE_DIR}"
+  spawn_lxd "${LXD_STORAGE_DIR}" false
+
+  (
+    set -eux
+    # shellcheck disable=2030
+    LXD_DIR="${LXD_STORAGE_DIR}"
+
+    # Create 2 storage pools.
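+    # Two pools allow the cross-pool copy and attach scenarios exercised below.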
+ poolName1="lxdtest-$(basename "${LXD_DIR}")-pool1" + poolName2="lxdtest-$(basename "${LXD_DIR}")-pool2" + configure_pure_pool "${poolName1}" + configure_pure_pool "${poolName2}" + + # Configure default volume size for pools. + lxc storage set "${poolName1}" volume.size=25MiB + lxc storage set "${poolName2}" volume.size=25MiB + + # Set default storage pool for image import. + lxc profile device add default root disk path="/" pool="${poolName1}" + + # Import image into default storage pool. + ensure_import_testimage + + # Muck around with some containers on various pools. + lxc init testimage c1pool1 -s "${poolName1}" + lxc list -c b c1pool1 | grep "${poolName1}" + + lxc init testimage c2pool2 -s "${poolName2}" + lxc list -c b c2pool2 | grep "${poolName2}" + + lxc launch images:alpine/edge c3pool1 -s "${poolName1}" + lxc list -c b c3pool1 | grep "${poolName1}" + + lxc launch images:alpine/edge c4pool2 -s "${poolName2}" + lxc list -c b c4pool2 | grep "${poolName2}" + + lxc storage set "${poolName1}" volume.block.filesystem xfs + + # xfs is unhappy with block devices < 300 MiB. + lxc storage set "${poolName1}" volume.size 300MiB + lxc init testimage c5pool1 -s "${poolName1}" + + # Test whether dependency tracking is working correctly. We should be able + # to create a container, copy it, which leads to a dependency relation + # between the source container's storage volume and the copied container's + # storage volume. Now, we delete the source container which will trigger a + # rename operation and not an actual delete operation. Now we create a + # container of the same name as the source container again, create a copy of + # it to introduce another dependency relation. Now we delete the source + # container again. This should work. If it doesn't it means the rename + # operation tries to map the two source to the same name. + lxc init testimage a -s "${poolName1}" + lxc copy a b + lxc delete a + lxc init testimage a -s "${poolName1}" + lxc copy a c + lxc delete a + lxc delete b + lxc delete c + + lxc storage volume create "${poolName1}" c1pool1 + lxc storage volume attach "${poolName1}" c1pool1 c1pool1 testDevice /opt + ! lxc storage volume attach "${poolName1}" c1pool1 c1pool1 testDevice2 /opt || false + lxc storage volume detach "${poolName1}" c1pool1 c1pool1 + lxc storage volume attach "${poolName1}" custom/c1pool1 c1pool1 testDevice /opt + ! lxc storage volume attach "${poolName1}" custom/c1pool1 c1pool1 testDevice2 /opt || false + lxc storage volume detach "${poolName1}" c1pool1 c1pool1 + + lxc storage volume create "${poolName1}" c2pool2 + lxc storage volume attach "${poolName1}" c2pool2 c2pool2 testDevice /opt + ! lxc storage volume attach "${poolName1}" c2pool2 c2pool2 testDevice2 /opt || false + lxc storage volume detach "${poolName1}" c2pool2 c2pool2 + lxc storage volume attach "${poolName1}" custom/c2pool2 c2pool2 testDevice /opt + ! lxc storage volume attach "${poolName1}" custom/c2pool2 c2pool2 testDevice2 /opt || false + lxc storage volume detach "${poolName1}" c2pool2 c2pool2 + + lxc storage volume create "${poolName2}" c3pool1 + lxc storage volume attach "${poolName2}" c3pool1 c3pool1 testDevice /opt + ! lxc storage volume attach "${poolName2}" c3pool1 c3pool1 testDevice2 /opt || false + lxc storage volume detach "${poolName2}" c3pool1 c3pool1 + lxc storage volume attach "${poolName2}" c3pool1 c3pool1 testDevice /opt + ! 
lxc storage volume attach "${poolName2}" c3pool1 c3pool1 testDevice2 /opt || false + lxc storage volume detach "${poolName2}" c3pool1 c3pool1 + + lxc storage volume create "${poolName2}" c4pool2 + lxc storage volume attach "${poolName2}" c4pool2 c4pool2 testDevice /opt + ! lxc storage volume attach "${poolName2}" c4pool2 c4pool2 testDevice2 /opt || false + lxc storage volume detach "${poolName2}" c4pool2 c4pool2 + lxc storage volume attach "${poolName2}" custom/c4pool2 c4pool2 testDevice /opt + ! lxc storage volume attach "${poolName2}" custom/c4pool2 c4pool2 testDevice2 /opt || false + lxc storage volume detach "${poolName2}" c4pool2 c4pool2 + lxc storage volume rename "${poolName2}" c4pool2 c4pool2-renamed + lxc storage volume rename "${poolName2}" c4pool2-renamed c4pool2 + + lxc delete -f c1pool1 + lxc delete -f c3pool1 + lxc delete -f c5pool1 + + lxc delete -f c4pool2 + lxc delete -f c2pool2 + + lxc storage volume set "${poolName1}" c1pool1 size 500MiB + lxc storage volume unset "${poolName1}" c1pool1 size + + lxc storage volume delete "${poolName1}" c1pool1 + lxc storage volume delete "${poolName1}" c2pool2 + lxc storage volume delete "${poolName2}" c3pool1 + lxc storage volume delete "${poolName2}" c4pool2 + + lxc image delete testimage + lxc profile device remove default root + lxc storage delete "${poolName1}" + lxc storage delete "${poolName2}" + ) + + # shellcheck disable=SC2031 + kill_lxd "${LXD_STORAGE_DIR}" +} From f7a0a9d3cc661e0160d901598487f89804c1dbeb Mon Sep 17 00:00:00 2001 From: Din Music Date: Tue, 12 Nov 2024 15:10:59 +0000 Subject: [PATCH 42/51] test/container_move: Use helper function to create Pure Storage pool Correctly initialize Pure Storage pool. Signed-off-by: Din Music --- test/suites/container_move.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/suites/container_move.sh b/test/suites/container_move.sh index ecc117b563e8..db8bf60cb594 100644 --- a/test/suites/container_move.sh +++ b/test/suites/container_move.sh @@ -11,7 +11,11 @@ test_container_move() { # Setup. 
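+  # Pure Storage pools cannot be created with default settings only, so a
+  # helper function creates the second pool for that backend below.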
lxc project create "${project}" - lxc storage create "${pool2}" "${lxd_backend}" + if [ "${lxd_backend}" = "pure" ]; then + configure_pure_pool "${pool2}" + else + lxc storage create "${pool2}" "${lxd_backend}" + fi lxc profile create "${profile}" --project "${project}" lxc profile device add "${profile}" root disk pool="${pool2}" path=/ --project "${project}" From 12508d45cdf3435a4f651df5d05e5b6f0d41b825 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 14 Nov 2024 11:12:35 +0000 Subject: [PATCH 43/51] test/storage_local_volume_handling: Test Pure Storage with other available storage drivers Signed-off-by: Din Music --- test/suites/storage_local_volume_handling.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/test/suites/storage_local_volume_handling.sh b/test/suites/storage_local_volume_handling.sh index 456eebd8b7e4..09c5621be6f5 100755 --- a/test/suites/storage_local_volume_handling.sh +++ b/test/suites/storage_local_volume_handling.sh @@ -36,6 +36,10 @@ test_storage_local_volume_handling() { lxc storage create "${pool_base}-zfs" zfs size=1GiB fi + if storage_backend_available "pure"; then + configure_pure_pool "${pool_base}-pure" + fi + # Test all combinations of our storage drivers driver="${lxd_backend}" @@ -51,11 +55,13 @@ test_storage_local_volume_handling() { pool_opts="volume.size=25MiB ceph.osd.pg_num=16" fi - if [ "$driver" = "lvm" ]; then + if [ "$driver" = "lvm" ] || [ "$driver" = "pure" ]; then pool_opts="volume.size=25MiB" fi - if [ -n "${pool_opts}" ]; then + if [ "$driver" = "pure" ]; then + configure_pure_pool "${pool}1" "${pool_opts}" + elif [ -n "${pool_opts}" ]; then # shellcheck disable=SC2086 lxc storage create "${pool}1" "${driver}" $pool_opts else @@ -179,8 +185,8 @@ test_storage_local_volume_handling() { lxc storage volume delete "${pool}1" vol1 lxc storage delete "${pool}1" - for source_driver in "btrfs" "ceph" "cephfs" "dir" "lvm" "zfs"; do - for target_driver in "btrfs" "ceph" "cephfs" "dir" "lvm" "zfs"; do + for source_driver in "btrfs" "ceph" "cephfs" "dir" "lvm" "zfs" "pure"; do + for target_driver in "btrfs" "ceph" "cephfs" "dir" "lvm" "zfs" "pure"; do # shellcheck disable=SC2235 if [ "$source_driver" != "$target_driver" ] \ && ([ "$lxd_backend" = "$source_driver" ] || ([ "$lxd_backend" = "ceph" ] && [ "$source_driver" = "cephfs" ] && [ -n "${LXD_CEPH_CEPHFS:-}" ])) \ From e6aaa1bdd58e3ef2479b3a9829c493bb6b98f4cc Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 14 Nov 2024 11:25:34 +0000 Subject: [PATCH 44/51] test/storage_snapshots: Use helper function to create Pure Storage pool Signed-off-by: Din Music --- test/suites/storage_snapshots.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/suites/storage_snapshots.sh b/test/suites/storage_snapshots.sh index 528af0df32d2..d78bd76b4101 100644 --- a/test/suites/storage_snapshots.sh +++ b/test/suites/storage_snapshots.sh @@ -14,7 +14,12 @@ test_storage_volume_snapshots() { storage_pool2="${storage_pool}2" storage_volume="${storage_pool}-vol" - lxc storage create "$storage_pool" "$lxd_backend" + if [ "${lxd_backend}" = "pure" ]; then + # Pure Storage needs some additional configuration, therefore create it using a helper function. 
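+    # The helper reads the gateway and API token from the PURE_* environment variables.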
+    configure_pure_pool "${storage_pool}"
+  else
+    lxc storage create "$storage_pool" "$lxd_backend"
+  fi
   lxc storage volume create "${storage_pool}" "${storage_volume}"
   lxc launch testimage c1 -s "${storage_pool}"
   lxc storage volume attach "${storage_pool}" "${storage_volume}" c1 /mnt

From a353baaf25665a421d69b79b4d97af681ed9a594 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 15 Nov 2024 12:42:34 +0000
Subject: [PATCH 45/51] test/backup: Skip recovery tests for Pure Storage driver

Currently, the Pure Storage driver does not support recovery, mainly because
the storage volume names are encoded, which would result in indistinguishable
storage volume names after the recovery.

Signed-off-by: Din Music
---
 test/suites/backup.sh | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 62a29c7807bf..8621d0ee55c6 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -6,6 +6,11 @@ test_storage_volume_recover() {
   poolName=$(lxc profile device get default root pool)
   poolDriver=$(lxc storage show "${poolName}" | awk '/^driver:/ {print $2}')
 
+  if [ "${poolDriver}" = "pure" ]; then
+    echo "==> SKIP: Storage driver does not support recovery"
+    return
+  fi
+
   # Create custom block volume.
   lxc storage volume create "${poolName}" vol1 --type=block
 
@@ -77,6 +82,11 @@ test_container_recover() {
   LXD_DIR=${LXD_IMPORT_DIR}
   lxd_backend=$(storage_backend "$LXD_DIR")
 
+  if [ "${lxd_backend}" = "pure" ]; then
+    echo "==> SKIP: Storage driver does not support recovery"
+    return
+  fi
+
   ensure_import_testimage
 
   poolName=$(lxc profile device get default root pool)
@@ -1008,6 +1018,13 @@ test_backup_volume_expiry() {
 }
 
 test_backup_export_import_recover() {
+  lxd_backend=$(storage_backend "$LXD_DIR")
+
+  if [ "$lxd_backend" = "pure" ]; then
+    echo "==> SKIP: Storage driver does not support recovery"
+    return
+  fi
+
   (
     set -e

From 729aaac7990347fc379fd1fad69a2457591fa67a Mon Sep 17 00:00:00 2001
From: Din Music
Date: Tue, 22 Oct 2024 13:02:10 +0000
Subject: [PATCH 46/51] docs: Add Pure Storage driver docs

Signed-off-by: Din Music
---
 doc/reference/storage_pure.md | 106 ++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 doc/reference/storage_pure.md

diff --git a/doc/reference/storage_pure.md b/doc/reference/storage_pure.md
new file mode 100644
index 000000000000..41b1e49ffae2
--- /dev/null
+++ b/doc/reference/storage_pure.md
@@ -0,0 +1,106 @@
+(storage-pure)=
+# Pure Storage - `pure`
+
+[Pure Storage](https://www.purestorage.com/) is a software-defined storage solution. It offers the consumption of redundant block storage across the network.
+
+LXD supports connecting to Pure Storage storage clusters through two protocols: {abbr}`iSCSI (Internet Small Computer Systems Interface)` or {abbr}`NVMe/TCP (Non-Volatile Memory Express over Transmission Control Protocol)`.
+In addition, Pure Storage offers copy-on-write snapshots, thin provisioning, and other features.
+
+Using Pure Storage with LXD requires a Pure Storage API version of at least `2.21`, which corresponds to a minimum Purity//FA version of `6.4.2`.
+
+Additionally, ensure that the required kernel modules for the selected protocol are installed on your host system.
+For iSCSI, the iSCSI CLI named `iscsiadm` needs to be installed in addition to the required kernel modules.
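+
+For example, a quick way to check these prerequisites might look as follows (the module and package names are assumptions that can differ between distributions):
+
+    # iSCSI mode
+    sudo modprobe iscsi_tcp
+    iscsiadm --version
+
+    # NVMe/TCP mode
+    sudo modprobe nvme_tcp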
+
+## Terminology
+
+Each storage pool created in LXD using the Pure Storage driver represents a Pure Storage *pod*, which is an abstraction that groups multiple volumes under a specific name.
+One benefit of using Pure Storage pods is that they can be linked with multiple Pure Storage arrays to provide additional redundancy.
+
+LXD creates volumes within a pod that is identified by the storage pool name.
+When the first volume needs to be mapped to a specific LXD host, a corresponding Pure Storage host is created with the name of the LXD host and a suffix for the protocol in use.
+For example, if the LXD host is `host01` and the mode is `nvme`, the resulting Pure Storage host would be `host01-nvme`.
+
+The Pure Storage host is then connected to the required volumes to allow attaching and accessing volumes from the LXD host.
+The created Pure Storage host is automatically removed once there are no volumes connected to it.
+
+## The `pure` driver in LXD
+
+The `pure` driver in LXD uses Pure Storage volumes for custom storage volumes, instances, and snapshots.
+All created volumes are thin-provisioned block volumes. If required (for example, for containers and custom file system volumes), LXD formats the volume with the desired file system.
+
+LXD expects Pure Storage to be pre-configured with the required service (for example, iSCSI) on the network interfaces whose addresses are provided or discovered during storage pool configuration.
+Furthermore, LXD assumes that it has full control over the Pure Storage pods it manages.
+Therefore, you should never maintain any volumes in Pure Storage pods that are not owned by LXD, because LXD might disconnect or even delete them.
+
+This driver behaves differently from some of the other drivers in that it provides remote storage.
+As a result, and depending on the internal network, storage access might be a bit slower compared to local storage.
+On the other hand, using remote storage has significant advantages in a cluster setup: all cluster members have access to the same storage pools with the exact same contents, without the need to synchronize them.
+
+When creating a new storage pool using the `pure` driver in either `iscsi` or `nvme` mode, LXD automatically discovers the array's qualified name and target address (portal).
+Upon successful discovery, LXD attaches all volumes that are connected to the Pure Storage host that is associated with a specific LXD server.
+Pure Storage hosts and volume connections are fully managed by LXD.
+
+Volume snapshots are also supported by Pure Storage. However, each snapshot is associated with a parent volume and cannot be directly attached to the host.
+Therefore, when a snapshot is being exported, LXD creates a temporary volume behind the scenes. This volume is attached to the LXD host and removed once the operation is completed.
+Similarly, when a volume with at least one snapshot is being copied, LXD sequentially copies the snapshots into the destination volume, from which a new snapshot is created.
+Finally, once all snapshots are copied, the source volume is copied into the destination volume.
+
+(storage-pure-volume-names)=
+### Volume names
+
+Due to a [limitation](storage-pure-limitations) in Pure Storage, volume names cannot exceed 63 characters.
+Therefore, the driver uses the volume's {config:option}`storage-pure-volume-conf:volatile.uuid` to generate a shorter volume name.
+
+For example, a UUID `5a2504b0-6a6c-4849-8ee7-ddb0b674fd14` is first trimmed of any hyphens (`-`), resulting in the string `5a2504b06a6c48498ee7ddb0b674fd14`.
+To distinguish volume types and snapshots, special identifiers are prepended and appended to the volume names, as depicted in the table below:
+
+Type | Identifier | Example
+:-- | :--- | :----------
+Container | `c-` | `c-5a2504b06a6c48498ee7ddb0b674fd14`
+Virtual machine | `v-` | `v-5a2504b06a6c48498ee7ddb0b674fd14-b` (block volume) and `v-5a2504b06a6c48498ee7ddb0b674fd14` (file system volume)
+Image (ISO) | `i-` | `i-5a2504b06a6c48498ee7ddb0b674fd14-i`
+Custom volume | `u-` | `u-5a2504b06a6c48498ee7ddb0b674fd14`
+Snapshot | `s` | `sc-5a2504b06a6c48498ee7ddb0b674fd14` (container snapshot)
+
+(storage-pure-limitations)=
+### Limitations
+
+The `pure` driver has the following limitations:
+
+Volume size constraints
+: The minimum volume size (quota) is `1MiB`, and the size must be a multiple of `512B`.
+
+Snapshots cannot be mounted
+: Snapshots cannot be mounted directly to the host. Instead, a temporary volume must be created to access the snapshot's contents.
+  For internal operations, such as copying instances or exporting snapshots, LXD handles this automatically.
+
+Sharing the Pure Storage storage pool between installations
+: Sharing the same Pure Storage storage pool between multiple LXD installations is not supported.
+  If a different LXD installation tries to create a storage pool with a name that already exists, an error is returned.
+
+Recovering Pure Storage storage pools
+: Recovery of Pure Storage storage pools using `lxd recover` is currently not supported.
+
+## Configuration options
+
+The following configuration options are available for storage pools that use the `pure` driver, as well as storage volumes in these pools.
+
+(storage-pure-pool-config)=
+### Storage pool configuration
+
+% Include content from [../metadata.txt](../metadata.txt)
+```{include} ../metadata.txt
+    :start-after: <!-- config group storage-pure-pool-conf start -->
+    :end-before: <!-- config group storage-pure-pool-conf end -->
+```
+
+{{volume_configuration}}
+
+(storage-pure-vol-config)=
+### Storage volume configuration
+
+% Include content from [../metadata.txt](../metadata.txt)
+```{include} ../metadata.txt
+    :start-after: <!-- config group storage-pure-volume-conf start -->
+    :end-before: <!-- config group storage-pure-volume-conf end -->
+```

From 94185900a176c2999751f69b2a993b5dcfb50a5a Mon Sep 17 00:00:00 2001
From: Din Music
Date: Tue, 22 Oct 2024 13:02:58 +0000
Subject: [PATCH 47/51] docs: Add example on how to create Pure Storage storage pool

Signed-off-by: Din Music
---
 doc/howto/storage_pools.md | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/doc/howto/storage_pools.md b/doc/howto/storage_pools.md
index 1c721a4ceb31..bf04a2532fe6 100644
--- a/doc/howto/storage_pools.md
+++ b/doc/howto/storage_pools.md
@@ -173,6 +173,24 @@ Create a storage pool named `pool5` that explicitly uses the PowerFlex SDC:
 
     lxc storage create pool5 powerflex powerflex.mode=sdc powerflex.pool=<pool-id> powerflex.gateway=https://powerflex powerflex.user.name=lxd powerflex.user.password=foo
 
+#### Create a Pure Storage pool
+
+Create a storage pool named `pool1` that uses NVMe/TCP by default:
+
+    lxc storage create pool1 pure pure.gateway=https://<gateway-address> pure.api.token=<api-token>
+
+Create a storage pool named `pool2` that uses a Pure Storage gateway with a certificate that is not trusted:
+
+    lxc storage create pool2 pure pure.gateway=https://<gateway-address> pure.gateway.verify=false pure.api.token=<api-token>
+
+Create a storage pool named `pool3` that uses iSCSI to connect to a Pure Storage array:
+
+    lxc storage create pool3 pure pure.gateway=https://<gateway-address> pure.api.token=<api-token> pure.mode=iscsi
+
+Create a storage pool named `pool4` that uses NVMe/TCP to connect to a Pure Storage array via specific target addresses:
+
+    lxc storage create pool4 pure pure.gateway=https://<gateway-address> pure.api.token=<api-token> pure.mode=nvme pure.target=<target-address-1>,<target-address-2>
+
 (storage-pools-cluster)=
 ## Create a storage pool in a cluster
 
@@ -240,6 +258,19 @@ Storage pool my-remote-pool2 pending on member vm03
 Storage pool my-remote-pool2 created
 ```
 
+Create a third storage pool named `my-remote-pool3` using the Pure Storage driver:
+
+```{terminal}
+:input: lxc storage create my-remote-pool3 pure --target=vm01
+Storage pool my-remote-pool3 pending on member vm01
+:input: lxc storage create my-remote-pool3 pure --target=vm02
+Storage pool my-remote-pool3 pending on member vm02
+:input: lxc storage create my-remote-pool3 pure --target=vm03
+Storage pool my-remote-pool3 pending on member vm03
+:input: lxc storage create my-remote-pool3 pure pure.gateway=https://<gateway-address> pure.api.token=<api-token>
+Storage pool my-remote-pool3 created
+```
+
 ## Configure storage pool settings
 
 See the {ref}`storage-drivers` documentation for the available configuration options for each storage driver.

From b5159d3bfec5ed51ad091dea0b7fb509c904d088 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Tue, 22 Oct 2024 13:04:39 +0000
Subject: [PATCH 48/51] docs: Add explanation of Pure Storage remote storage

Signed-off-by: Din Music
---
 doc/explanation/storage.md | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/doc/explanation/storage.md b/doc/explanation/storage.md
index e83bfd5f2bd8..2094d12b0ae4 100644
--- a/doc/explanation/storage.md
+++ b/doc/explanation/storage.md
@@ -24,6 +24,7 @@ The following storage drivers are supported:
 - [CephFS - `cephfs`](storage-cephfs)
 - [Ceph Object - `cephobject`](storage-cephobject)
 - [Dell PowerFlex - `powerflex`](storage-powerflex)
+- [Pure Storage - `pure`](storage-pure)
 
 See the following how-to guides for additional information:
 
@@ -36,12 +37,12 @@
 Where the LXD data is stored depends on the configuration and the selected storage driver.
 Depending on the storage driver that is used, LXD can either share the file system with its host or keep its data separate.
 
-Storage location | Directory | Btrfs | LVM | ZFS | Ceph (all) | Dell PowerFlex |
-:--- | :-: | :-: | :-: | :-: | :-: | :-: |
-Shared with the host | ✓ | ✓ | - | ✓ | - | - |
-Dedicated disk/partition | - | ✓ | ✓ | ✓ | - | - |
-Loop disk | - | ✓ | ✓ | ✓ | - | - |
-Remote storage | - | - | - | - | ✓ | ✓ |
+Storage location | Directory | Btrfs | LVM | ZFS | Ceph (all) | Dell PowerFlex | Pure Storage |
+:--- | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
+Shared with the host | ✓ | ✓ | - | ✓ | - | - | - |
+Dedicated disk/partition | - | ✓ | ✓ | ✓ | - | - | - |
+Loop disk | - | ✓ | ✓ | ✓ | - | - | - |
+Remote storage | - | - | - | - | ✓ | ✓ | ✓ |
 
 #### Shared with the host
 
@@ -71,7 +72,7 @@ You can increase their size (quota) though; see {ref}`storage-resize-pool`.
 #### Remote storage
 
 The `ceph`, `cephfs` and `cephobject` drivers store the data in a completely independent Ceph storage cluster that must be set up separately.
-The same applies to the `powerflex` driver.
+The same applies to the `powerflex` and `pure` drivers.
(storage-default-pool)= ### Default storage pool From c79502cb3cf9713bc9cbc7891910185386b01646 Mon Sep 17 00:00:00 2001 From: Din Music Date: Tue, 22 Oct 2024 13:05:22 +0000 Subject: [PATCH 49/51] docs: Add Pure Storage and its features to table of supported storage drivers Signed-off-by: Din Music --- doc/reference/storage_drivers.md | 35 ++++++++++++++++---------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/doc/reference/storage_drivers.md b/doc/reference/storage_drivers.md index 195d60534d16..1ac8426af4be 100644 --- a/doc/reference/storage_drivers.md +++ b/doc/reference/storage_drivers.md @@ -15,6 +15,7 @@ storage_cephfs storage_cephobject storage_ceph storage_powerflex +storage_pure storage_dir storage_lvm storage_zfs @@ -27,23 +28,23 @@ See the corresponding pages for driver-specific information and configuration op Where possible, LXD uses the advanced features of each storage system to optimize operations. -Feature | Directory | Btrfs | LVM | ZFS | Ceph RBD | CephFS | Ceph Object | Dell PowerFlex -:--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- -{ref}`storage-optimized-image-storage` | ❌ | ✅ | ✅ | ✅ | ✅ | ➖ | ➖ | ❌ -Optimized instance creation | ❌ | ✅ | ✅ | ✅ | ✅ | ➖ | ➖ | ❌ -Optimized snapshot creation | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ✅ -Optimized image transfer | ❌ | ✅ | ❌ | ✅ | ✅ | ➖ | ➖ | ❌ -Optimized backup (import/export) | ❌ | ✅ | ❌ | ✅ | ❌ | ➖ | ➖ | ❌ -{ref}`storage-optimized-volume-transfer` | ❌ | ✅ | ❌ | ✅ | ✅[^1] | ➖ | ➖ | ❌ -{ref}`storage-optimized-volume-refresh` | ❌ | ✅ | ✅[^2] | ✅ | ✅[^3] | ➖ | ➖ | ❌ -Copy on write | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ✅ -Block based | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | ➖ | ✅ -Instant cloning | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ❌ -Storage driver usable inside a container | ✅ | ✅ | ❌ | ✅[^4] | ❌ | ➖ | ➖ | ❌ -Restore from older snapshots (not latest) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ➖ | ✅ -Storage quotas | ✅[^5] | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ -Available on `lxd init` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ -Object storage | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ +Feature | Directory | Btrfs | LVM | ZFS | Ceph RBD | CephFS | Ceph Object | Dell PowerFlex | Pure Storage +:--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- +{ref}`storage-optimized-image-storage` | ❌ | ✅ | ✅ | ✅ | ✅ | ➖ | ➖ | ❌ | ✅ +Optimized instance creation | ❌ | ✅ | ✅ | ✅ | ✅ | ➖ | ➖ | ❌ | ✅ +Optimized snapshot creation | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ✅ | ✅ +Optimized image transfer | ❌ | ✅ | ❌ | ✅ | ✅ | ➖ | ➖ | ❌ | ✅ +Optimized backup (import/export) | ❌ | ✅ | ❌ | ✅ | ❌ | ➖ | ➖ | ❌ | ❌ +{ref}`storage-optimized-volume-transfer` | ❌ | ✅ | ❌ | ✅ | ✅[^1] | ➖ | ➖ | ❌ | ❌ +{ref}`storage-optimized-volume-refresh` | ❌ | ✅ | ✅[^2] | ✅ | ✅[^3] | ➖ | ➖ | ❌ | ❌ +Copy on write | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ✅ | ✅ +Block based | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | ➖ | ✅ | ✅ +Instant cloning | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ➖ | ❌ | ✅ +Storage driver usable inside a container | ✅ | ✅ | ❌ | ✅[^4] | ❌ | ➖ | ➖ | ❌ | ❌ +Restore from older snapshots (not latest) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ➖ | ✅ | ✅ +Storage quotas | ✅[^5] | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ +Available on `lxd init` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ +Object storage | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ [^1]: Volumes of type `block` will fall back to non-optimized transfer when migrating to an older LXD server that doesn't yet support the `RBD_AND_RSYNC` migration type. [^2]: Requires {config:option}`storage-lvm-pool-conf:lvm.use_thinpool` to be enabled. Only when refreshing local volumes. 
From d0f8f5aa1674b170ee9d42a50716f323e3aefab3 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 6 Dec 2024 19:03:21 +0000
Subject: [PATCH 50/51] docs: Update wordlist

Signed-off-by: Din Music
---
 doc/.wordlist.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/.wordlist.txt b/doc/.wordlist.txt
index dda025890f5e..1e830304f4b2 100644
--- a/doc/.wordlist.txt
+++ b/doc/.wordlist.txt
@@ -14,6 +14,7 @@ EBS
 EKS
 enablement
 favicon
+FlashArray
 Furo
 GDB
 Git
@@ -21,6 +22,7 @@ GitHub
 Grafana
 IAM
 installable
+iSCSI
 JSON
 Juju
 Kubeflow

From 40b6b5129618dee2071591c77e4216570c3f20a3 Mon Sep 17 00:00:00 2001
From: Din Music
Date: Fri, 13 Dec 2024 10:50:43 +0000
Subject: [PATCH 51/51] docs: Update metadata

Signed-off-by: Din Music
---
 doc/metadata.txt                | 117 ++++++++++++++++++++++++++++++
 lxd/metadata/configuration.json | 121 ++++++++++++++++++++++++++++++++
 2 files changed, 238 insertions(+)

diff --git a/doc/metadata.txt b/doc/metadata.txt
index 909a1836c5d6..361c6e234fed 100644
--- a/doc/metadata.txt
+++ b/doc/metadata.txt
@@ -5925,6 +5925,123 @@ Specify either a cron expression (`<minute> <hour> <dom> <month> <dow>`), a comm
 
 ```
 
+<!-- config group storage-pure-pool-conf start -->
+```{config:option} pure.api.token storage-pure-pool-conf
+:shortdesc: "API token for Pure Storage gateway authentication"
+:type: "string"
+
+```
+
+```{config:option} pure.gateway storage-pure-pool-conf
+:shortdesc: "Address of the Pure Storage gateway"
+:type: "string"
+
+```
+
+```{config:option} pure.gateway.verify storage-pure-pool-conf
+:defaultdesc: "`true`"
+:shortdesc: "Whether to verify the Pure Storage gateway's certificate"
+:type: "bool"
+
+```
+
+```{config:option} pure.mode storage-pure-pool-conf
+:defaultdesc: "the discovered mode"
+:shortdesc: "How volumes are mapped to the local server"
+:type: "string"
+The mode to use to map Pure Storage volumes to the local server.
+Supported values are `iscsi` and `nvme`.
+```
+
+```{config:option} pure.target storage-pure-pool-conf
+:defaultdesc: "the discovered targets"
+:shortdesc: "List of target addresses"
+:type: "string"
+A comma-separated list of target addresses. If empty, LXD discovers and
+connects to all available targets. Otherwise, it only connects to the
+specified addresses.
+```
+
+```{config:option} volume.size storage-pure-pool-conf
+:defaultdesc: "`10GiB`"
+:shortdesc: "Size/quota of the storage volume"
+:type: "string"
+Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB.
+```
+
+<!-- config group storage-pure-pool-conf end -->
+<!-- config group storage-pure-volume-conf start -->
+```{config:option} block.filesystem storage-pure-volume-conf
+:condition: "block-based volume with content type `filesystem`"
+:defaultdesc: "same as `volume.block.filesystem`"
+:shortdesc: "File system of the storage volume"
+:type: "string"
+Valid options are: `btrfs`, `ext4`, `xfs`
+If not set, `ext4` is assumed.
+```
+
+```{config:option} block.mount_options storage-pure-volume-conf
+:condition: "block-based volume with content type `filesystem`"
+:defaultdesc: "same as `volume.block.mount_options`"
+:shortdesc: "Mount options for block-backed file system volumes"
+:type: "string"
+
+```
+
+```{config:option} size storage-pure-volume-conf
+:defaultdesc: "same as `volume.size`"
+:shortdesc: "Size/quota of the storage volume"
+:type: "string"
+Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB.
+```
+
+```{config:option} snapshots.expiry storage-pure-volume-conf
+:condition: "custom volume"
+:defaultdesc: "same as `volume.snapshots.expiry`"
+:scope: "global"
+:shortdesc: "When snapshots are to be deleted"
+:type: "string"
+Specify an expression like `1M 2H 3d 4w 5m 6y`.
+```
+
+```{config:option} snapshots.pattern storage-pure-volume-conf
+:condition: "custom volume"
+:defaultdesc: "same as `volume.snapshots.pattern` or `snap%d`"
+:scope: "global"
+:shortdesc: "Template for the snapshot name"
+:type: "string"
+You can specify a naming template that is used for scheduled snapshots and unnamed snapshots.
+
+The `snapshots.pattern` option takes a Pongo2 template string to format the snapshot name.
+
+To add a time stamp to the snapshot name, use the Pongo2 context variable `creation_date`.
+Make sure to format the date in your template string to avoid forbidden characters in the snapshot name.
+For example, set `snapshots.pattern` to `{{ creation_date|date:'2006-01-02_15-04-05' }}` to name the snapshots after their time of creation, down to the precision of a second.
+
+Another way to avoid name collisions is to use the placeholder `%d` in the pattern.
+For the first snapshot, the placeholder is replaced with `0`.
+For subsequent snapshots, the existing snapshot names are taken into account to find the highest number at the placeholder's position.
+This number is then incremented by one for the new name.
+```
+
+```{config:option} snapshots.schedule storage-pure-volume-conf
+:condition: "custom volume"
+:defaultdesc: "same as `snapshots.schedule`"
+:scope: "global"
+:shortdesc: "Schedule for automatic volume snapshots"
+:type: "string"
+Specify either a cron expression (`<minute> <hour> <dom> <month> <dow>`), a comma-separated list of schedule aliases (`@hourly`, `@daily`, `@midnight`, `@weekly`, `@monthly`, `@annually`, `@yearly`), or leave empty to disable automatic snapshots (the default).
+```
+
+```{config:option} volatile.uuid storage-pure-volume-conf
+:defaultdesc: "random UUID"
+:scope: "global"
+:shortdesc: "The volume's UUID"
+:type: "string"
+
+```
+
+<!-- config group storage-pure-volume-conf end -->
 
 ```{config:option} size storage-zfs-bucket-conf
 :condition: "appropriate driver"

diff --git a/lxd/metadata/configuration.json b/lxd/metadata/configuration.json
index ed74207fe14e..5e7e762e97da 100644
--- a/lxd/metadata/configuration.json
+++ b/lxd/metadata/configuration.json
@@ -6614,6 +6614,127 @@
             ]
         }
     },
+    "storage-pure": {
+        "pool-conf": {
+            "keys": [
+                {
+                    "pure.api.token": {
+                        "longdesc": "",
+                        "shortdesc": "API token for Pure Storage gateway authentication",
+                        "type": "string"
+                    }
+                },
+                {
+                    "pure.gateway": {
+                        "longdesc": "",
+                        "shortdesc": "Address of the Pure Storage gateway",
+                        "type": "string"
+                    }
+                },
+                {
+                    "pure.gateway.verify": {
+                        "defaultdesc": "`true`",
+                        "longdesc": "",
+                        "shortdesc": "Whether to verify the Pure Storage gateway's certificate",
+                        "type": "bool"
+                    }
+                },
+                {
+                    "pure.mode": {
+                        "defaultdesc": "the discovered mode",
+                        "longdesc": "The mode to use to map Pure Storage volumes to the local server.\nSupported values are `iscsi` and `nvme`.",
+                        "shortdesc": "How volumes are mapped to the local server",
+                        "type": "string"
+                    }
+                },
+                {
+                    "pure.target": {
+                        "defaultdesc": "the discovered targets",
+                        "longdesc": "A comma-separated list of target addresses. If empty, LXD discovers and\nconnects to all available targets. Otherwise, it only connects to the\nspecified addresses.",
+                        "shortdesc": "List of target addresses",
+                        "type": "string"
+                    }
+                },
+                {
+                    "volume.size": {
+                        "defaultdesc": "`10GiB`",
+                        "longdesc": "Default Pure Storage volume size rounded to 512B. 
The minimum size is 1MiB.", + "shortdesc": "Size/quota of the storage volume", + "type": "string" + } + } + ] + }, + "volume-conf": { + "keys": [ + { + "block.filesystem": { + "condition": "block-based volume with content type `filesystem`", + "defaultdesc": "same as `volume.block.filesystem`", + "longdesc": "Valid options are: `btrfs`, `ext4`, `xfs`\nIf not set, `ext4` is assumed.", + "shortdesc": "File system of the storage volume", + "type": "string" + } + }, + { + "block.mount_options": { + "condition": "block-based volume with content type `filesystem`", + "defaultdesc": "same as `volume.block.mount_options`", + "longdesc": "", + "shortdesc": "Mount options for block-backed file system volumes", + "type": "string" + } + }, + { + "size": { + "defaultdesc": "same as `volume.size`", + "longdesc": "Default Pure Storage volume size rounded to 512B. The minimum size is 1MiB.", + "shortdesc": "Size/quota of the storage volume", + "type": "string" + } + }, + { + "snapshots.expiry": { + "condition": "custom volume", + "defaultdesc": "same as `volume.snapshots.expiry`", + "longdesc": "Specify an expression like `1M 2H 3d 4w 5m 6y`.", + "scope": "global", + "shortdesc": "When snapshots are to be deleted", + "type": "string" + } + }, + { + "snapshots.pattern": { + "condition": "custom volume", + "defaultdesc": "same as `volume.snapshots.pattern` or `snap%d`", + "longdesc": "You can specify a naming template that is used for scheduled snapshots and unnamed snapshots.\n\nThe `snapshots.pattern` option takes a Pongo2 template string to format the snapshot name.\n\nTo add a time stamp to the snapshot name, use the Pongo2 context variable `creation_date`.\nMake sure to format the date in your template string to avoid forbidden characters in the snapshot name.\nFor example, set `snapshots.pattern` to `{{ creation_date|date:'2006-01-02_15-04-05' }}` to name the snapshots after their time of creation, down to the precision of a second.\n\nAnother way to avoid name collisions is to use the placeholder `%d` in the pattern.\nFor the first snapshot, the placeholder is replaced with `0`.\nFor subsequent snapshots, the existing snapshot names are taken into account to find the highest number at the placeholder's position.\nThis number is then incremented by one for the new name.", + "scope": "global", + "shortdesc": "Template for the snapshot name", + "type": "string" + } + }, + { + "snapshots.schedule": { + "condition": "custom volume", + "defaultdesc": "same as `snapshots.schedule`", + "longdesc": "Specify either a cron expression (`\u003cminute\u003e \u003chour\u003e \u003cdom\u003e \u003cmonth\u003e \u003cdow\u003e`), a comma-separated list of schedule aliases (`@hourly`, `@daily`, `@midnight`, `@weekly`, `@monthly`, `@annually`, `@yearly`), or leave empty to disable automatic snapshots (the default).", + "scope": "global", + "shortdesc": "Schedule for automatic volume snapshots", + "type": "string" + } + }, + { + "volatile.uuid": { + "defaultdesc": "random UUID", + "longdesc": "", + "scope": "global", + "shortdesc": "The volume's UUID", + "type": "string" + } + } + ] + } + }, "storage-zfs": { "bucket-conf": { "keys": [