From 5a66c27cc68d0679b7d29ad28d3ed44e9aa6f4cf Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 11 Jun 2025 10:03:21 +0200 Subject: [PATCH] auto-format code using perltidy with Proxmox style guide using the new top-level `make tidy` target, which calls perltidy via our wrapper to enforce the desired style as closely as possible. Signed-off-by: Thomas Lamprecht --- src/PVE/API2/Disks.pm | 409 +-- src/PVE/API2/Disks/Directory.pm | 573 ++-- src/PVE/API2/Disks/LVM.pm | 405 +-- src/PVE/API2/Disks/LVMThin.pm | 384 +-- src/PVE/API2/Disks/ZFS.pm | 943 +++--- src/PVE/API2/Storage/Config.pm | 526 +-- src/PVE/API2/Storage/Content.pm | 845 ++--- src/PVE/API2/Storage/FileRestore.pm | 301 +- src/PVE/API2/Storage/PruneBackups.pm | 256 +- src/PVE/API2/Storage/Scan.pm | 565 ++-- src/PVE/API2/Storage/Status.pm | 1312 ++++---- src/PVE/BackupProvider/Plugin/Base.pm | 21 + src/PVE/CLI/pvesm.pm | 1132 ++++--- src/PVE/CephConfig.pm | 421 +-- src/PVE/Diskmanage.pm | 796 +++-- src/PVE/GuestImport.pm | 64 +- src/PVE/GuestImport/OVF.pm | 307 +- src/PVE/Storage.pm | 1593 ++++----- src/PVE/Storage/BTRFSPlugin.pm | 717 ++-- src/PVE/Storage/CIFSPlugin.pm | 194 +- src/PVE/Storage/CephFSPlugin.pm | 125 +- src/PVE/Storage/Common.pm | 17 +- src/PVE/Storage/DirPlugin.pm | 219 +- src/PVE/Storage/ESXiPlugin.pm | 721 ++-- src/PVE/Storage/GlusterfsPlugin.pm | 195 +- src/PVE/Storage/ISCSIDirectPlugin.pm | 124 +- src/PVE/Storage/ISCSIPlugin.pm | 470 +-- src/PVE/Storage/LVMPlugin.pm | 628 ++-- src/PVE/Storage/LunCmd/Comstar.pm | 44 +- src/PVE/Storage/LunCmd/Iet.pm | 97 +- src/PVE/Storage/LunCmd/Istgt.pm | 157 +- src/PVE/Storage/LunCmd/LIO.pm | 240 +- src/PVE/Storage/LvmThinPlugin.pm | 278 +- src/PVE/Storage/NFSPlugin.pm | 147 +- src/PVE/Storage/PBSPlugin.pm | 582 ++-- src/PVE/Storage/Plugin.pm | 1463 +++++---- src/PVE/Storage/RBDPlugin.pm | 589 ++-- src/PVE/Storage/ZFSPlugin.pm | 152 +- src/PVE/Storage/ZFSPoolPlugin.pm | 541 +-- src/PVE/test/ceph_conf_parse_write_test.pl | 762 +++-- src/test/archive_info_test.pm | 238 +- src/test/disklist_test.pm | 238 +- src/test/filesystem_path_test.pm | 72 +- src/test/get_subdir_test.pm | 15 +- src/test/list_volumes_test.pm | 917 +++--- src/test/parse_volname_test.pm | 332 +- src/test/path_to_volume_id_test.pm | 322 +- src/test/prune_backups_test.pm | 830 ++--- src/test/rbd_namespace.pl | 309 +- src/test/run_bwlimit_tests.pl | 476 ++- src/test/run_disk_tests.pl | 4 +- src/test/run_ovf_tests.pl | 59 +- src/test/run_plugin_tests.pl | 2 +- src/test/run_test_zfspoolplugin.pl | 3469 +++++++++++--------- 54 files changed, 14137 insertions(+), 12461 deletions(-) diff --git a/src/PVE/API2/Disks.pm b/src/PVE/API2/Disks.pm index 408bdbe..3f8b196 100644 --- a/src/PVE/API2/Disks.pm +++ b/src/PVE/API2/Disks.pm @@ -19,27 +19,27 @@ use PVE::API2::Disks::ZFS; use PVE::RESTHandler; use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ - subclass => "PVE::API2::Disks::LVM", - path => 'lvm', +__PACKAGE__->register_method({ + subclass => "PVE::API2::Disks::LVM", + path => 'lvm', }); -__PACKAGE__->register_method ({ - subclass => "PVE::API2::Disks::LVMThin", - path => 'lvmthin', +__PACKAGE__->register_method({ + subclass => "PVE::API2::Disks::LVMThin", + path => 'lvmthin', }); -__PACKAGE__->register_method ({ - subclass => "PVE::API2::Disks::Directory", - path => 'directory', +__PACKAGE__->register_method({ + subclass => "PVE::API2::Disks::Directory", + path => 'directory', }); -__PACKAGE__->register_method ({ - subclass => "PVE::API2::Disks::ZFS", - path => 'zfs', +__PACKAGE__->register_method({ 
+ subclass => "PVE::API2::Disks::ZFS", + path => 'zfs', }); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', @@ -47,37 +47,38 @@ __PACKAGE__->register_method ({ permissions => { user => 'all' }, description => "Node index.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => {}, - }, - links => [ { rel => 'child', href => "{name}" } ], + type => 'array', + items => { + type => "object", + properties => {}, + }, + links => [{ rel => 'child', href => "{name}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $result = [ - { name => 'list' }, - { name => 'initgpt' }, - { name => 'smart' }, - { name => 'lvm' }, - { name => 'lvmthin' }, - { name => 'directory' }, - { name => 'wipedisk' }, - { name => 'zfs' }, - ]; + my $result = [ + { name => 'list' }, + { name => 'initgpt' }, + { name => 'smart' }, + { name => 'lvm' }, + { name => 'lvmthin' }, + { name => 'directory' }, + { name => 'wipedisk' }, + { name => 'zfs' }, + ]; - return $result; - }}); + return $result; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'list', path => 'list', method => 'GET', @@ -85,98 +86,97 @@ __PACKAGE__->register_method ({ protected => 1, proxyto => 'node', permissions => { - check => ['or', ['perm', '/', ['Sys.Audit']], ['perm', '/nodes/{node}', ['Sys.Audit']]], + check => ['or', ['perm', '/', ['Sys.Audit']], ['perm', '/nodes/{node}', ['Sys.Audit']]], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - 'include-partitions' => { - description => "Also include partitions.", - type => 'boolean', - optional => 1, - default => 0, - }, - skipsmart => { - description => "Skip smart checks.", - type => 'boolean', - optional => 1, - default => 0, - }, - type => { - description => "Only list specific types of disks.", - type => 'string', - enum => ['unused', 'journal_disks'], - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + 'include-partitions' => { + description => "Also include partitions.", + type => 'boolean', + optional => 1, + default => 0, + }, + skipsmart => { + description => "Skip smart checks.", + type => 'boolean', + optional => 1, + default => 0, + }, + type => { + description => "Only list specific types of disks.", + type => 'string', + enum => ['unused', 'journal_disks'], + optional => 1, + }, + }, }, returns => { - type => 'array', - items => { - type => 'object', - properties => { - devpath => { - type => 'string', - description => 'The device path', - }, - used => { type => 'string', optional => 1 }, - gpt => { type => 'boolean' }, - mounted => { type => 'boolean' }, - size => { type => 'integer'}, - osdid => { type => 'integer'}, # TODO: deprecate / remove in PVE 9? - 'osdid-list' => { - type => 'array', - items => { type => 'integer' }, - }, - vendor => { type => 'string', optional => 1 }, - model => { type => 'string', optional => 1 }, - serial => { type => 'string', optional => 1 }, - wwn => { type => 'string', optional => 1}, - health => { type => 'string', optional => 1}, - parent => { - type => 'string', - description => 'For partitions only. The device path of ' . 
- 'the disk the partition resides on.', - optional => 1 - }, - }, - }, + type => 'array', + items => { + type => 'object', + properties => { + devpath => { + type => 'string', + description => 'The device path', + }, + used => { type => 'string', optional => 1 }, + gpt => { type => 'boolean' }, + mounted => { type => 'boolean' }, + size => { type => 'integer' }, + osdid => { type => 'integer' }, # TODO: deprecate / remove in PVE 9? + 'osdid-list' => { + type => 'array', + items => { type => 'integer' }, + }, + vendor => { type => 'string', optional => 1 }, + model => { type => 'string', optional => 1 }, + serial => { type => 'string', optional => 1 }, + wwn => { type => 'string', optional => 1 }, + health => { type => 'string', optional => 1 }, + parent => { + type => 'string', + description => 'For partitions only. The device path of ' + . 'the disk the partition resides on.', + optional => 1, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $skipsmart = $param->{skipsmart} // 0; - my $include_partitions = $param->{'include-partitions'} // 0; + my $skipsmart = $param->{skipsmart} // 0; + my $include_partitions = $param->{'include-partitions'} // 0; - my $disks = PVE::Diskmanage::get_disks( - undef, - $skipsmart, - $include_partitions - ); + my $disks = PVE::Diskmanage::get_disks( + undef, $skipsmart, $include_partitions, + ); - my $type = $param->{type} // ''; - my $result = []; + my $type = $param->{type} // ''; + my $result = []; - foreach my $disk (sort keys %$disks) { - my $entry = $disks->{$disk}; - if ($type eq 'journal_disks') { - next if $entry->{osdid} >= 0; - if (my $usage = $entry->{used}) { - next if !($usage eq 'partitions' && $entry->{gpt} - || $usage eq 'LVM'); - } - } elsif ($type eq 'unused') { - next if $entry->{used}; - } elsif ($type ne '') { - die "internal error"; # should not happen - } - push @$result, $entry; - } - return $result; - }}); + foreach my $disk (sort keys %$disks) { + my $entry = $disks->{$disk}; + if ($type eq 'journal_disks') { + next if $entry->{osdid} >= 0; + if (my $usage = $entry->{used}) { + next + if !($usage eq 'partitions' && $entry->{gpt} || $usage eq 'LVM'); + } + } elsif ($type eq 'unused') { + next if $entry->{used}; + } elsif ($type ne '') { + die "internal error"; # should not happen + } + push @$result, $entry; + } + return $result; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'smart', path => 'smart', method => 'GET', @@ -184,47 +184,48 @@ __PACKAGE__->register_method ({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - disk => { - type => 'string', - pattern => '^/dev/[a-zA-Z0-9\/]+$', - description => "Block device name", - }, - healthonly => { - type => 'boolean', - description => "If true returns only the health status", - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + disk => { + type => 'string', + pattern => '^/dev/[a-zA-Z0-9\/]+$', + description => "Block device name", + }, + healthonly => { + type => 'boolean', + description => "If true returns only the health status", + optional => 1, + }, + }, }, returns => { - type => 'object', - properties => { - health => { type => 'string' }, - type => { type => 'string', optional => 1 }, - attributes => { type => 'array', optional => 1}, - text => { type => 
'string', optional => 1 }, - }, + type => 'object', + properties => { + health => { type => 'string' }, + type => { type => 'string', optional => 1 }, + attributes => { type => 'array', optional => 1 }, + text => { type => 'string', optional => 1 }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); + my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); - my $result = PVE::Diskmanage::get_smart_data($disk, $param->{healthonly}); + my $result = PVE::Diskmanage::get_smart_data($disk, $param->{healthonly}); - $result->{health} = 'UNKNOWN' if !defined $result->{health}; - $result = { health => $result->{health} } if $param->{healthonly}; + $result->{health} = 'UNKNOWN' if !defined $result->{health}; + $result = { health => $result->{health} } if $param->{healthonly}; - return $result; - }}); + return $result; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'initgpt', path => 'initgpt', method => 'POST', @@ -232,48 +233,49 @@ __PACKAGE__->register_method ({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/', ['Sys.Modify']], + check => ['perm', '/', ['Sys.Modify']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - disk => { - type => 'string', - description => "Block device name", - pattern => '^/dev/[a-zA-Z0-9\/]+$', - }, - uuid => { - type => 'string', - description => 'UUID for the GPT table', - pattern => '[a-fA-F0-9\-]+', - maxLength => 36, - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + disk => { + type => 'string', + description => "Block device name", + pattern => '^/dev/[a-zA-Z0-9\/]+$', + }, + uuid => { + type => 'string', + description => 'UUID for the GPT table', + pattern => '[a-fA-F0-9\-]+', + maxLength => 36, + optional => 1, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); + my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); - my $rpcenv = PVE::RPCEnvironment::get(); + my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); - die "$disk is a partition\n" if PVE::Diskmanage::is_partition($disk); - die "disk $disk already in use\n" if PVE::Diskmanage::disk_is_used($disk); - my $worker = sub { - PVE::Diskmanage::init_disk($disk, $param->{uuid}); - }; + die "$disk is a partition\n" if PVE::Diskmanage::is_partition($disk); + die "disk $disk already in use\n" if PVE::Diskmanage::disk_is_used($disk); + my $worker = sub { + PVE::Diskmanage::init_disk($disk, $param->{uuid}); + }; - my $diskid = $disk; - $diskid =~ s|^.*/||; # remove all up to the last slash - return $rpcenv->fork_worker('diskinit', $diskid, $authuser, $worker); - }}); + my $diskid = $disk; + $diskid =~ s|^.*/||; # remove all up to the last slash + return $rpcenv->fork_worker('diskinit', $diskid, $authuser, $worker); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'wipe_disk', path => 'wipedisk', method => 'PUT', @@ -281,39 +283,40 @@ __PACKAGE__->register_method ({ proxyto => 'node', protected => 1, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - disk => { - type => 'string', - description => "Block device name", - pattern => '^/dev/[a-zA-Z0-9\/]+$', - }, - 
}, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + disk => { + type => 'string', + description => "Block device name", + pattern => '^/dev/[a-zA-Z0-9\/]+$', + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); + my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk}); - my $mounted = PVE::Diskmanage::is_mounted($disk); - die "disk/partition '${mounted}' is mounted\n" if $mounted; + my $mounted = PVE::Diskmanage::is_mounted($disk); + die "disk/partition '${mounted}' is mounted\n" if $mounted; - my $held = PVE::Diskmanage::has_holder($disk); - die "disk/partition '${held}' has a holder\n" if $held; + my $held = PVE::Diskmanage::has_holder($disk); + die "disk/partition '${held}' has a holder\n" if $held; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $worker = sub { - PVE::Diskmanage::wipe_blockdev($disk); - PVE::Diskmanage::udevadm_trigger($disk); - }; + my $worker = sub { + PVE::Diskmanage::wipe_blockdev($disk); + PVE::Diskmanage::udevadm_trigger($disk); + }; - my $basename = basename($disk); # avoid '/' in the ID + my $basename = basename($disk); # avoid '/' in the ID - return $rpcenv->fork_worker('wipedisk', $basename, $authuser, $worker); - }}); + return $rpcenv->fork_worker('wipedisk', $basename, $authuser, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Disks/Directory.pm b/src/PVE/API2/Disks/Directory.pm index 099afae..1de26a4 100644 --- a/src/PVE/API2/Disks/Directory.pm +++ b/src/PVE/API2/Disks/Directory.pm @@ -30,32 +30,32 @@ my $read_ini = sub { my $section; foreach my $line (@lines) { - $line = trim($line); - if ($line =~ m/^\[([^\]]+)\]/) { - $section = $1; - if (!defined($result->{$section})) { - $result->{$section} = {}; - } - } elsif ($line =~ m/^(.*?)=(.*)$/) { - my ($key, $val) = ($1, $2); - if (!$section) { - warn "key value pair found without section, skipping\n"; - next; - } + $line = trim($line); + if ($line =~ m/^\[([^\]]+)\]/) { + $section = $1; + if (!defined($result->{$section})) { + $result->{$section} = {}; + } + } elsif ($line =~ m/^(.*?)=(.*)$/) { + my ($key, $val) = ($1, $2); + if (!$section) { + warn "key value pair found without section, skipping\n"; + next; + } - if ($result->{$section}->{$key}) { - # make duplicate properties to arrays to keep the order - my $prop = $result->{$section}->{$key}; - if (ref($prop) eq 'ARRAY') { - push @$prop, $val; - } else { - $result->{$section}->{$key} = [$prop, $val]; - } - } else { - $result->{$section}->{$key} = $val; - } - } - # ignore everything else + if ($result->{$section}->{$key}) { + # make duplicate properties to arrays to keep the order + my $prop = $result->{$section}->{$key}; + if (ref($prop) eq 'ARRAY') { + push @$prop, $val; + } else { + $result->{$section}->{$key} = [$prop, $val]; + } + } else { + $result->{$section}->{$key} = $val; + } + } + # ignore everything else } return $result; @@ -67,341 +67,366 @@ my $write_ini = sub { my $content = ""; foreach my $sname (sort keys %$ini) { - my $section = $ini->{$sname}; + my $section = $ini->{$sname}; - $content .= "[$sname]\n"; + $content .= "[$sname]\n"; - foreach my $pname (sort keys %$section) { - my $prop = $section->{$pname}; + foreach my $pname (sort keys %$section) { + my $prop = $section->{$pname}; - if (!ref($prop)) { - $content .= "$pname=$prop\n"; - } elsif (ref($prop) 
eq 'ARRAY') { - foreach my $val (@$prop) { - $content .= "$pname=$val\n"; - } - } else { - die "invalid property '$pname'\n"; - } - } - $content .= "\n"; + if (!ref($prop)) { + $content .= "$pname=$prop\n"; + } elsif (ref($prop) eq 'ARRAY') { + foreach my $val (@$prop) { + $content .= "$pname=$val\n"; + } + } else { + die "invalid property '$pname'\n"; + } + } + $content .= "\n"; } file_set_contents($filename, $content); }; -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', proxyto => 'node', protected => 1, permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, description => "PVE Managed Directory storages.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => 'object', - properties => { - unitfile => { - type => 'string', - description => 'The path of the mount unit.', - }, - path => { - type => 'string', - description => 'The mount path.', - }, - device => { - type => 'string', - description => 'The mounted device.', - }, - type => { - type => 'string', - description => 'The filesystem type.', - }, - options => { - type => 'string', - description => 'The mount options.', - }, - }, - }, + type => 'array', + items => { + type => 'object', + properties => { + unitfile => { + type => 'string', + description => 'The path of the mount unit.', + }, + path => { + type => 'string', + description => 'The mount path.', + }, + device => { + type => 'string', + description => 'The mounted device.', + }, + type => { + type => 'string', + description => 'The filesystem type.', + }, + options => { + type => 'string', + description => 'The mount options.', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $result = []; + my $result = []; - dir_glob_foreach('/etc/systemd/system', '^mnt-pve-(.+)\.mount$', sub { - my ($filename, $storid) = @_; - $storid = PVE::Systemd::unescape_unit($storid); + dir_glob_foreach( + '/etc/systemd/system', + '^mnt-pve-(.+)\.mount$', + sub { + my ($filename, $storid) = @_; + $storid = PVE::Systemd::unescape_unit($storid); - my $unitfile = "/etc/systemd/system/$filename"; - my $unit = $read_ini->($unitfile); + my $unitfile = "/etc/systemd/system/$filename"; + my $unit = $read_ini->($unitfile); - push @$result, { - unitfile => $unitfile, - path => "/mnt/pve/$storid", - device => $unit->{'Mount'}->{'What'}, - type => $unit->{'Mount'}->{'Type'}, - options => $unit->{'Mount'}->{'Options'}, - }; - }); + push @$result, + { + unitfile => $unitfile, + path => "/mnt/pve/$storid", + device => $unit->{'Mount'}->{'What'}, + type => $unit->{'Mount'}->{'Type'}, + options => $unit->{'Mount'}->{'Options'}, + }; + }, + ); - return $result; - }}); + return $result; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', path => '', method => 'POST', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", + check => ['perm', '/', ['Sys.Modify']], }, - description => "Create a Filesystem on an unused disk. 
Will be mounted under '/mnt/pve/NAME'.", + description => + "Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - device => { - type => 'string', - description => 'The block device you want to create the filesystem on.', - }, - add_storage => { - description => "Configure storage using the directory.", - type => 'boolean', - optional => 1, - default => 0, - }, - filesystem => { - description => "The desired filesystem.", - type => 'string', - enum => ['ext4', 'xfs'], - optional => 1, - default => 'ext4', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + device => { + type => 'string', + description => 'The block device you want to create the filesystem on.', + }, + add_storage => { + description => "Configure storage using the directory.", + type => 'boolean', + optional => 1, + default => 0, + }, + filesystem => { + description => "The desired filesystem.", + type => 'string', + enum => ['ext4', 'xfs'], + optional => 1, + default => 'ext4', + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $name = $param->{name}; - my $dev = $param->{device}; - my $node = $param->{node}; - my $type = $param->{filesystem} // 'ext4'; - my $path = "/mnt/pve/$name"; - my $mountunitname = PVE::Systemd::escape_unit($path, 1) . ".mount"; - my $mountunitpath = "/etc/systemd/system/$mountunitname"; + my $name = $param->{name}; + my $dev = $param->{device}; + my $node = $param->{node}; + my $type = $param->{filesystem} // 'ext4'; + my $path = "/mnt/pve/$name"; + my $mountunitname = PVE::Systemd::escape_unit($path, 1) . 
".mount"; + my $mountunitpath = "/etc/systemd/system/$mountunitname"; - $dev = PVE::Diskmanage::verify_blockdev_path($dev); - PVE::Diskmanage::assert_disk_unused($dev); + $dev = PVE::Diskmanage::verify_blockdev_path($dev); + PVE::Diskmanage::assert_disk_unused($dev); - my $storage_params = { - type => 'dir', - storage => $name, - content => 'rootdir,images,iso,backup,vztmpl,snippets', - is_mountpoint => 1, - path => $path, - nodes => $node, - }; - my $verify_params = [qw(path)]; + my $storage_params = { + type => 'dir', + storage => $name, + content => 'rootdir,images,iso,backup,vztmpl,snippets', + is_mountpoint => 1, + path => $path, + nodes => $node, + }; + my $verify_params = [qw(path)]; - if ($param->{add_storage}) { - $rpcenv->check($user, "/storage", ['Datastore.Allocate']); + if ($param->{add_storage}) { + $rpcenv->check($user, "/storage", ['Datastore.Allocate']); - # reserve the name and add as disabled, will be enabled below if creation works out - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params, 1); - } + # reserve the name and add as disabled, will be enabled below if creation works out + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, 1, + ); + } - my $mounted = PVE::Diskmanage::mounted_paths(); - die "the path for '${name}' is already mounted: ${path} ($mounted->{$path})\n" - if $mounted->{$path}; - die "a systemd mount unit already exists: ${mountunitpath}\n" if -e $mountunitpath; + my $mounted = PVE::Diskmanage::mounted_paths(); + die "the path for '${name}' is already mounted: ${path} ($mounted->{$path})\n" + if $mounted->{$path}; + die "a systemd mount unit already exists: ${mountunitpath}\n" if -e $mountunitpath; - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - PVE::Diskmanage::assert_disk_unused($dev); + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + PVE::Diskmanage::assert_disk_unused($dev); - my $part = $dev; + my $part = $dev; - if (PVE::Diskmanage::is_partition($dev)) { - eval { PVE::Diskmanage::change_parttype($dev, '8300'); }; - warn $@ if $@; - } else { - # create partition - my $cmd = [$SGDISK, '-n1', '-t1:8300', $dev]; - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd); + if (PVE::Diskmanage::is_partition($dev)) { + eval { PVE::Diskmanage::change_parttype($dev, '8300'); }; + warn $@ if $@; + } else { + # create partition + my $cmd = [$SGDISK, '-n1', '-t1:8300', $dev]; + print "# ", join(' ', @$cmd), "\n"; + run_command($cmd); - my ($devname) = $dev =~ m|^/dev/(.*)$|; - $part = "/dev/"; - dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.+/, sub { - my ($partition) = @_; - $part .= $partition; - }); - } + my ($devname) = $dev =~ m|^/dev/(.*)$|; + $part = "/dev/"; + dir_glob_foreach( + "/sys/block/$devname", + qr/\Q$devname\E.+/, + sub { + my ($partition) = @_; + $part .= $partition; + }, + ); + } - # create filesystem - my $cmd = [$MKFS, '-t', $type, $part]; - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd); + # create filesystem + my $cmd = [$MKFS, '-t', $type, $part]; + print "# ", join(' ', @$cmd), "\n"; + run_command($cmd); - # create systemd mount unit and enable & start it - my $ini = { - 'Unit' => { - 'Description' => "Mount storage '$name' under /mnt/pve", - }, - 'Install' => { - 'WantedBy' => 'multi-user.target', - }, - }; + # create systemd mount unit and enable & start it + my $ini = { + 'Unit' => { + 'Description' => "Mount storage '$name' under /mnt/pve", + }, + 'Install' => { + 'WantedBy' => 
'multi-user.target', + }, + }; - my $uuid_path; - my $uuid; + my $uuid_path; + my $uuid; - $cmd = [$BLKID, $part, '-o', 'export']; - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd, outfunc => sub { - my ($line) = @_; + $cmd = [$BLKID, $part, '-o', 'export']; + print "# ", join(' ', @$cmd), "\n"; + run_command( + $cmd, + outfunc => sub { + my ($line) = @_; - if ($line =~ m/^UUID=(.*)$/) { - $uuid = $1; - $uuid_path = "/dev/disk/by-uuid/$uuid"; - } - }); + if ($line =~ m/^UUID=(.*)$/) { + $uuid = $1; + $uuid_path = "/dev/disk/by-uuid/$uuid"; + } + }, + ); - die "could not get UUID of device '$part'\n" if !$uuid; + die "could not get UUID of device '$part'\n" if !$uuid; - $ini->{'Mount'} = { - 'What' => $uuid_path, - 'Where' => $path, - 'Type' => $type, - 'Options' => 'defaults', - }; + $ini->{'Mount'} = { + 'What' => $uuid_path, + 'Where' => $path, + 'Type' => $type, + 'Options' => 'defaults', + }; - $write_ini->($ini, $mountunitpath); + $write_ini->($ini, $mountunitpath); - PVE::Diskmanage::udevadm_trigger($part); + PVE::Diskmanage::udevadm_trigger($part); - run_command(['systemctl', 'daemon-reload']); - run_command(['systemctl', 'enable', $mountunitname]); - run_command(['systemctl', 'start', $mountunitname]); + run_command(['systemctl', 'daemon-reload']); + run_command(['systemctl', 'enable', $mountunitname]); + run_command(['systemctl', 'start', $mountunitname]); - if ($param->{add_storage}) { - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params); - } - }); - }; + if ($param->{add_storage}) { + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, + ); + } + }); + }; - return $rpcenv->fork_worker('dircreate', $name, $user, $worker); - }}); + return $rpcenv->fork_worker('dircreate', $name, $user, $worker); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '{name}', method => 'DELETE', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Unmounts the storage and removes the mount unit.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - 'cleanup-config' => { - description => "Marks associated storage(s) as not available on this node anymore ". - "or removes them from the configuration (if configured for this node only).", - type => 'boolean', - optional => 1, - default => 0, - }, - 'cleanup-disks' => { - description => "Also wipe disk so it can be repurposed afterwards.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => + "Marks associated storage(s) as not available on this node anymore " + . 
"or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, + 'cleanup-disks' => { + description => "Also wipe disk so it can be repurposed afterwards.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; + $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; - my $name = $param->{name}; - my $node = $param->{node}; + my $name = $param->{name}; + my $node = $param->{node}; - my $worker = sub { - my $path = "/mnt/pve/$name"; - my $mountunitname = PVE::Systemd::escape_unit($path, 1) . ".mount"; - my $mountunitpath = "/etc/systemd/system/$mountunitname"; + my $worker = sub { + my $path = "/mnt/pve/$name"; + my $mountunitname = PVE::Systemd::escape_unit($path, 1) . ".mount"; + my $mountunitpath = "/etc/systemd/system/$mountunitname"; - PVE::Diskmanage::locked_disk_action(sub { - my $to_wipe; - if ($param->{'cleanup-disks'}) { - my $unit = $read_ini->($mountunitpath); + PVE::Diskmanage::locked_disk_action(sub { + my $to_wipe; + if ($param->{'cleanup-disks'}) { + my $unit = $read_ini->($mountunitpath); - my $dev = PVE::Diskmanage::verify_blockdev_path($unit->{'Mount'}->{'What'}); - $to_wipe = $dev; + my $dev = PVE::Diskmanage::verify_blockdev_path($unit->{'Mount'}->{'What'}); + $to_wipe = $dev; - # clean up whole device if this is the only partition - $dev =~ s|^/dev/||; - my $info = PVE::Diskmanage::get_disks($dev, 1, 1); - die "unable to obtain information for disk '$dev'\n" if !$info->{$dev}; - $to_wipe = $info->{$dev}->{parent} - if $info->{$dev}->{parent} && scalar(keys $info->%*) == 2; - } + # clean up whole device if this is the only partition + $dev =~ s|^/dev/||; + my $info = PVE::Diskmanage::get_disks($dev, 1, 1); + die "unable to obtain information for disk '$dev'\n" if !$info->{$dev}; + $to_wipe = $info->{$dev}->{parent} + if $info->{$dev}->{parent} && scalar(keys $info->%*) == 2; + } - run_command(['systemctl', 'stop', $mountunitname]); - run_command(['systemctl', 'disable', $mountunitname]); + run_command(['systemctl', 'stop', $mountunitname]); + run_command(['systemctl', 'disable', $mountunitname]); - unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n"; + unlink $mountunitpath + or $! 
== ENOENT + or die "cannot remove $mountunitpath - $!\n"; - my $config_err; - if ($param->{'cleanup-config'}) { - my $match = sub { - my ($scfg) = @_; - return $scfg->{type} eq 'dir' && $scfg->{path} eq $path; - }; - eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; - warn $config_err = $@ if $@; - } + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'dir' && $scfg->{path} eq $path; + }; + eval { + PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); + }; + warn $config_err = $@ if $@; + } - if ($to_wipe) { - PVE::Diskmanage::wipe_blockdev($to_wipe); - PVE::Diskmanage::udevadm_trigger($to_wipe); - } + if ($to_wipe) { + PVE::Diskmanage::wipe_blockdev($to_wipe); + PVE::Diskmanage::udevadm_trigger($to_wipe); + } - die "config cleanup failed - $config_err" if $config_err; - }); - }; + die "config cleanup failed - $config_err" if $config_err; + }); + }; - return $rpcenv->fork_worker('dirremove', $name, $user, $worker); - }}); + return $rpcenv->fork_worker('dirremove', $name, $user, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Disks/LVM.pm b/src/PVE/API2/Disks/LVM.pm index 3c5bdfa..9f8f951 100644 --- a/src/PVE/API2/Disks/LVM.pm +++ b/src/PVE/API2/Disks/LVM.pm @@ -14,266 +14,277 @@ use PVE::RESTHandler; use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', proxyto => 'node', protected => 1, permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, description => "List LVM Volume Groups", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'object', - properties => { - leaf => { - type => 'boolean', - }, - children => { - type => 'array', - items => { - type => "object", - properties => { - leaf => { - type => 'boolean', - }, - name => { - type => 'string', - description => 'The name of the volume group', - }, - size => { - type => 'integer', - description => 'The size of the volume group in bytes', - }, - free => { - type => 'integer', - description => 'The free bytes in the volume group', - }, - children => { - optional => 1, - type => 'array', - description => 'The underlying physical volumes', - items => { - type => 'object', - properties => { - leaf => { - type => 'boolean', - }, - name => { - type => 'string', - description => 'The name of the physical volume', - }, - size => { - type => 'integer', - description => 'The size of the physical volume in bytes', - }, - free => { - type => 'integer', - description => 'The free bytes in the physical volume', - }, - }, - }, - }, - }, - }, - }, - }, + type => 'object', + properties => { + leaf => { + type => 'boolean', + }, + children => { + type => 'array', + items => { + type => "object", + properties => { + leaf => { + type => 'boolean', + }, + name => { + type => 'string', + description => 'The name of the volume group', + }, + size => { + type => 'integer', + description => 'The size of the volume group in bytes', + }, + free => { + type => 'integer', + description => 'The free bytes in the volume group', + }, + children => { + optional => 1, + type => 'array', + description => 'The underlying physical volumes', + items => { + type => 'object', + properties => { + leaf => { + type => 'boolean', + }, + name => { + type => 
'string', + description => 'The name of the physical volume', + }, + size => { + type => 'integer', + description => + 'The size of the physical volume in bytes', + }, + free => { + type => 'integer', + description => 'The free bytes in the physical volume', + }, + }, + }, + }, + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $result = []; + my $result = []; - my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); + my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); - foreach my $vg_name (sort keys %$vgs) { - my $vg = $vgs->{$vg_name}; - $vg->{name} = $vg_name; - $vg->{leaf} = 0; - foreach my $pv (@{$vg->{pvs}}) { - $pv->{leaf} = 1; - } - $vg->{children} = delete $vg->{pvs}; - push @$result, $vg; - } + foreach my $vg_name (sort keys %$vgs) { + my $vg = $vgs->{$vg_name}; + $vg->{name} = $vg_name; + $vg->{leaf} = 0; + foreach my $pv (@{ $vg->{pvs} }) { + $pv->{leaf} = 1; + } + $vg->{children} = delete $vg->{pvs}; + push @$result, $vg; + } - return { - leaf => 0, - children => $result, - }; - }}); + return { + leaf => 0, + children => $result, + }; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', path => '', method => 'POST', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Create an LVM Volume Group", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - device => { - type => 'string', - description => 'The block device you want to create the volume group on', - }, - add_storage => { - description => "Configure storage using the Volume Group", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + device => { + type => 'string', + description => 'The block device you want to create the volume group on', + }, + add_storage => { + description => "Configure storage using the Volume Group", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $name = $param->{name}; - my $dev = $param->{device}; - my $node = $param->{node}; + my $name = $param->{name}; + my $dev = $param->{device}; + my $node = $param->{node}; - $dev = PVE::Diskmanage::verify_blockdev_path($dev); - PVE::Diskmanage::assert_disk_unused($dev); + $dev = PVE::Diskmanage::verify_blockdev_path($dev); + PVE::Diskmanage::assert_disk_unused($dev); - my $storage_params = { - type => 'lvm', - vgname => $name, - storage => $name, - content => 'rootdir,images', - shared => 0, - nodes => $node, - }; - my $verify_params = [qw(vgname)]; + my $storage_params = { + type => 'lvm', + vgname => $name, + storage => $name, + content => 'rootdir,images', + shared => 0, + nodes => $node, + }; + my $verify_params = [qw(vgname)]; - if ($param->{add_storage}) { - $rpcenv->check($user, "/storage", ['Datastore.Allocate']); + if ($param->{add_storage}) { + $rpcenv->check($user, 
"/storage", ['Datastore.Allocate']); - # reserve the name and add as disabled, will be enabled below if creation works out - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params, 1); - } + # reserve the name and add as disabled, will be enabled below if creation works out + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, 1, + ); + } - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - PVE::Diskmanage::assert_disk_unused($dev); - die "volume group with name '${name}' already exists on node '${node}'\n" - if PVE::Storage::LVMPlugin::lvm_vgs()->{$name}; + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + PVE::Diskmanage::assert_disk_unused($dev); + die "volume group with name '${name}' already exists on node '${node}'\n" + if PVE::Storage::LVMPlugin::lvm_vgs()->{$name}; - if (PVE::Diskmanage::is_partition($dev)) { - eval { PVE::Diskmanage::change_parttype($dev, '8E00'); }; - warn $@ if $@; - } + if (PVE::Diskmanage::is_partition($dev)) { + eval { PVE::Diskmanage::change_parttype($dev, '8E00'); }; + warn $@ if $@; + } - PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name); + PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name); - PVE::Diskmanage::udevadm_trigger($dev); + PVE::Diskmanage::udevadm_trigger($dev); - if ($param->{add_storage}) { - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params); - } - }); - }; + if ($param->{add_storage}) { + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, + ); + } + }); + }; - return $rpcenv->fork_worker('lvmcreate', $name, $user, $worker); - }}); + return $rpcenv->fork_worker('lvmcreate', $name, $user, $worker); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '{name}', method => 'DELETE', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Remove an LVM Volume Group.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - 'cleanup-config' => { - description => "Marks associated storage(s) as not available on this node anymore ". - "or removes them from the configuration (if configured for this node only).", - type => 'boolean', - optional => 1, - default => 0, - }, - 'cleanup-disks' => { - description => "Also wipe disks so they can be repurposed afterwards.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => + "Marks associated storage(s) as not available on this node anymore " + . 
"or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, + 'cleanup-disks' => { + description => "Also wipe disks so they can be repurposed afterwards.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; + $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; - my $name = $param->{name}; - my $node = $param->{node}; + my $name = $param->{name}; + my $node = $param->{node}; - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); - die "no such volume group '$name'\n" if !$vgs->{$name}; + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); + die "no such volume group '$name'\n" if !$vgs->{$name}; - PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name); + PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name); - my $config_err; - if ($param->{'cleanup-config'}) { - my $match = sub { - my ($scfg) = @_; - return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name; - }; - eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; - warn $config_err = $@ if $@; - } + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name; + }; + eval { + PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); + }; + warn $config_err = $@ if $@; + } - if ($param->{'cleanup-disks'}) { - my $wiped = []; - eval { - for my $pv ($vgs->{$name}->{pvs}->@*) { - my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name}); - PVE::Diskmanage::wipe_blockdev($dev); - push $wiped->@*, $dev; - } - }; - my $err = $@; - PVE::Diskmanage::udevadm_trigger($wiped->@*); - die "cleanup failed - $err" if $err; - } + if ($param->{'cleanup-disks'}) { + my $wiped = []; + eval { + for my $pv ($vgs->{$name}->{pvs}->@*) { + my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name}); + PVE::Diskmanage::wipe_blockdev($dev); + push $wiped->@*, $dev; + } + }; + my $err = $@; + PVE::Diskmanage::udevadm_trigger($wiped->@*); + die "cleanup failed - $err" if $err; + } - die "config cleanup failed - $config_err" if $config_err; - }); - }; + die "config cleanup failed - $config_err" if $config_err; + }); + }; - return $rpcenv->fork_worker('lvmremove', $name, $user, $worker); - }}); + return $rpcenv->fork_worker('lvmremove', $name, $user, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Disks/LVMThin.pm b/src/PVE/API2/Disks/LVMThin.pm index f1c3957..a0ce497 100644 --- a/src/PVE/API2/Disks/LVMThin.pm +++ b/src/PVE/API2/Disks/LVMThin.pm @@ -15,255 +15,269 @@ use PVE::RESTHandler; use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', proxyto => 'node', protected => 1, permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, description => "List LVM thinpools", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + 
node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => 'object', - properties => { - lv => { - type => 'string', - description => 'The name of the thinpool.', - }, - vg => { - type => 'string', - description => 'The associated volume group.', - }, - lv_size => { - type => 'integer', - description => 'The size of the thinpool in bytes.', - }, - used => { - type => 'integer', - description => 'The used bytes of the thinpool.', - }, - metadata_size => { - type => 'integer', - description => 'The size of the metadata lv in bytes.', - }, - metadata_used => { - type => 'integer', - description => 'The used bytes of the metadata lv.', - }, - }, - }, + type => 'array', + items => { + type => 'object', + properties => { + lv => { + type => 'string', + description => 'The name of the thinpool.', + }, + vg => { + type => 'string', + description => 'The associated volume group.', + }, + lv_size => { + type => 'integer', + description => 'The size of the thinpool in bytes.', + }, + used => { + type => 'integer', + description => 'The used bytes of the thinpool.', + }, + metadata_size => { + type => 'integer', + description => 'The size of the metadata lv in bytes.', + }, + metadata_used => { + type => 'integer', + description => 'The used bytes of the metadata lv.', + }, + }, + }, }, code => sub { - my ($param) = @_; - return PVE::Storage::LvmThinPlugin::list_thinpools(undef); - }}); + my ($param) = @_; + return PVE::Storage::LvmThinPlugin::list_thinpools(undef); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', path => '', method => 'POST', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Create an LVM thinpool", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - device => { - type => 'string', - description => 'The block device you want to create the thinpool on.', - }, - add_storage => { - description => "Configure storage using the thinpool.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + device => { + type => 'string', + description => 'The block device you want to create the thinpool on.', + }, + add_storage => { + description => "Configure storage using the thinpool.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $name = $param->{name}; - my $dev = $param->{device}; - my $node = $param->{node}; + my $name = $param->{name}; + my $dev = $param->{device}; + my $node = $param->{node}; - $dev = PVE::Diskmanage::verify_blockdev_path($dev); - PVE::Diskmanage::assert_disk_unused($dev); + $dev = PVE::Diskmanage::verify_blockdev_path($dev); + PVE::Diskmanage::assert_disk_unused($dev); - my $storage_params = { - type => 'lvmthin', - vgname => $name, - thinpool => $name, - storage => 
$name, - content => 'rootdir,images', - nodes => $node, - }; - my $verify_params = [qw(vgname thinpool)]; + my $storage_params = { + type => 'lvmthin', + vgname => $name, + thinpool => $name, + storage => $name, + content => 'rootdir,images', + nodes => $node, + }; + my $verify_params = [qw(vgname thinpool)]; - if ($param->{add_storage}) { - $rpcenv->check($user, "/storage", ['Datastore.Allocate']); + if ($param->{add_storage}) { + $rpcenv->check($user, "/storage", ['Datastore.Allocate']); - # reserve the name and add as disabled, will be enabled below if creation works out - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params, 1); - } + # reserve the name and add as disabled, will be enabled below if creation works out + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, 1, + ); + } - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - PVE::Diskmanage::assert_disk_unused($dev); + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + PVE::Diskmanage::assert_disk_unused($dev); - die "volume group with name '${name}' already exists on node '${node}'\n" - if PVE::Storage::LVMPlugin::lvm_vgs()->{$name}; + die "volume group with name '${name}' already exists on node '${node}'\n" + if PVE::Storage::LVMPlugin::lvm_vgs()->{$name}; - if (PVE::Diskmanage::is_partition($dev)) { - eval { PVE::Diskmanage::change_parttype($dev, '8E00'); }; - warn $@ if $@; - } + if (PVE::Diskmanage::is_partition($dev)) { + eval { PVE::Diskmanage::change_parttype($dev, '8E00'); }; + warn $@ if $@; + } - PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name); - my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev); - # keep some free space just in case - my $datasize = $pv->{size} - 128*1024; - # default to 1% for metadata - my $metadatasize = $datasize/100; - # but at least 1G, as recommended in lvmthin man - $metadatasize = 1024*1024 if $metadatasize < 1024*1024; - # but at most 16G, which is the current lvm max - $metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024; - # shrink data by needed amount for metadata - $datasize -= 2*$metadatasize; + PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name); + my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev); + # keep some free space just in case + my $datasize = $pv->{size} - 128 * 1024; + # default to 1% for metadata + my $metadatasize = $datasize / 100; + # but at least 1G, as recommended in lvmthin man + $metadatasize = 1024 * 1024 if $metadatasize < 1024 * 1024; + # but at most 16G, which is the current lvm max + $metadatasize = 16 * 1024 * 1024 if $metadatasize > 16 * 1024 * 1024; + # shrink data by needed amount for metadata + $datasize -= 2 * $metadatasize; - run_command([ - '/sbin/lvcreate', - '--type', 'thin-pool', - "-L${datasize}K", - '--poolmetadatasize', "${metadatasize}K", - '-n', $name, - $name - ]); + run_command([ + '/sbin/lvcreate', + '--type', + 'thin-pool', + "-L${datasize}K", + '--poolmetadatasize', + "${metadatasize}K", + '-n', + $name, + $name, + ]); - PVE::Diskmanage::udevadm_trigger($dev); + PVE::Diskmanage::udevadm_trigger($dev); - if ($param->{add_storage}) { - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params); - } - }); - }; + if ($param->{add_storage}) { + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, + ); + } + }); + }; - return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker); - }}); + return 
$rpcenv->fork_worker('lvmthincreate', $name, $user, $worker); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '{name}', method => 'DELETE', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Remove an LVM thin pool.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - 'volume-group' => get_standard_option('pve-storage-id'), - 'cleanup-config' => { - description => "Marks associated storage(s) as not available on this node anymore ". - "or removes them from the configuration (if configured for this node only).", - type => 'boolean', - optional => 1, - default => 0, - }, - 'cleanup-disks' => { - description => "Also wipe disks so they can be repurposed afterwards.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + 'volume-group' => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => + "Marks associated storage(s) as not available on this node anymore " + . "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, + 'cleanup-disks' => { + description => "Also wipe disks so they can be repurposed afterwards.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; + $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; - my $vg = $param->{'volume-group'}; - my $lv = $param->{name}; - my $node = $param->{node}; + my $vg = $param->{'volume-group'}; + my $lv = $param->{name}; + my $node = $param->{node}; - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools(); + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools(); - die "no such thin pool ${vg}/${lv}\n" - if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*; + die "no such thin pool ${vg}/${lv}\n" + if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*; - run_command(['lvremove', '-y', "${vg}/${lv}"]); + run_command(['lvremove', '-y', "${vg}/${lv}"]); - my $config_err; - if ($param->{'cleanup-config'}) { - my $match = sub { - my ($scfg) = @_; - return $scfg->{type} eq 'lvmthin' - && $scfg->{vgname} eq $vg - && $scfg->{thinpool} eq $lv; - }; - eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; - warn $config_err = $@ if $@; - } + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return + $scfg->{type} eq 'lvmthin' + && $scfg->{vgname} eq $vg + && $scfg->{thinpool} eq $lv; + }; + eval { + 
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); + }; + warn $config_err = $@ if $@; + } - if ($param->{'cleanup-disks'}) { - my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); + if ($param->{'cleanup-disks'}) { + my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); - die "no such volume group '$vg'\n" if !$vgs->{$vg}; - die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0; + die "no such volume group '$vg'\n" if !$vgs->{$vg}; + die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0; - my $wiped = []; - eval { - for my $pv ($vgs->{$vg}->{pvs}->@*) { - my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name}); - PVE::Diskmanage::wipe_blockdev($dev); - push $wiped->@*, $dev; - } - }; - my $err = $@; - PVE::Diskmanage::udevadm_trigger($wiped->@*); - die "cleanup failed - $err" if $err; - } + my $wiped = []; + eval { + for my $pv ($vgs->{$vg}->{pvs}->@*) { + my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name}); + PVE::Diskmanage::wipe_blockdev($dev); + push $wiped->@*, $dev; + } + }; + my $err = $@; + PVE::Diskmanage::udevadm_trigger($wiped->@*); + die "cleanup failed - $err" if $err; + } - die "config cleanup failed - $config_err" if $config_err; - }); - }; + die "config cleanup failed - $config_err" if $config_err; + }); + }; - return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker); - }}); + return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Disks/ZFS.pm b/src/PVE/API2/Disks/ZFS.pm index 6fb6bd6..7dae404 100644 --- a/src/PVE/API2/Disks/ZFS.pm +++ b/src/PVE/API2/Disks/ZFS.pm @@ -19,592 +19,637 @@ my $ZPOOL = '/sbin/zpool'; my $ZFS = '/sbin/zfs'; sub get_pool_data { - die "zfsutils-linux not installed\n" if ! -f $ZPOOL; + die "zfsutils-linux not installed\n" if !-f $ZPOOL; my $propnames = [qw(name size alloc free frag dedup health)]; my $numbers = { - size => 1, - alloc => 1, - free => 1, - frag => 1, - dedup => 1, + size => 1, + alloc => 1, + free => 1, + frag => 1, + dedup => 1, }; my $pools = []; - run_command([$ZPOOL, 'list', '-HpPLo', join(',', @$propnames)], outfunc => sub { - my ($line) = @_; + run_command( + [$ZPOOL, 'list', '-HpPLo', join(',', @$propnames)], + outfunc => sub { + my ($line) = @_; - my @props = split('\s+', trim($line)); - my $pool = {}; - for (my $i = 0; $i < scalar(@$propnames); $i++) { - if ($numbers->{$propnames->[$i]}) { - $pool->{$propnames->[$i]} = $props[$i] + 0; - } else { - $pool->{$propnames->[$i]} = $props[$i]; - } - } + my @props = split('\s+', trim($line)); + my $pool = {}; + for (my $i = 0; $i < scalar(@$propnames); $i++) { + if ($numbers->{ $propnames->[$i] }) { + $pool->{ $propnames->[$i] } = $props[$i] + 0; + } else { + $pool->{ $propnames->[$i] } = $props[$i]; + } + } - push @$pools, $pool; - }); + push @$pools, $pool; + }, + ); return $pools; } -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', proxyto => 'node', protected => 1, permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, description => "List Zpools.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => 'object', - properties => { - name => { - type => 'string', - description => "", - }, - size => { - type => 'integer', - description => 
"", - }, - alloc => { - type => 'integer', - description => "", - }, - free => { - type => 'integer', - description => "", - }, - frag => { - type => 'integer', - description => "", - }, - dedup => { - type => 'number', - description => "", - }, - health => { - type => 'string', - description => "", - }, - }, - }, - links => [ { rel => 'child', href => "{name}" } ], + type => 'array', + items => { + type => 'object', + properties => { + name => { + type => 'string', + description => "", + }, + size => { + type => 'integer', + description => "", + }, + alloc => { + type => 'integer', + description => "", + }, + free => { + type => 'integer', + description => "", + }, + frag => { + type => 'integer', + description => "", + }, + dedup => { + type => 'number', + description => "", + }, + health => { + type => 'string', + description => "", + }, + }, + }, + links => [{ rel => 'child', href => "{name}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - return get_pool_data(); - }}); + return get_pool_data(); + }, +}); sub preparetree { my ($el) = @_; delete $el->{lvl}; - if ($el->{children} && scalar(@{$el->{children}})) { - $el->{leaf} = 0; - foreach my $child (@{$el->{children}}) { - preparetree($child); - } + if ($el->{children} && scalar(@{ $el->{children} })) { + $el->{leaf} = 0; + foreach my $child (@{ $el->{children} }) { + preparetree($child); + } } else { - $el->{leaf} = 1; + $el->{leaf} = 1; } } - -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'detail', path => '{name}', method => 'GET', proxyto => 'node', protected => 1, permissions => { - check => ['perm', '/', ['Sys.Audit']], + check => ['perm', '/', ['Sys.Audit']], }, description => "Get details about a zpool.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + }, }, returns => { - type => 'object', - properties => { - name => { - type => 'string', - description => 'The name of the zpool.', - }, - state => { - type => 'string', - description => 'The state of the zpool.', - }, - status => { - optional => 1, - type => 'string', - description => 'Information about the state of the zpool.', - }, - action => { - optional => 1, - type => 'string', - description => 'Information about the recommended action to fix the state.', - }, - scan => { - optional => 1, - type => 'string', - description => 'Information about the last/current scrub.', - }, - errors => { - type => 'string', - description => 'Information about the errors on the zpool.', - }, - children => { - type => 'array', - description => "The pool configuration information, including the vdevs for each section (e.g. spares, cache), may be nested.", - items => { - type => 'object', - properties => { - name => { - type => 'string', - description => 'The name of the vdev or section.', - }, - state => { - optional => 1, - type => 'string', - description => 'The state of the vdev.', - }, - read => { - optional => 1, - type => 'number', - }, - write => { - optional => 1, - type => 'number', - }, - cksum => { - optional => 1, - type => 'number', - }, - msg => { - type => 'string', - description => 'An optional message about the vdev.' 
- } - }, - }, - }, - }, + type => 'object', + properties => { + name => { + type => 'string', + description => 'The name of the zpool.', + }, + state => { + type => 'string', + description => 'The state of the zpool.', + }, + status => { + optional => 1, + type => 'string', + description => 'Information about the state of the zpool.', + }, + action => { + optional => 1, + type => 'string', + description => 'Information about the recommended action to fix the state.', + }, + scan => { + optional => 1, + type => 'string', + description => 'Information about the last/current scrub.', + }, + errors => { + type => 'string', + description => 'Information about the errors on the zpool.', + }, + children => { + type => 'array', + description => + "The pool configuration information, including the vdevs for each section (e.g. spares, cache), may be nested.", + items => { + type => 'object', + properties => { + name => { + type => 'string', + description => 'The name of the vdev or section.', + }, + state => { + optional => 1, + type => 'string', + description => 'The state of the vdev.', + }, + read => { + optional => 1, + type => 'number', + }, + write => { + optional => 1, + type => 'number', + }, + cksum => { + optional => 1, + type => 'number', + }, + msg => { + type => 'string', + description => 'An optional message about the vdev.', + }, + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - if (!-f $ZPOOL) { - die "zfsutils-linux not installed\n"; - } + if (!-f $ZPOOL) { + die "zfsutils-linux not installed\n"; + } - my $cmd = [$ZPOOL, 'status', '-P', $param->{name}]; + my $cmd = [$ZPOOL, 'status', '-P', $param->{name}]; - my $pool = { - lvl => 0, - }; + my $pool = { + lvl => 0, + }; - my $curfield; - my $config = 0; + my $curfield; + my $config = 0; - my $stack = [$pool]; - my $curlvl = 0; + my $stack = [$pool]; + my $curlvl = 0; - run_command($cmd, outfunc => sub { - my ($line) = @_; + run_command( + $cmd, + outfunc => sub { + my ($line) = @_; - if ($line =~ m/^\s*(\S+): (\S+.*)$/) { - $curfield = $1; - $pool->{$curfield} = $2; + if ($line =~ m/^\s*(\S+): (\S+.*)$/) { + $curfield = $1; + $pool->{$curfield} = $2; - $config = 0 if $curfield eq 'errors'; - } elsif (!$config && $line =~ m/^\s+(\S+.*)$/) { - $pool->{$curfield} .= " " . $1; - } elsif (!$config && $line =~ m/^\s*config:/) { - $config = 1; - } elsif ($config && $line =~ m/^(\s+)(\S+)\s*(\S+)?(?:\s+(\S+)\s+(\S+)\s+(\S+))?\s*(.*)$/) { - my ($space, $name, $state, $read, $write, $cksum, $msg) = ($1, $2, $3, $4, $5, $6, $7); - if ($name ne "NAME") { - my $lvl = int(length($space) / 2) + 1; # two spaces per level - my $vdev = { - name => $name, - msg => $msg, - lvl => $lvl, - }; + $config = 0 if $curfield eq 'errors'; + } elsif (!$config && $line =~ m/^\s+(\S+.*)$/) { + $pool->{$curfield} .= " " . 
$1; + } elsif (!$config && $line =~ m/^\s*config:/) { + $config = 1; + } elsif ( + $config + && $line =~ m/^(\s+)(\S+)\s*(\S+)?(?:\s+(\S+)\s+(\S+)\s+(\S+))?\s*(.*)$/ + ) { + my ($space, $name, $state, $read, $write, $cksum, $msg) = + ($1, $2, $3, $4, $5, $6, $7); + if ($name ne "NAME") { + my $lvl = int(length($space) / 2) + 1; # two spaces per level + my $vdev = { + name => $name, + msg => $msg, + lvl => $lvl, + }; - $vdev->{state} = $state if defined($state); - $vdev->{read} = $read + 0 if defined($read); - $vdev->{write} = $write + 0 if defined($write); - $vdev->{cksum} = $cksum + 0 if defined($cksum); + $vdev->{state} = $state if defined($state); + $vdev->{read} = $read + 0 if defined($read); + $vdev->{write} = $write + 0 if defined($write); + $vdev->{cksum} = $cksum + 0 if defined($cksum); - my $cur = pop @$stack; + my $cur = pop @$stack; - if ($lvl > $curlvl) { - $cur->{children} = [ $vdev ]; - } elsif ($lvl == $curlvl) { - $cur = pop @$stack; - push @{$cur->{children}}, $vdev; - } else { - while ($lvl <= $cur->{lvl} && $cur->{lvl} != 0) { - $cur = pop @$stack; - } - push @{$cur->{children}}, $vdev; - } + if ($lvl > $curlvl) { + $cur->{children} = [$vdev]; + } elsif ($lvl == $curlvl) { + $cur = pop @$stack; + push @{ $cur->{children} }, $vdev; + } else { + while ($lvl <= $cur->{lvl} && $cur->{lvl} != 0) { + $cur = pop @$stack; + } + push @{ $cur->{children} }, $vdev; + } - push @$stack, $cur; - push @$stack, $vdev; - $curlvl = $lvl; - } - } - }); + push @$stack, $cur; + push @$stack, $vdev; + $curlvl = $lvl; + } + } + }, + ); - # change treenodes for extjs tree - $pool->{name} = delete $pool->{pool}; - preparetree($pool); + # change treenodes for extjs tree + $pool->{name} = delete $pool->{pool}; + preparetree($pool); - return $pool; - }}); + return $pool; + }, +}); my $draid_config_format = { spares => { - type => 'integer', - minimum => 0, - description => 'Number of dRAID spares.', + type => 'integer', + minimum => 0, + description => 'Number of dRAID spares.', }, data => { - type => 'integer', - minimum => 1, - description => 'The number of data devices per redundancy group. (dRAID)', + type => 'integer', + minimum => 1, + description => 'The number of data devices per redundancy group. 
(dRAID)', }, }; -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', path => '', method => 'POST', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Create a ZFS pool.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - raidlevel => { - type => 'string', - description => 'The RAID level to use.', - enum => [ - 'single', 'mirror', - 'raid10', 'raidz', 'raidz2', 'raidz3', - 'draid', 'draid2', 'draid3', - ], - }, - devices => { - type => 'string', format => 'string-list', - description => 'The block devices you want to create the zpool on.', - }, - 'draid-config' => { - type => 'string', - format => $draid_config_format, - optional => 1, - }, - ashift => { - type => 'integer', - minimum => 9, - maximum => 16, - optional => 1, - default => 12, - description => 'Pool sector size exponent.', - }, - compression => { - type => 'string', - description => 'The compression algorithm to use.', - enum => ['on', 'off', 'gzip', 'lz4', 'lzjb', 'zle', 'zstd'], - optional => 1, - default => 'on', - }, - add_storage => { - description => "Configure storage using the zpool.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + raidlevel => { + type => 'string', + description => 'The RAID level to use.', + enum => [ + 'single', + 'mirror', + 'raid10', + 'raidz', + 'raidz2', + 'raidz3', + 'draid', + 'draid2', + 'draid3', + ], + }, + devices => { + type => 'string', + format => 'string-list', + description => 'The block devices you want to create the zpool on.', + }, + 'draid-config' => { + type => 'string', + format => $draid_config_format, + optional => 1, + }, + ashift => { + type => 'integer', + minimum => 9, + maximum => 16, + optional => 1, + default => 12, + description => 'Pool sector size exponent.', + }, + compression => { + type => 'string', + description => 'The compression algorithm to use.', + enum => ['on', 'off', 'gzip', 'lz4', 'lzjb', 'zle', 'zstd'], + optional => 1, + default => 'on', + }, + add_storage => { + description => "Configure storage using the zpool.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $name = $param->{name}; - my $node = $param->{node}; - my $devs = [PVE::Tools::split_list($param->{devices})]; - my $raidlevel = $param->{raidlevel}; - my $compression = $param->{compression} // 'on'; + my $name = $param->{name}; + my $node = $param->{node}; + my $devs = [PVE::Tools::split_list($param->{devices})]; + my $raidlevel = $param->{raidlevel}; + my $compression = $param->{compression} // 'on'; - my $draid_config; - if (exists $param->{'draid-config'}) { - die "draid-config set without using dRAID level\n" if $raidlevel !~ m/^draid/; - $draid_config = parse_property_string($draid_config_format, $param->{'draid-config'}); - } + my 
$draid_config; + if (exists $param->{'draid-config'}) { + die "draid-config set without using dRAID level\n" if $raidlevel !~ m/^draid/; + $draid_config = + parse_property_string($draid_config_format, $param->{'draid-config'}); + } - for my $dev (@$devs) { - $dev = PVE::Diskmanage::verify_blockdev_path($dev); - PVE::Diskmanage::assert_disk_unused($dev); + for my $dev (@$devs) { + $dev = PVE::Diskmanage::verify_blockdev_path($dev); + PVE::Diskmanage::assert_disk_unused($dev); - } - my $storage_params = { - type => 'zfspool', - pool => $name, - storage => $name, - content => 'rootdir,images', - nodes => $node, - }; - my $verify_params = [qw(pool)]; + } + my $storage_params = { + type => 'zfspool', + pool => $name, + storage => $name, + content => 'rootdir,images', + nodes => $node, + }; + my $verify_params = [qw(pool)]; - if ($param->{add_storage}) { - $rpcenv->check($user, "/storage", ['Datastore.Allocate']); + if ($param->{add_storage}) { + $rpcenv->check($user, "/storage", ['Datastore.Allocate']); - # reserve the name and add as disabled, will be enabled below if creation works out - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params, 1); - } + # reserve the name and add as disabled, will be enabled below if creation works out + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, 1, + ); + } - my $pools = get_pool_data(); - die "pool '${name}' already exists on node '${node}'\n" - if grep { $_->{name} eq $name } @{$pools}; + my $pools = get_pool_data(); + die "pool '${name}' already exists on node '${node}'\n" + if grep { $_->{name} eq $name } @{$pools}; - my $numdisks = scalar(@$devs); - my $mindisks = { - single => 1, - mirror => 2, - raid10 => 4, - raidz => 3, - raidz2 => 4, - raidz3 => 5, - draid => 3, - draid2 => 4, - draid3 => 5, - }; + my $numdisks = scalar(@$devs); + my $mindisks = { + single => 1, + mirror => 2, + raid10 => 4, + raidz => 3, + raidz2 => 4, + raidz3 => 5, + draid => 3, + draid2 => 4, + draid3 => 5, + }; - # sanity checks - die "raid10 needs an even number of disks\n" - if $raidlevel eq 'raid10' && $numdisks % 2 != 0; + # sanity checks + die "raid10 needs an even number of disks\n" + if $raidlevel eq 'raid10' && $numdisks % 2 != 0; - die "please give only one disk for single disk mode\n" - if $raidlevel eq 'single' && $numdisks > 1; + die "please give only one disk for single disk mode\n" + if $raidlevel eq 'single' && $numdisks > 1; - die "$raidlevel needs at least $mindisks->{$raidlevel} disks\n" - if $numdisks < $mindisks->{$raidlevel}; + die "$raidlevel needs at least $mindisks->{$raidlevel} disks\n" + if $numdisks < $mindisks->{$raidlevel}; - # draid checks - if ($raidlevel =~ m/^draid/) { - # bare minimum would be two drives: one for parity & one for data, but forbid that - # because it makes no sense in practice, at least one spare disk should be used - my $draid_min = $mindisks->{$raidlevel} - 2; - if ($draid_config) { - $draid_min += $draid_config->{data} || 0; - $draid_min += $draid_config->{spares} || 0; - } - die "At least $draid_min disks needed for current dRAID config\n" - if $numdisks < $draid_min; - } + # draid checks + if ($raidlevel =~ m/^draid/) { + # bare minimum would be two drives: one for parity & one for data, but forbid that + # because it makes no sense in practice, at least one spare disk should be used + my $draid_min = $mindisks->{$raidlevel} - 2; + if ($draid_config) { + $draid_min += $draid_config->{data} || 0; + $draid_min += $draid_config->{spares} 
|| 0; + } + die "At least $draid_min disks needed for current dRAID config\n" + if $numdisks < $draid_min; + } - my $code = sub { - for my $dev (@$devs) { - PVE::Diskmanage::assert_disk_unused($dev); + my $code = sub { + for my $dev (@$devs) { + PVE::Diskmanage::assert_disk_unused($dev); - my $is_partition = PVE::Diskmanage::is_partition($dev); + my $is_partition = PVE::Diskmanage::is_partition($dev); - if ($is_partition) { - eval { - PVE::Diskmanage::change_parttype($dev, '6a898cc3-1dd2-11b2-99a6-080020736631'); - }; - warn $@ if $@; - } + if ($is_partition) { + eval { + PVE::Diskmanage::change_parttype( + $dev, + '6a898cc3-1dd2-11b2-99a6-080020736631', + ); + }; + warn $@ if $@; + } - my $sysfsdev = $is_partition ? PVE::Diskmanage::get_blockdev($dev) : $dev; + my $sysfsdev = $is_partition ? PVE::Diskmanage::get_blockdev($dev) : $dev; - $sysfsdev =~ s!^/dev/!/sys/block/!; - if ($is_partition) { - my $part = $dev =~ s!^/dev/!!r; - $sysfsdev .= "/${part}"; - } + $sysfsdev =~ s!^/dev/!/sys/block/!; + if ($is_partition) { + my $part = $dev =~ s!^/dev/!!r; + $sysfsdev .= "/${part}"; + } - my $udevinfo = PVE::Diskmanage::get_udev_info($sysfsdev); - $dev = $udevinfo->{by_id_link} if defined($udevinfo->{by_id_link}); - } + my $udevinfo = PVE::Diskmanage::get_udev_info($sysfsdev); + $dev = $udevinfo->{by_id_link} if defined($udevinfo->{by_id_link}); + } - # create zpool with desired raidlevel - my $ashift = $param->{ashift} // 12; + # create zpool with desired raidlevel + my $ashift = $param->{ashift} // 12; - my $cmd = [$ZPOOL, 'create', '-o', "ashift=$ashift", $name]; + my $cmd = [$ZPOOL, 'create', '-o', "ashift=$ashift", $name]; - if ($raidlevel eq 'raid10') { - for (my $i = 0; $i < @$devs; $i+=2) { - push @$cmd, 'mirror', $devs->[$i], $devs->[$i+1]; - } - } elsif ($raidlevel eq 'single') { - push @$cmd, $devs->[0]; - } elsif ($raidlevel =~ m/^draid/) { - my $draid_cmd = $raidlevel; - $draid_cmd .= ":$draid_config->{data}d" if $draid_config->{data}; - $draid_cmd .= ":$draid_config->{spares}s" if $draid_config->{spares}; - push @$cmd, $draid_cmd, @$devs; - } else { - push @$cmd, $raidlevel, @$devs; - } + if ($raidlevel eq 'raid10') { + for (my $i = 0; $i < @$devs; $i += 2) { + push @$cmd, 'mirror', $devs->[$i], $devs->[$i + 1]; + } + } elsif ($raidlevel eq 'single') { + push @$cmd, $devs->[0]; + } elsif ($raidlevel =~ m/^draid/) { + my $draid_cmd = $raidlevel; + $draid_cmd .= ":$draid_config->{data}d" if $draid_config->{data}; + $draid_cmd .= ":$draid_config->{spares}s" if $draid_config->{spares}; + push @$cmd, $draid_cmd, @$devs; + } else { + push @$cmd, $raidlevel, @$devs; + } - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd); + print "# ", join(' ', @$cmd), "\n"; + run_command($cmd); - $cmd = [$ZFS, 'set', "compression=$compression", $name]; - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd); + $cmd = [$ZFS, 'set', "compression=$compression", $name]; + print "# ", join(' ', @$cmd), "\n"; + run_command($cmd); - if (-e '/lib/systemd/system/zfs-import@.service') { - my $importunit = 'zfs-import@'. PVE::Systemd::escape_unit($name, undef) . '.service'; - $cmd = ['systemctl', 'enable', $importunit]; - print "# ", join(' ', @$cmd), "\n"; - run_command($cmd); - } + if (-e '/lib/systemd/system/zfs-import@.service') { + my $importunit = + 'zfs-import@' . PVE::Systemd::escape_unit($name, undef) . 
'.service'; + $cmd = ['systemctl', 'enable', $importunit]; + print "# ", join(' ', @$cmd), "\n"; + run_command($cmd); + } - PVE::Diskmanage::udevadm_trigger($devs->@*); + PVE::Diskmanage::udevadm_trigger($devs->@*); - if ($param->{add_storage}) { - PVE::API2::Storage::Config->create_or_update( - $name, $node, $storage_params, $verify_params); - } - }; + if ($param->{add_storage}) { + PVE::API2::Storage::Config->create_or_update( + $name, $node, $storage_params, $verify_params, + ); + } + }; - return $rpcenv->fork_worker('zfscreate', $name, $user, sub { - PVE::Diskmanage::locked_disk_action($code); - }); - }}); + return $rpcenv->fork_worker( + 'zfscreate', + $name, + $user, + sub { + PVE::Diskmanage::locked_disk_action($code); + }, + ); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '{name}', method => 'DELETE', proxyto => 'node', protected => 1, permissions => { - description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", - check => ['perm', '/', ['Sys.Modify']], + description => + "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'", + check => ['perm', '/', ['Sys.Modify']], }, description => "Destroy a ZFS pool.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - name => get_standard_option('pve-storage-id'), - 'cleanup-config' => { - description => "Marks associated storage(s) as not available on this node anymore ". - "or removes them from the configuration (if configured for this node only).", - type => 'boolean', - optional => 1, - default => 0, - }, - 'cleanup-disks' => { - description => "Also wipe disks so they can be repurposed afterwards.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => + "Marks associated storage(s) as not available on this node anymore " + . "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, + 'cleanup-disks' => { + description => "Also wipe disks so they can be repurposed afterwards.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; + $rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'}; - my $name = $param->{name}; - my $node = $param->{node}; + my $name = $param->{name}; + my $node = $param->{node}; - my $worker = sub { - PVE::Diskmanage::locked_disk_action(sub { - my $to_wipe = []; - if ($param->{'cleanup-disks'}) { - # Using -o name does not only output the name in combination with -v. - run_command(['zpool', 'list', '-vHPL', $name], outfunc => sub { - my ($line) = @_; + my $worker = sub { + PVE::Diskmanage::locked_disk_action(sub { + my $to_wipe = []; + if ($param->{'cleanup-disks'}) { + # Using -o name does not only output the name in combination with -v. 
+ run_command( + ['zpool', 'list', '-vHPL', $name], + outfunc => sub { + my ($line) = @_; - my ($name) = PVE::Tools::split_list($line); - return if $name !~ m|^/dev/.+|; + my ($name) = PVE::Tools::split_list($line); + return if $name !~ m|^/dev/.+|; - my $dev = PVE::Diskmanage::verify_blockdev_path($name); - my $wipe = $dev; + my $dev = PVE::Diskmanage::verify_blockdev_path($name); + my $wipe = $dev; - $dev =~ s|^/dev/||; - my $info = PVE::Diskmanage::get_disks($dev, 1, 1); - die "unable to obtain information for disk '$dev'\n" if !$info->{$dev}; + $dev =~ s|^/dev/||; + my $info = PVE::Diskmanage::get_disks($dev, 1, 1); + die "unable to obtain information for disk '$dev'\n" + if !$info->{$dev}; - # Wipe whole disk if usual ZFS layout with partition 9 as ZFS reserved. - my $parent = $info->{$dev}->{parent}; - if ($parent && scalar(keys $info->%*) == 3) { - $parent =~ s|^/dev/||; - my $info9 = $info->{"${parent}9"}; + # Wipe whole disk if usual ZFS layout with partition 9 as ZFS reserved. + my $parent = $info->{$dev}->{parent}; + if ($parent && scalar(keys $info->%*) == 3) { + $parent =~ s|^/dev/||; + my $info9 = $info->{"${parent}9"}; - $wipe = $info->{$dev}->{parent} # need leading /dev/ - if $info9 && $info9->{used} && $info9->{used} =~ m/^ZFS reserved/; - } + $wipe = $info->{$dev}->{parent} # need leading /dev/ + if $info9 + && $info9->{used} + && $info9->{used} =~ m/^ZFS reserved/; + } - push $to_wipe->@*, $wipe; - }); - } + push $to_wipe->@*, $wipe; + }, + ); + } - if (-e '/lib/systemd/system/zfs-import@.service') { - my $importunit = 'zfs-import@' . PVE::Systemd::escape_unit($name) . '.service'; - run_command(['systemctl', 'disable', $importunit]); - } + if (-e '/lib/systemd/system/zfs-import@.service') { + my $importunit = + 'zfs-import@' . PVE::Systemd::escape_unit($name) . 
'.service'; + run_command(['systemctl', 'disable', $importunit]); + } - run_command(['zpool', 'destroy', $name]); + run_command(['zpool', 'destroy', $name]); - my $config_err; - if ($param->{'cleanup-config'}) { - my $match = sub { - my ($scfg) = @_; - return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name; - }; - eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; - warn $config_err = $@ if $@; - } + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name; + }; + eval { + PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); + }; + warn $config_err = $@ if $@; + } - eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; }; - my $err = $@; - PVE::Diskmanage::udevadm_trigger($to_wipe->@*); - die "cleanup failed - $err" if $err; + eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; }; + my $err = $@; + PVE::Diskmanage::udevadm_trigger($to_wipe->@*); + die "cleanup failed - $err" if $err; - die "config cleanup failed - $config_err" if $config_err; - }); - }; + die "config cleanup failed - $config_err" if $config_err; + }); + }; - return $rpcenv->fork_worker('zfsremove', $name, $user, $worker); - }}); + return $rpcenv->fork_worker('zfsremove', $name, $user, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Storage/Config.pm b/src/PVE/API2/Storage/Config.pm index 7facc62..03440cf 100755 --- a/src/PVE/API2/Storage/Config.pm +++ b/src/PVE/API2/Storage/Config.pm @@ -29,10 +29,12 @@ my $api_storage_config = sub { my $scfg = dclone(PVE::Storage::storage_config($cfg, $storeid)); $scfg->{storage} = $storeid; $scfg->{digest} = $cfg->{digest}; - $scfg->{content} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content}); + $scfg->{content} = + PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content}); if ($scfg->{nodes}) { - $scfg->{nodes} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes}); + $scfg->{nodes} = + PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes}); } return $scfg; @@ -47,21 +49,21 @@ sub cleanup_storages_for_node { my $cluster_nodes = PVE::Cluster::get_nodelist(); for my $storeid (keys $config->{ids}->%*) { - my $scfg = PVE::Storage::storage_config($config, $storeid); - next if !$match->($scfg); + my $scfg = PVE::Storage::storage_config($config, $storeid); + next if !$match->($scfg); - my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* }; - next if !$nodes->{$node}; # not configured on $node, so nothing to do - delete $nodes->{$node}; + my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* }; + next if !$nodes->{$node}; # not configured on $node, so nothing to do + delete $nodes->{$node}; - if (scalar(keys $nodes->%*) > 0) { - $self->update({ - nodes => join(',', sort keys $nodes->%*), - storage => $storeid, - }); - } else { - $self->delete({storage => $storeid}); - } + if (scalar(keys $nodes->%*) > 0) { + $self->update({ + nodes => join(',', sort keys $nodes->%*), + storage => $storeid, + }); + } else { + $self->delete({ storage => $storeid }); + } } } @@ -80,355 +82,375 @@ sub create_or_update { my $scfg = PVE::Storage::storage_config($cfg, $sid, 1); if ($scfg) { - die "storage config for '${sid}' exists but no parameters to verify were provided\n" - if !$verify_params; + die "storage config for '${sid}' exists but no parameters to verify were provided\n" + if !$verify_params; - $node = PVE::INotify::nodename() 
if !$node || ($node eq 'localhost'); - die "Storage ID '${sid}' already exists on node ${node}\n" - if !defined($scfg->{nodes}) || $scfg->{nodes}->{$node}; + $node = PVE::INotify::nodename() if !$node || ($node eq 'localhost'); + die "Storage ID '${sid}' already exists on node ${node}\n" + if !defined($scfg->{nodes}) || $scfg->{nodes}->{$node}; - # check for type mismatch first to get a clear error - for my $key ('type', $verify_params->@*) { - if (!defined($scfg->{$key})) { - die "Option '${key}' is not configured for storage '$sid', " - ."expected it to be '$storage_params->{$key}'"; - } - if ($storage_params->{$key} ne $scfg->{$key}) { - die "Option '${key}' ($storage_params->{$key}) does not match " - ."existing storage configuration '$scfg->{$key}'\n"; - } - } + # check for type mismatch first to get a clear error + for my $key ('type', $verify_params->@*) { + if (!defined($scfg->{$key})) { + die "Option '${key}' is not configured for storage '$sid', " + . "expected it to be '$storage_params->{$key}'"; + } + if ($storage_params->{$key} ne $scfg->{$key}) { + die "Option '${key}' ($storage_params->{$key}) does not match " + . "existing storage configuration '$scfg->{$key}'\n"; + } + } } if (!$dryrun) { - if ($scfg) { - if ($scfg->{nodes}) { - $scfg->{nodes}->{$node} = 1; - $self->update({ - nodes => join(',', sort keys $scfg->{nodes}->%*), - storage => $sid, - }); - print "Added '${node}' to nodes for storage '${sid}'\n"; - } - } else { - $self->create($storage_params); - } + if ($scfg) { + if ($scfg->{nodes}) { + $scfg->{nodes}->{$node} = 1; + $self->update({ + nodes => join(',', sort keys $scfg->{nodes}->%*), + storage => $sid, + }); + print "Added '${node}' to nodes for storage '${sid}'\n"; + } + } else { + $self->create($storage_params); + } } } -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', description => "Storage index.", permissions => { - description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/'", - user => 'all', + description => + "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/'", + user => 'all', }, parameters => { - additionalProperties => 0, - properties => { - type => { - description => "Only list storage of specific type", - type => 'string', - enum => $storage_type_enum, - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + type => { + description => "Only list storage of specific type", + type => 'string', + enum => $storage_type_enum, + optional => 1, + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { storage => { type => 'string'} }, - }, - links => [ { rel => 'child', href => "{storage}" } ], + type => 'array', + items => { + type => "object", + properties => { storage => { type => 'string' } }, + }, + links => [{ rel => 'child', href => "{storage}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my @sids = PVE::Storage::storage_ids($cfg); + my @sids = PVE::Storage::storage_ids($cfg); - my $res = []; - foreach my $storeid (@sids) { - my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ]; - next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1); + my 
$res = []; + foreach my $storeid (@sids) { + my $privs = ['Datastore.Audit', 'Datastore.AllocateSpace']; + next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1); - my $scfg = &$api_storage_config($cfg, $storeid); - next if $param->{type} && $param->{type} ne $scfg->{type}; - push @$res, $scfg; - } + my $scfg = &$api_storage_config($cfg, $storeid); + next if $param->{type} && $param->{type} ne $scfg->{type}; + push @$res, $scfg; + } - return $res; - }}); + return $res; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'read', path => '{storage}', method => 'GET', description => "Read storage configuration.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Allocate']], + check => ['perm', '/storage/{storage}', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - storage => get_standard_option('pve-storage-id'), - }, + additionalProperties => 0, + properties => { + storage => get_standard_option('pve-storage-id'), + }, }, returns => { type => 'object' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - return &$api_storage_config($cfg, $param->{storage}); - }}); + return &$api_storage_config($cfg, $param->{storage}); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', protected => 1, path => '', method => 'POST', description => "Create a new storage.", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => PVE::Storage::Plugin->createSchema(), returns => { - type => 'object', - properties => { - storage => { - description => "The ID of the created storage.", - type => 'string', - }, - type => { - description => "The type of the created storage.", - type => 'string', - enum => $storage_type_enum, - }, - config => { - description => "Partial, possible server generated, configuration properties.", - type => 'object', - optional => 1, - additionalProperties => 1, - properties => { - 'encryption-key' => { - description => "The, possible auto-generated, encryption-key.", - optional => 1, - type => 'string', - }, - }, - }, - }, + type => 'object', + properties => { + storage => { + description => "The ID of the created storage.", + type => 'string', + }, + type => { + description => "The type of the created storage.", + type => 'string', + enum => $storage_type_enum, + }, + config => { + description => "Partial, possible server generated, configuration properties.", + type => 'object', + optional => 1, + additionalProperties => 1, + properties => { + 'encryption-key' => { + description => "The, possible auto-generated, encryption-key.", + optional => 1, + type => 'string', + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $type = extract_param($param, 'type'); - my $storeid = extract_param($param, 'storage'); + my $type = extract_param($param, 'type'); + my $storeid = extract_param($param, 'storage'); - # revent an empty nodelist. - # fix me in section config create never need an empty entity. - delete $param->{nodes} if !$param->{nodes}; + # revent an empty nodelist. + # fix me in section config create never need an empty entity. 
+ delete $param->{nodes} if !$param->{nodes}; - my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type); - my $sensitive = extract_sensitive_params($param, $sensitive_params, []); + my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type); + my $sensitive = extract_sensitive_params($param, $sensitive_params, []); - my $plugin = PVE::Storage::Plugin->lookup($type); - my $opts = $plugin->check_config($storeid, $param, 1, 1); + my $plugin = PVE::Storage::Plugin->lookup($type); + my $opts = $plugin->check_config($storeid, $param, 1, 1); - my $returned_config; - PVE::Storage::lock_storage_config(sub { - my $cfg = PVE::Storage::config(); + my $returned_config; + PVE::Storage::lock_storage_config( + sub { + my $cfg = PVE::Storage::config(); - if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) { - die "storage ID '$storeid' already defined\n"; - } + if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) { + die "storage ID '$storeid' already defined\n"; + } - $cfg->{ids}->{$storeid} = $opts; + $cfg->{ids}->{$storeid} = $opts; - $returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive); + $returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive); - if (defined($opts->{mkdir})) { # TODO: remove complete option in Proxmox VE 9 - warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed" - ." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n" - } + if (defined($opts->{mkdir})) { # TODO: remove complete option in Proxmox VE 9 + warn + "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed" + . " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"; + } - eval { - # try to activate if enabled on local node, - # we only do this to detect errors/problems sooner - if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) { - PVE::Storage::activate_storage($cfg, $storeid); - } - }; - if (my $err = $@) { - eval { $plugin->on_delete_hook($storeid, $opts) }; - warn "$@\n" if $@; - die $err; - } + eval { + # try to activate if enabled on local node, + # we only do this to detect errors/problems sooner + if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) { + PVE::Storage::activate_storage($cfg, $storeid); + } + }; + if (my $err = $@) { + eval { $plugin->on_delete_hook($storeid, $opts) }; + warn "$@\n" if $@; + die $err; + } - PVE::Storage::write_config($cfg); + PVE::Storage::write_config($cfg); - }, "create storage failed"); + }, + "create storage failed", + ); - my $res = { - storage => $storeid, - type => $type, - }; - $res->{config} = $returned_config if $returned_config; - return $res; - }}); + my $res = { + storage => $storeid, + type => $type, + }; + $res->{config} = $returned_config if $returned_config; + return $res; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'update', protected => 1, path => '{storage}', method => 'PUT', description => "Update storage configuration.", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => PVE::Storage::Plugin->updateSchema(), returns => { - type => 'object', - properties => { - storage => { - description => "The ID of the created storage.", - type => 'string', - }, - type => { - description => "The type of the created storage.", - type => 'string', - enum => $storage_type_enum, - }, - config => { - description => "Partial, possible server 
generated, configuration properties.", - type => 'object', - optional => 1, - additionalProperties => 1, - properties => { - 'encryption-key' => { - description => "The, possible auto-generated, encryption-key.", - optional => 1, - type => 'string', - }, - }, - }, - }, + type => 'object', + properties => { + storage => { + description => "The ID of the created storage.", + type => 'string', + }, + type => { + description => "The type of the created storage.", + type => 'string', + enum => $storage_type_enum, + }, + config => { + description => "Partial, possible server generated, configuration properties.", + type => 'object', + optional => 1, + additionalProperties => 1, + properties => { + 'encryption-key' => { + description => "The, possible auto-generated, encryption-key.", + optional => 1, + type => 'string', + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $storeid = extract_param($param, 'storage'); - my $digest = extract_param($param, 'digest'); - my $delete = extract_param($param, 'delete'); - my $type; + my $storeid = extract_param($param, 'storage'); + my $digest = extract_param($param, 'digest'); + my $delete = extract_param($param, 'delete'); + my $type; - if ($delete) { - $delete = [ PVE::Tools::split_list($delete) ]; - } + if ($delete) { + $delete = [PVE::Tools::split_list($delete)]; + } - my $returned_config; - PVE::Storage::lock_storage_config(sub { - my $cfg = PVE::Storage::config(); + my $returned_config; + PVE::Storage::lock_storage_config( + sub { + my $cfg = PVE::Storage::config(); - PVE::SectionConfig::assert_if_modified($cfg, $digest); + PVE::SectionConfig::assert_if_modified($cfg, $digest); - my $scfg = PVE::Storage::storage_config($cfg, $storeid); - $type = $scfg->{type}; + my $scfg = PVE::Storage::storage_config($cfg, $storeid); + $type = $scfg->{type}; - my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type); - my $sensitive = extract_sensitive_params($param, $sensitive_params, $delete); + my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type); + my $sensitive = extract_sensitive_params($param, $sensitive_params, $delete); - my $plugin = PVE::Storage::Plugin->lookup($type); - my $opts = $plugin->check_config($storeid, $param, 0, 1); + my $plugin = PVE::Storage::Plugin->lookup($type); + my $opts = $plugin->check_config($storeid, $param, 0, 1); - if ($delete) { - my $options = $plugin->private()->{options}->{$type}; - foreach my $k (@$delete) { - my $d = $options->{$k} || die "no such option '$k'\n"; - die "unable to delete required option '$k'\n" if !$d->{optional}; - die "unable to delete fixed option '$k'\n" if $d->{fixed}; - die "cannot set and delete property '$k' at the same time!\n" - if defined($opts->{$k}); + if ($delete) { + my $options = $plugin->private()->{options}->{$type}; + foreach my $k (@$delete) { + my $d = $options->{$k} || die "no such option '$k'\n"; + die "unable to delete required option '$k'\n" if !$d->{optional}; + die "unable to delete fixed option '$k'\n" if $d->{fixed}; + die "cannot set and delete property '$k' at the same time!\n" + if defined($opts->{$k}); - delete $scfg->{$k}; - } - } + delete $scfg->{$k}; + } + } - $returned_config = $plugin->on_update_hook($storeid, $opts, %$sensitive); + $returned_config = $plugin->on_update_hook($storeid, $opts, %$sensitive); - for my $k (keys %$opts) { - $scfg->{$k} = $opts->{$k}; - } + for my $k (keys %$opts) { + $scfg->{$k} = $opts->{$k}; + } - if (defined($scfg->{mkdir})) { # TODO: remove complete option in Proxmox VE 9 - 
warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed" - ." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n" - } + if (defined($scfg->{mkdir})) { # TODO: remove complete option in Proxmox VE 9 + warn + "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed" + . " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"; + } - PVE::Storage::write_config($cfg); + PVE::Storage::write_config($cfg); - }, "update storage failed"); + }, + "update storage failed", + ); - my $res = { - storage => $storeid, - type => $type, - }; - $res->{config} = $returned_config if $returned_config; - return $res; - }}); + my $res = { + storage => $storeid, + type => $type, + }; + $res->{config} = $returned_config if $returned_config; + return $res; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', protected => 1, path => '{storage}', # /storage/config/{storage} method => 'DELETE', description => "Delete storage configuration.", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage, - }), - }, + additionalProperties => 0, + properties => { + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage, + }, + ), + }, }, returns => { type => 'null' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $storeid = extract_param($param, 'storage'); + my $storeid = extract_param($param, 'storage'); - PVE::Storage::lock_storage_config(sub { - my $cfg = PVE::Storage::config(); + PVE::Storage::lock_storage_config( + sub { + my $cfg = PVE::Storage::config(); - my $scfg = PVE::Storage::storage_config($cfg, $storeid); + my $scfg = PVE::Storage::storage_config($cfg, $storeid); - die "can't remove storage - storage is used as base of another storage\n" - if PVE::Storage::storage_is_used($cfg, $storeid); + die "can't remove storage - storage is used as base of another storage\n" + if PVE::Storage::storage_is_used($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - $plugin->on_delete_hook($storeid, $scfg); + $plugin->on_delete_hook($storeid, $scfg); - delete $cfg->{ids}->{$storeid}; + delete $cfg->{ids}->{$storeid}; - PVE::Storage::write_config($cfg); + PVE::Storage::write_config($cfg); - }, "delete storage failed"); + }, + "delete storage failed", + ); - PVE::AccessControl::remove_storage_access($storeid); + PVE::AccessControl::remove_storage_access($storeid); - return undef; - }}); + return undef; + }, +}); 1; diff --git a/src/PVE/API2/Storage/Content.pm b/src/PVE/API2/Storage/Content.pm index 8fbfbe9..1fe7303 100644 --- a/src/PVE/API2/Storage/Content.pm +++ b/src/PVE/API2/Storage/Content.pm @@ -16,214 +16,248 @@ use PVE::SSHInfo; use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', description => "List storage content.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties 
=> 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - content => { - description => "Only list content of this type.", - type => 'string', format => 'pve-storage-content', - optional => 1, - completion => \&PVE::Storage::complete_content_type, - }, - vmid => get_standard_option('pve-vmid', { - description => "Only list images for this VM", - optional => 1, - completion => \&PVE::Cluster::complete_vmid, - }), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + content => { + description => "Only list content of this type.", + type => 'string', + format => 'pve-storage-content', + optional => 1, + completion => \&PVE::Storage::complete_content_type, + }, + vmid => get_standard_option( + 'pve-vmid', + { + description => "Only list images for this VM", + optional => 1, + completion => \&PVE::Cluster::complete_vmid, + }, + ), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - volid => { - description => "Volume identifier.", - type => 'string', - }, - vmid => { - description => "Associated Owner VMID.", - type => 'integer', - optional => 1, - }, - parent => { - description => "Volume identifier of parent (for linked cloned).", - type => 'string', - optional => 1, - }, - 'format' => { - description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)", - type => 'string', - }, - size => { - description => "Volume size in bytes.", - type => 'integer', - renderer => 'bytes', - }, - used => { - description => "Used space. Please note that most storage plugins " . - "do not report anything useful here.", - type => 'integer', - renderer => 'bytes', - optional => 1, - }, - ctime => { - description => "Creation time (seconds since the UNIX Epoch).", - type => 'integer', - minimum => 0, - optional => 1, - }, - notes => { - description => "Optional notes. If they contain multiple lines, only the first one is returned here.", - type => 'string', - optional => 1, - }, - encrypted => { - description => "If whole backup is encrypted, value is the fingerprint or '1' " - ." if encrypted. Only useful for the Proxmox Backup Server storage type.", - type => 'string', - optional => 1, - }, - verification => { - description => "Last backup verification result, only useful for PBS storages.", - type => 'object', - properties => { - state => { - description => "Last backup verification state.", - type => 'string', - }, - upid => { - description => "Last backup verification UPID.", - type => 'string', - }, - }, - optional => 1, - }, - protected => { - description => "Protection status. 
Currently only supported for backups.", - type => 'boolean', - optional => 1, - }, - }, - }, - links => [ { rel => 'child', href => "{volid}" } ], + type => 'array', + items => { + type => "object", + properties => { + volid => { + description => "Volume identifier.", + type => 'string', + }, + vmid => { + description => "Associated Owner VMID.", + type => 'integer', + optional => 1, + }, + parent => { + description => "Volume identifier of parent (for linked cloned).", + type => 'string', + optional => 1, + }, + 'format' => { + description => + "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)", + type => 'string', + }, + size => { + description => "Volume size in bytes.", + type => 'integer', + renderer => 'bytes', + }, + used => { + description => "Used space. Please note that most storage plugins " + . "do not report anything useful here.", + type => 'integer', + renderer => 'bytes', + optional => 1, + }, + ctime => { + description => "Creation time (seconds since the UNIX Epoch).", + type => 'integer', + minimum => 0, + optional => 1, + }, + notes => { + description => + "Optional notes. If they contain multiple lines, only the first one is returned here.", + type => 'string', + optional => 1, + }, + encrypted => { + description => + "If whole backup is encrypted, value is the fingerprint or '1' " + . " if encrypted. Only useful for the Proxmox Backup Server storage type.", + type => 'string', + optional => 1, + }, + verification => { + description => + "Last backup verification result, only useful for PBS storages.", + type => 'object', + properties => { + state => { + description => "Last backup verification state.", + type => 'string', + }, + upid => { + description => "Last backup verification UPID.", + type => 'string', + }, + }, + optional => 1, + }, + protected => { + description => "Protection status. 
Currently only supported for backups.", + type => 'boolean', + optional => 1, + }, + }, + }, + links => [{ rel => 'child', href => "{volid}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); + my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); - my $storeid = $param->{storage}; + my $storeid = $param->{storage}; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $vollist = PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content}); + my $vollist = + PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content}); - my $res = []; - foreach my $item (@$vollist) { - eval { PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $item->{volid}); }; - next if $@; - $item->{vmid} = int($item->{vmid}) if defined($item->{vmid}); - $item->{size} = int($item->{size}) if defined($item->{size}); - $item->{used} = int($item->{used}) if defined($item->{used}); - push @$res, $item; - } + my $res = []; + foreach my $item (@$vollist) { + eval { + PVE::Storage::check_volume_access( + $rpcenv, $authuser, $cfg, undef, $item->{volid}, + ); + }; + next if $@; + $item->{vmid} = int($item->{vmid}) if defined($item->{vmid}); + $item->{size} = int($item->{size}) if defined($item->{size}); + $item->{used} = int($item->{used}) if defined($item->{used}); + push @$res, $item; + } - return $res; - }}); + return $res; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'create', path => '', method => 'POST', description => "Allocate disk images.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.AllocateSpace']], + check => ['perm', '/storage/{storage}', ['Datastore.AllocateSpace']], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - filename => { - description => "The name of the file to create.", - type => 'string', - }, - vmid => get_standard_option('pve-vmid', { - description => "Specify owner VM", - completion => \&PVE::Cluster::complete_vmid, - }), - size => { - description => "Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)", - type => 'string', - pattern => '\d+[MG]?', - }, - format => get_standard_option('pve-storage-image-format', { - requires => 'size', - optional => 1, - }), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + filename => { + description => "The name of the file to create.", + type => 'string', + }, + vmid => get_standard_option( + 'pve-vmid', + { + description => "Specify owner VM", + completion => \&PVE::Cluster::complete_vmid, + }, + ), + size => { + description => + "Size in kilobyte (1024 bytes). 
Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)", + type => 'string', + pattern => '\d+[MG]?', + }, + format => get_standard_option( + 'pve-storage-image-format', + { + requires => 'size', + optional => 1, + }, + ), + }, }, returns => { - description => "Volume identifier", - type => 'string', + description => "Volume identifier", + type => 'string', }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $storeid = $param->{storage}; - my $name = $param->{filename}; - my $sizestr = $param->{size}; + my $storeid = $param->{storage}; + my $name = $param->{filename}; + my $sizestr = $param->{size}; - my $size; - if ($sizestr =~ m/^\d+$/) { - $size = $sizestr; - } elsif ($sizestr =~ m/^(\d+)M$/) { - $size = $1 * 1024; - } elsif ($sizestr =~ m/^(\d+)G$/) { - $size = $1 * 1024 * 1024; - } else { - raise_param_exc({ size => "unable to parse size '$sizestr'" }); - } + my $size; + if ($sizestr =~ m/^\d+$/) { + $size = $sizestr; + } elsif ($sizestr =~ m/^(\d+)M$/) { + $size = $1 * 1024; + } elsif ($sizestr =~ m/^(\d+)G$/) { + $size = $1 * 1024 * 1024; + } else { + raise_param_exc({ size => "unable to parse size '$sizestr'" }); + } - # extract FORMAT from name - if ($name =~ m/\.(raw|qcow2|vmdk)$/) { - my $fmt = $1; + # extract FORMAT from name + if ($name =~ m/\.(raw|qcow2|vmdk)$/) { + my $fmt = $1; - raise_param_exc({ format => "different storage formats ($param->{format} != $fmt)" }) - if $param->{format} && $param->{format} ne $fmt; + raise_param_exc({ + format => "different storage formats ($param->{format} != $fmt)" }) + if $param->{format} && $param->{format} ne $fmt; - $param->{format} = $fmt; - } + $param->{format} = $fmt; + } - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $volid = PVE::Storage::vdisk_alloc ($cfg, $storeid, $param->{vmid}, - $param->{format}, - $name, $size); + my $volid = PVE::Storage::vdisk_alloc( + $cfg, $storeid, $param->{vmid}, $param->{format}, $name, $size, + ); - return $volid; - }}); + return $volid; + }, +}); # we allow to pass volume names (without storage prefix) if the storage # is specified as separate parameter. @@ -233,257 +267,268 @@ my $real_volume_id = sub { my $volid; if ($volume =~ m/:/) { - eval { - my ($sid, $volname) = PVE::Storage::parse_volume_id ($volume); - die "storage ID mismatch ($sid != $storeid)\n" - if $storeid && $sid ne $storeid; - $volid = $volume; - $storeid = $sid; - }; - raise_param_exc({ volume => $@ }) if $@; + eval { + my ($sid, $volname) = PVE::Storage::parse_volume_id($volume); + die "storage ID mismatch ($sid != $storeid)\n" + if $storeid && $sid ne $storeid; + $volid = $volume; + $storeid = $sid; + }; + raise_param_exc({ volume => $@ }) if $@; } else { - raise_param_exc({ volume => "no storage specified - incomplete volume ID" }) - if !$storeid; + raise_param_exc({ volume => "no storage specified - incomplete volume ID" }) + if !$storeid; - $volid = "$storeid:$volume"; + $volid = "$storeid:$volume"; } return wantarray ? 
($volid, $storeid) : $volid; }; -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'info', path => '{volume}', method => 'GET', description => "Get volume attributes", permissions => { - description => "You need read access for the volume.", - user => 'all', + description => "You need read access for the volume.", + user => 'all', }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { optional => 1 }), - volume => { - description => "Volume identifier", - type => 'string', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id', { optional => 1 }), + volume => { + description => "Volume identifier", + type => 'string', + }, + }, }, returns => { - type => 'object', - properties => { - path => { - description => "The Path", - type => 'string', - }, - size => { - description => "Volume size in bytes.", - type => 'integer', - renderer => 'bytes', - }, - used => { - description => "Used space. Please note that most storage plugins " . - "do not report anything useful here.", - type => 'integer', - renderer => 'bytes', - }, - format => { - description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)", - type => 'string', - }, - notes => { - description => "Optional notes.", - optional => 1, - type => 'string', - }, - protected => { - description => "Protection status. Currently only supported for backups.", - type => 'boolean', - optional => 1, - }, - }, + type => 'object', + properties => { + path => { + description => "The Path", + type => 'string', + }, + size => { + description => "Volume size in bytes.", + type => 'integer', + renderer => 'bytes', + }, + used => { + description => "Used space. Please note that most storage plugins " + . "do not report anything useful here.", + type => 'integer', + renderer => 'bytes', + }, + format => { + description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)", + type => 'string', + }, + notes => { + description => "Optional notes.", + optional => 1, + type => 'string', + }, + protected => { + description => "Protection status. 
Currently only supported for backups.", + type => 'boolean', + optional => 1, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); + my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); + PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); - my $path = PVE::Storage::path($cfg, $volid); - my ($size, $format, $used, $parent) = PVE::Storage::volume_size_info($cfg, $volid); - die "volume_size_info on '$volid' failed - no format\n" if !$format; - die "volume_size_info on '$volid' failed - no size\n" if !defined($size); - die "volume '$volid' has size zero\n" if !$size && $format ne 'subvol'; + my $path = PVE::Storage::path($cfg, $volid); + my ($size, $format, $used, $parent) = PVE::Storage::volume_size_info($cfg, $volid); + die "volume_size_info on '$volid' failed - no format\n" if !$format; + die "volume_size_info on '$volid' failed - no size\n" if !defined($size); + die "volume '$volid' has size zero\n" if !$size && $format ne 'subvol'; - my $entry = { - path => $path, - size => int($size), # cast to integer in case it was changed to a string previously - used => int($used), - format => $format, - }; + my $entry = { + path => $path, + size => int($size), # cast to integer in case it was changed to a string previously + used => int($used), + format => $format, + }; - for my $attribute (qw(notes protected)) { - # keep going if fetching an optional attribute fails - eval { - my $value = PVE::Storage::get_volume_attribute($cfg, $volid, $attribute); - $entry->{$attribute} = $value if defined($value); - }; - warn $@ if $@; - } + for my $attribute (qw(notes protected)) { + # keep going if fetching an optional attribute fails + eval { + my $value = PVE::Storage::get_volume_attribute($cfg, $volid, $attribute); + $entry->{$attribute} = $value if defined($value); + }; + warn $@ if $@; + } - return $entry; - }}); + return $entry; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'updateattributes', path => '{volume}', method => 'PUT', description => "Update volume attributes", permissions => { - description => "You need read access for the volume.", - user => 'all', + description => "You need read access for the volume.", + user => 'all', }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { optional => 1 }), - volume => { - description => "Volume identifier", - type => 'string', - }, - notes => { - description => "The new notes.", - type => 'string', - optional => 1, - }, - protected => { - description => "Protection status. 
Currently only supported for backups.", - type => 'boolean', - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id', { optional => 1 }), + volume => { + description => "Volume identifier", + type => 'string', + }, + notes => { + description => "The new notes.", + type => 'string', + optional => 1, + }, + protected => { + description => "Protection status. Currently only supported for backups.", + type => 'boolean', + optional => 1, + }, + }, }, returns => { type => 'null' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); + my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); + PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); - for my $attr (qw(notes protected)) { - if (exists $param->{$attr}) { - PVE::Storage::update_volume_attribute($cfg, $volid, $attr, $param->{$attr}); - } - } + for my $attr (qw(notes protected)) { + if (exists $param->{$attr}) { + PVE::Storage::update_volume_attribute($cfg, $volid, $attr, $param->{$attr}); + } + } - return undef; - }}); + return undef; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '{volume}', method => 'DELETE', description => "Delete volume", permissions => { - description => "You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).", - user => 'all', + description => + "You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).", + user => 'all', }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - optional => 1, - completion => \&PVE::Storage::complete_storage, - }), - volume => { - description => "Volume identifier", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - delay => { - type => 'integer', - description => "Time to wait for the task to finish. We return 'null' if the task finish within that time.", - minimum => 1, - maximum => 30, - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + optional => 1, + completion => \&PVE::Storage::complete_storage, + }, + ), + volume => { + description => "Volume identifier", + type => 'string', + completion => \&PVE::Storage::complete_volume, + }, + delay => { + type => 'integer', + description => + "Time to wait for the task to finish. 
We return 'null' if the task finish within that time.", + minimum => 1, + maximum => 30, + optional => 1, + }, + }, }, - returns => { type => 'string', optional => 1, }, + returns => { type => 'string', optional => 1 }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); + my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume}); - my ($path, $ownervm, $vtype) = PVE::Storage::path($cfg, $volid); - if ($vtype eq 'backup' && $ownervm) { - $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); - $rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']); - } else { - $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']); - } + my ($path, $ownervm, $vtype) = PVE::Storage::path($cfg, $volid); + if ($vtype eq 'backup' && $ownervm) { + $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); + $rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']); + } else { + $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']); + } - my $worker = sub { - PVE::Storage::vdisk_free ($cfg, $volid); - print "Removed volume '$volid'\n"; - if ($vtype eq 'backup' - && $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/) { - # Remove log file #318 and notes file #3972 if they still exist - PVE::Storage::archive_auxiliaries_remove($path); - } - }; + my $worker = sub { + PVE::Storage::vdisk_free($cfg, $volid); + print "Removed volume '$volid'\n"; + if ( + $vtype eq 'backup' + && $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/ + ) { + # Remove log file #318 and notes file #3972 if they still exist + PVE::Storage::archive_auxiliaries_remove($path); + } + }; - my $id = (defined $ownervm ? "$ownervm@" : '') . $storeid; - my $upid = $rpcenv->fork_worker('imgdel', $id, $authuser, $worker); - my $background_delay = $param->{delay}; - if ($background_delay) { - my $end_time = time() + $background_delay; - my $currently_deleting; # not necessarily true, e.g. sequential api call from cli - do { - my $task = PVE::Tools::upid_decode($upid); - $currently_deleting = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart}); - sleep 1 if $currently_deleting; - } while (time() < $end_time && $currently_deleting); + my $id = (defined $ownervm ? "$ownervm@" : '') . $storeid; + my $upid = $rpcenv->fork_worker('imgdel', $id, $authuser, $worker); + my $background_delay = $param->{delay}; + if ($background_delay) { + my $end_time = time() + $background_delay; + my $currently_deleting; # not necessarily true, e.g. 
sequential api call from cli + do { + my $task = PVE::Tools::upid_decode($upid); + $currently_deleting = + PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart}); + sleep 1 if $currently_deleting; + } while (time() < $end_time && $currently_deleting); - if (!$currently_deleting) { - my $status = PVE::Tools::upid_read_status($upid); - chomp $status; - return undef if !PVE::Tools::upid_status_is_error($status); - die "$status\n"; - } - } - return $upid; - }}); + if (!$currently_deleting) { + my $status = PVE::Tools::upid_read_status($upid); + chomp $status; + return undef if !PVE::Tools::upid_status_is_error($status); + die "$status\n"; + } + } + return $upid; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'copy', path => '{volume}', method => 'POST', @@ -491,70 +536,80 @@ __PACKAGE__->register_method ({ protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { optional => 1}), - volume => { - description => "Source volume identifier", - type => 'string', - }, - target => { - description => "Target volume identifier", - type => 'string', - }, - target_node => get_standard_option('pve-node', { - description => "Target node. Default is local node.", - optional => 1, - }), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id', { optional => 1 }), + volume => { + description => "Source volume identifier", + type => 'string', + }, + target => { + description => "Target volume identifier", + type => 'string', + }, + target_node => get_standard_option( + 'pve-node', + { + description => "Target node. 
Default is local node.", + optional => 1, + }, + ), + }, }, returns => { - type => 'string', + type => 'string', }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); + my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $user = $rpcenv->get_user(); - my $target_node = $param->{target_node} || PVE::INotify::nodename(); - # pvesh examples - # cd /nodes/localhost/storage/local/content - # pve:/> create local:103/vm-103-disk-1.raw -target local:103/vm-103-disk-2.raw - # pve:/> create 103/vm-103-disk-1.raw -target 103/vm-103-disk-3.raw + my $target_node = $param->{target_node} || PVE::INotify::nodename(); + # pvesh examples + # cd /nodes/localhost/storage/local/content + # pve:/> create local:103/vm-103-disk-1.raw -target local:103/vm-103-disk-2.raw + # pve:/> create 103/vm-103-disk-1.raw -target 103/vm-103-disk-3.raw - my $src_volid = &$real_volume_id($param->{storage}, $param->{volume}); - my $dst_volid = &$real_volume_id($param->{storage}, $param->{target}); + my $src_volid = &$real_volume_id($param->{storage}, $param->{volume}); + my $dst_volid = &$real_volume_id($param->{storage}, $param->{target}); - print "DEBUG: COPY $src_volid TO $dst_volid\n"; + print "DEBUG: COPY $src_volid TO $dst_volid\n"; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - # do all parameter checks first + # do all parameter checks first - # then do all short running task (to raise errors before we go to background) + # then do all short running task (to raise errors before we go to background) - # then start the worker task - my $worker = sub { - my $upid = shift; + # then start the worker task + my $worker = sub { + my $upid = shift; - print "DEBUG: starting worker $upid\n"; + print "DEBUG: starting worker $upid\n"; - my ($target_sid, $target_volname) = PVE::Storage::parse_volume_id($dst_volid); - #my $target_ip = PVE::Cluster::remote_node_ip($target_node); + my ($target_sid, $target_volname) = PVE::Storage::parse_volume_id($dst_volid); + #my $target_ip = PVE::Cluster::remote_node_ip($target_node); - # you need to get this working (fails currently, because storage_migrate() uses - # ssh to connect to local host (which is not needed - my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node); - PVE::Storage::storage_migrate($cfg, $src_volid, $sshinfo, $target_sid, {'target_volname' => $target_volname}); + # you need to get this working (fails currently, because storage_migrate() uses + # ssh to connect to local host (which is not needed + my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node); + PVE::Storage::storage_migrate( + $cfg, + $src_volid, + $sshinfo, + $target_sid, + { 'target_volname' => $target_volname }, + ); - print "DEBUG: end worker $upid\n"; + print "DEBUG: end worker $upid\n"; - }; + }; - return $rpcenv->fork_worker('imgcopy', undef, $user, $worker); - }}); + return $rpcenv->fork_worker('imgcopy', undef, $user, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Storage/FileRestore.pm b/src/PVE/API2/Storage/FileRestore.pm index 598573c..4058c56 100644 --- a/src/PVE/API2/Storage/FileRestore.pm +++ b/src/PVE/API2/Storage/FileRestore.pm @@ -20,204 +20,219 @@ my $parse_volname_or_id = sub { my ($sid, $volname) = PVE::Storage::parse_volume_id($volume, 1); if (defined($sid)) { - raise_param_exc({ volume => "storage ID mismatch ($sid != $storeid)." }) - if $sid ne $storeid; + raise_param_exc({ volume => "storage ID mismatch ($sid != $storeid)." 
}) + if $sid ne $storeid; - $volid = $volume; + $volid = $volume; } elsif ($volume =~ m/^backup\//) { - $volid = "$storeid:$volume"; + $volid = "$storeid:$volume"; } else { - $volid = "$storeid:backup/$volume"; + $volid = "$storeid:backup/$volume"; } return $volid; }; -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'list', path => 'list', method => 'GET', proxyto => 'node', permissions => { - description => "You need read access for the volume.", - user => 'all', + description => "You need read access for the volume.", + user => 'all', }, description => "List files and directories for single file restore under the given path.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - volume => { - description => "Backup volume ID or name. Currently only PBS snapshots are supported.", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - filepath => { - description => 'base64-path to the directory or file being listed, or "/".', - type => 'string', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + volume => { + description => + "Backup volume ID or name. Currently only PBS snapshots are supported.", + type => 'string', + completion => \&PVE::Storage::complete_volume, + }, + filepath => { + description => 'base64-path to the directory or file being listed, or "/".', + type => 'string', + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - filepath => { - description => "base64 path of the current entry", - type => 'string', - }, - type => { - description => "Entry type.", - type => 'string', - }, - text => { - description => "Entry display text.", - type => 'string', - }, - leaf => { - description => "If this entry is a leaf in the directory graph.", - type => 'boolean', - }, - size => { - description => "Entry file size.", - type => 'integer', - optional => 1, - }, - mtime => { - description => "Entry last-modified time (unix timestamp).", - type => 'integer', - optional => 1, - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + filepath => { + description => "base64 path of the current entry", + type => 'string', + }, + type => { + description => "Entry type.", + type => 'string', + }, + text => { + description => "Entry display text.", + type => 'string', + }, + leaf => { + description => "If this entry is a leaf in the directory graph.", + type => 'boolean', + }, + size => { + description => "Entry file size.", + type => 'integer', + optional => 1, + }, + mtime => { + description => "Entry last-modified time (unix timestamp).", + type => 'integer', + optional => 1, + }, + }, + }, }, protected => 1, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $path = extract_param($param, 'filepath') || "/"; - my $base64 = $path ne "/"; + my $path = extract_param($param, 'filepath') || "/"; + my $base64 = $path ne "/"; - my $storeid = extract_param($param, 'storage'); + my $storeid = extract_param($param, 'storage'); - my $volid = $parse_volname_or_id->($storeid, 
$param->{volume}); - my $cfg = PVE::Storage::config(); - my $scfg = PVE::Storage::storage_config($cfg, $storeid); + my $volid = $parse_volname_or_id->($storeid, $param->{volume}); + my $cfg = PVE::Storage::config(); + my $scfg = PVE::Storage::storage_config($cfg, $storeid); - PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup'); + PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup'); - raise_param_exc({'storage' => "Only PBS storages supported for file-restore."}) - if $scfg->{type} ne 'pbs'; + raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." }) + if $scfg->{type} ne 'pbs'; - my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid); + my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid); - my $client = PVE::PBSClient->new($scfg, $storeid); - my $ret = $client->file_restore_list($snap, $path, $base64, { timeout => 25 }); + my $client = PVE::PBSClient->new($scfg, $storeid); + my $ret = $client->file_restore_list($snap, $path, $base64, { timeout => 25 }); - if (ref($ret) eq "HASH") { - my $msg = $ret->{message}; - if (my $code = $ret->{code}) { - die PVE::Exception->new("$msg\n", code => $code); - } else { - die "$msg\n"; - } - } elsif (ref($ret) eq "ARRAY") { - # 'leaf' is a proper JSON boolean, map to perl-y bool - # TODO: make PBSClient decode all bools always as 1/0? - foreach my $item (@$ret) { - $item->{leaf} = $item->{leaf} ? 1 : 0; - } + if (ref($ret) eq "HASH") { + my $msg = $ret->{message}; + if (my $code = $ret->{code}) { + die PVE::Exception->new("$msg\n", code => $code); + } else { + die "$msg\n"; + } + } elsif (ref($ret) eq "ARRAY") { + # 'leaf' is a proper JSON boolean, map to perl-y bool + # TODO: make PBSClient decode all bools always as 1/0? + foreach my $item (@$ret) { + $item->{leaf} = $item->{leaf} ? 1 : 0; + } - return $ret; - } + return $ret; + } - die "invalid proxmox-file-restore output"; - }}); + die "invalid proxmox-file-restore output"; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'download', path => 'download', method => 'GET', proxyto => 'node', download_allowed => 1, permissions => { - description => "You need read access for the volume.", - user => 'all', + description => "You need read access for the volume.", + user => 'all', }, description => "Extract a file or directory (as zip archive) from a PBS backup.", parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - volume => { - description => "Backup volume ID or name. Currently only PBS snapshots are supported.", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - filepath => { - description => 'base64-path to the directory or file to download.', - type => 'string', - }, - tar => { - description => "Download dirs as 'tar.zst' instead of 'zip'.", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + volume => { + description => + "Backup volume ID or name. 
Currently only PBS snapshots are supported.", + type => 'string', + completion => \&PVE::Storage::complete_volume, + }, + filepath => { + description => 'base64-path to the directory or file to download.', + type => 'string', + }, + tar => { + description => "Download dirs as 'tar.zst' instead of 'zip'.", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { - type => 'any', # download + type => 'any', # download }, protected => 1, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $path = extract_param($param, 'filepath'); - my $storeid = extract_param($param, 'storage'); - my $volid = $parse_volname_or_id->($storeid, $param->{volume}); - my $tar = extract_param($param, 'tar') // 0; + my $path = extract_param($param, 'filepath'); + my $storeid = extract_param($param, 'storage'); + my $volid = $parse_volname_or_id->($storeid, $param->{volume}); + my $tar = extract_param($param, 'tar') // 0; - my $cfg = PVE::Storage::config(); - my $scfg = PVE::Storage::storage_config($cfg, $storeid); + my $cfg = PVE::Storage::config(); + my $scfg = PVE::Storage::storage_config($cfg, $storeid); - PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup'); + PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup'); - raise_param_exc({'storage' => "Only PBS storages supported for file-restore."}) - if $scfg->{type} ne 'pbs'; + raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." }) + if $scfg->{type} ne 'pbs'; - my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid); + my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid); - my $client = PVE::PBSClient->new($scfg, $storeid); - my $fifo = $client->file_restore_extract_prepare(); + my $client = PVE::PBSClient->new($scfg, $storeid); + my $fifo = $client->file_restore_extract_prepare(); - $rpcenv->fork_worker('pbs-download', undef, $user, sub { - my $name = decode_base64($path); - print "Starting download of file: $name\n"; - $client->file_restore_extract($fifo, $snap, $path, 1, $tar); - }); + $rpcenv->fork_worker( + 'pbs-download', + undef, + $user, + sub { + my $name = decode_base64($path); + print "Starting download of file: $name\n"; + $client->file_restore_extract($fifo, $snap, $path, 1, $tar); + }, + ); - my $ret = { - download => { - path => $fifo, - stream => 1, - 'content-type' => 'application/octet-stream', - }, - }; - return $ret; - }}); + my $ret = { + download => { + path => $fifo, + stream => 1, + 'content-type' => 'application/octet-stream', + }, + }; + return $ret; + }, +}); 1; diff --git a/src/PVE/API2/Storage/PruneBackups.pm b/src/PVE/API2/Storage/PruneBackups.pm index e6ab276..2920c89 100644 --- a/src/PVE/API2/Storage/PruneBackups.pm +++ b/src/PVE/API2/Storage/PruneBackups.pm @@ -12,153 +12,185 @@ use PVE::Tools qw(extract_param); use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'dryrun', path => '', method => 'GET', - description => "Get prune information for backups. NOTE: this is only a preview and might not be " . - "what a subsequent prune call does if backups are removed/added in the meantime.", + description => + "Get prune information for backups. NOTE: this is only a preview and might not be " + . 
"what a subsequent prune call does if backups are removed/added in the meantime.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - 'prune-backups' => get_standard_option('prune-backups', { - description => "Use these retention options instead of those from the storage configuration.", - optional => 1, - }), - type => { - description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.", - type => 'string', - optional => 1, - enum => ['qemu', 'lxc'], - }, - vmid => get_standard_option('pve-vmid', { - description => "Only consider backups for this guest.", - optional => 1, - completion => \&PVE::Cluster::complete_vmid, - }), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + 'prune-backups' => get_standard_option( + 'prune-backups', + { + description => + "Use these retention options instead of those from the storage configuration.", + optional => 1, + }, + ), + type => { + description => + "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.", + type => 'string', + optional => 1, + enum => ['qemu', 'lxc'], + }, + vmid => get_standard_option( + 'pve-vmid', + { + description => "Only consider backups for this guest.", + optional => 1, + completion => \&PVE::Cluster::complete_vmid, + }, + ), + }, }, returns => { - type => 'array', - items => { - type => 'object', - properties => { - volid => { - description => "Backup volume ID.", - type => 'string', - }, - 'ctime' => { - description => "Creation time of the backup (seconds since the UNIX epoch).", - type => 'integer', - }, - 'mark' => { - description => "Whether the backup would be kept or removed. Backups that are" . - " protected or don't use the standard naming scheme are not removed.", - type => 'string', - enum => ['keep', 'remove', 'protected', 'renamed'], - }, - type => { - description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.", - type => 'string', - }, - 'vmid' => { - description => "The VM the backup belongs to.", - type => 'integer', - optional => 1, - }, - }, - }, + type => 'array', + items => { + type => 'object', + properties => { + volid => { + description => "Backup volume ID.", + type => 'string', + }, + 'ctime' => { + description => + "Creation time of the backup (seconds since the UNIX epoch).", + type => 'integer', + }, + 'mark' => { + description => + "Whether the backup would be kept or removed. Backups that are" + . 
" protected or don't use the standard naming scheme are not removed.", + type => 'string', + enum => ['keep', 'remove', 'protected', 'renamed'], + }, + type => { + description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.", + type => 'string', + }, + 'vmid' => { + description => "The VM the backup belongs to.", + type => 'integer', + optional => 1, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $vmid = extract_param($param, 'vmid'); - my $type = extract_param($param, 'type'); - my $storeid = extract_param($param, 'storage'); + my $vmid = extract_param($param, 'vmid'); + my $type = extract_param($param, 'type'); + my $storeid = extract_param($param, 'storage'); - my $prune_backups = extract_param($param, 'prune-backups'); - $prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups) - if defined($prune_backups); + my $prune_backups = extract_param($param, 'prune-backups'); + $prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups) + if defined($prune_backups); - return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1); - }}); + return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'delete', path => '', method => 'DELETE', description => "Prune backups. Only those using the standard naming scheme are considered.", permissions => { - description => "You need the 'Datastore.Allocate' privilege on the storage " . - "(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).", - user => 'all', + description => "You need the 'Datastore.Allocate' privilege on the storage " + . "(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).", + user => 'all', }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage, - }), - 'prune-backups' => get_standard_option('prune-backups', { - description => "Use these retention options instead of those from the storage configuration.", - }), - type => { - description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.", - type => 'string', - optional => 1, - enum => ['qemu', 'lxc'], - }, - vmid => get_standard_option('pve-vmid', { - description => "Only prune backups for this VM.", - completion => \&PVE::Cluster::complete_vmid, - optional => 1, - }), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage, + }, + ), + 'prune-backups' => get_standard_option( + 'prune-backups', + { + description => + "Use these retention options instead of those from the storage configuration.", + }, + ), + type => { + description => + "Either 'qemu' or 'lxc'. 
Only consider backups for guests of this type.", + type => 'string', + optional => 1, + enum => ['qemu', 'lxc'], + }, + vmid => get_standard_option( + 'pve-vmid', + { + description => "Only prune backups for this VM.", + completion => \&PVE::Cluster::complete_vmid, + optional => 1, + }, + ), + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $vmid = extract_param($param, 'vmid'); - my $type = extract_param($param, 'type'); - my $storeid = extract_param($param, 'storage'); + my $vmid = extract_param($param, 'vmid'); + my $type = extract_param($param, 'type'); + my $storeid = extract_param($param, 'storage'); - my $prune_backups = extract_param($param, 'prune-backups'); - $prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups) - if defined($prune_backups); + my $prune_backups = extract_param($param, 'prune-backups'); + $prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups) + if defined($prune_backups); - if (defined($vmid)) { - $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); - $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup']); - } else { - $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']); - } + if (defined($vmid)) { + $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); + $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup']); + } else { + $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']); + } - my $id = (defined($vmid) ? "$vmid@" : '') . $storeid; - my $worker = sub { - PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 0); - }; + my $id = (defined($vmid) ? "$vmid@" : '') . 
$storeid; + my $worker = sub { + PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 0); + }; - return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker); - }}); + return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker); + }, +}); 1; diff --git a/src/PVE/API2/Storage/Scan.pm b/src/PVE/API2/Storage/Scan.pm index d7a8743..db751b2 100644 --- a/src/PVE/API2/Storage/Scan.pm +++ b/src/PVE/API2/Storage/Scan.pm @@ -20,39 +20,40 @@ __PACKAGE__->register_method({ method => 'GET', description => "Index of available scan methods", permissions => { - user => 'all', + user => 'all', }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - method => { type => 'string'}, - }, - }, - links => [ { rel => 'child', href => "{method}" } ], + type => 'array', + items => { + type => "object", + properties => { + method => { type => 'string' }, + }, + }, + links => [{ rel => 'child', href => "{method}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $res = [ - { method => 'cifs' }, - { method => 'glusterfs' }, - { method => 'iscsi' }, - { method => 'lvm' }, - { method => 'nfs' }, - { method => 'pbs' }, - { method => 'zfs' }, - ]; + my $res = [ + { method => 'cifs' }, + { method => 'glusterfs' }, + { method => 'iscsi' }, + { method => 'lvm' }, + { method => 'nfs' }, + { method => 'pbs' }, + { method => 'zfs' }, + ]; - return $res; - }}); + return $res; + }, +}); __PACKAGE__->register_method({ name => 'nfsscan', @@ -62,46 +63,48 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - server => { - description => "The server address (name or IP).", - type => 'string', format => 'pve-storage-server', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + server => { + description => "The server address (name or IP).", + type => 'string', + format => 'pve-storage-server', + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - path => { - description => "The exported path.", - type => 'string', - }, - options => { - description => "NFS export options.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + path => { + description => "The exported path.", + type => 'string', + }, + options => { + description => "NFS export options.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $server = $param->{server}; - my $res = PVE::Storage::scan_nfs($server); + my $server = $param->{server}; + my $res = PVE::Storage::scan_nfs($server); - my $data = []; - foreach my $k (sort keys %$res) { - push @$data, { path => $k, options => $res->{$k} }; - } - return $data; - }}); + my $data = []; + foreach my $k (sort keys %$res) { + push @$data, { path => $k, options => $res->{$k} }; + } + return $data; + }, +}); __PACKAGE__->register_method({ name => 'cifsscan', @@ -111,68 +114,70 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', 
['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - server => { - description => "The server address (name or IP).", - type => 'string', format => 'pve-storage-server', - }, - username => { - description => "User name.", - type => 'string', - optional => 1, - }, - password => { - description => "User password.", - type => 'string', - optional => 1, - }, - domain => { - description => "SMB domain (Workgroup).", - type => 'string', - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + server => { + description => "The server address (name or IP).", + type => 'string', + format => 'pve-storage-server', + }, + username => { + description => "User name.", + type => 'string', + optional => 1, + }, + password => { + description => "User password.", + type => 'string', + optional => 1, + }, + domain => { + description => "SMB domain (Workgroup).", + type => 'string', + optional => 1, + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - share => { - description => "The cifs share name.", - type => 'string', - }, - description => { - description => "Descriptive text from server.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + share => { + description => "The cifs share name.", + type => 'string', + }, + description => { + description => "Descriptive text from server.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $server = $param->{server}; + my $server = $param->{server}; - my $username = $param->{username}; - my $password = $param->{password}; - my $domain = $param->{domain}; + my $username = $param->{username}; + my $password = $param->{password}; + my $domain = $param->{domain}; - my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain); + my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain); - my $data = []; - foreach my $k (sort keys %$res) { - next if $k =~ m/NT_STATUS_/; - push @$data, { share => $k, description => $res->{$k} }; - } + my $data = []; + foreach my $k (sort keys %$res) { + next if $k =~ m/NT_STATUS_/; + push @$data, { share => $k, description => $res->{$k} }; + } - return $data; - }}); + return $data; + }, +}); __PACKAGE__->register_method({ name => 'pbsscan', @@ -182,61 +187,62 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - server => { - description => "The server address (name or IP).", - type => 'string', format => 'pve-storage-server', - }, - username => { - description => "User-name or API token-ID.", - type => 'string', - }, - password => { - description => "User password or API token secret.", - type => 'string', - }, - fingerprint => get_standard_option('fingerprint-sha256', { - optional => 1, - }), - port => { - description => "Optional port.", - type => 'integer', - minimum => 1, - maximum => 65535, - default => 8007, - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + server => { + description => "The server address (name or IP).", + type => 
'string', + format => 'pve-storage-server', + }, + username => { + description => "User-name or API token-ID.", + type => 'string', + }, + password => { + description => "User password or API token secret.", + type => 'string', + }, + fingerprint => get_standard_option('fingerprint-sha256', { + optional => 1, + }), + port => { + description => "Optional port.", + type => 'integer', + minimum => 1, + maximum => 65535, + default => 8007, + optional => 1, + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - store => { - description => "The datastore name.", - type => 'string', - }, - comment => { - description => "Comment from server.", - type => 'string', - optional => 1, - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + store => { + description => "The datastore name.", + type => 'string', + }, + comment => { + description => "Comment from server.", + type => 'string', + optional => 1, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $password = delete $param->{password}; + my $password = delete $param->{password}; - return PVE::Storage::PBSPlugin::scan_datastores($param, $password); - } + return PVE::Storage::PBSPlugin::scan_datastores($param, $password); + }, }); # Note: GlusterFS currently does not have an equivalent of showmount. @@ -250,44 +256,46 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - server => { - description => "The server address (name or IP).", - type => 'string', format => 'pve-storage-server', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + server => { + description => "The server address (name or IP).", + type => 'string', + format => 'pve-storage-server', + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - volname => { - description => "The volume name.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + volname => { + description => "The volume name.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $server = $param->{server}; - my $res = PVE::Storage::scan_nfs($server); + my $server = $param->{server}; + my $res = PVE::Storage::scan_nfs($server); - my $data = []; - foreach my $path (sort keys %$res) { - if ($path =~ m!^/([^\s/]+)$!) { - push @$data, { volname => $1 }; - } - } - return $data; - }}); + my $data = []; + foreach my $path (sort keys %$res) { + if ($path =~ m!^/([^\s/]+)$!) 
{ + push @$data, { volname => $1 }; + } + } + return $data; + }, +}); __PACKAGE__->register_method({ name => 'iscsiscan', @@ -297,46 +305,48 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - portal => { - description => "The iSCSI portal (IP or DNS name with optional port).", - type => 'string', format => 'pve-storage-portal-dns', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + portal => { + description => "The iSCSI portal (IP or DNS name with optional port).", + type => 'string', + format => 'pve-storage-portal-dns', + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - target => { - description => "The iSCSI target name.", - type => 'string', - }, - portal => { - description => "The iSCSI portal name.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + target => { + description => "The iSCSI target name.", + type => 'string', + }, + portal => { + description => "The iSCSI portal name.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $res = PVE::Storage::scan_iscsi($param->{portal}); + my $res = PVE::Storage::scan_iscsi($param->{portal}); - my $data = []; - foreach my $k (sort keys %$res) { - push @$data, { target => $k, portal => join(',', @{$res->{$k}}) }; - } + my $data = []; + foreach my $k (sort keys %$res) { + push @$data, { target => $k, portal => join(',', @{ $res->{$k} }) }; + } - return $data; - }}); + return $data; + }, +}); __PACKAGE__->register_method({ name => 'lvmscan', @@ -346,32 +356,33 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - vg => { - description => "The LVM logical volume group name.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + vg => { + description => "The LVM logical volume group name.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $res = PVE::Storage::LVMPlugin::lvm_vgs(); - return PVE::RESTHandler::hash_to_array($res, 'vg'); - }}); + my $res = PVE::Storage::LVMPlugin::lvm_vgs(); + return PVE::RESTHandler::hash_to_array($res, 'vg'); + }, +}); __PACKAGE__->register_method({ name => 'lvmthinscan', @@ -381,36 +392,37 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - vg => { - type => 'string', - pattern => '[a-zA-Z0-9\.\+\_][a-zA-Z0-9\.\+\_\-]+', # see lvm(8) manpage - maxLength => 100, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vg => { 
+ type => 'string', + pattern => '[a-zA-Z0-9\.\+\_][a-zA-Z0-9\.\+\_\-]+', # see lvm(8) manpage + maxLength => 100, + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - lv => { - description => "The LVM Thin Pool name (LVM logical volume).", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + lv => { + description => "The LVM Thin Pool name (LVM logical volume).", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg}); - }}); + return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg}); + }, +}); __PACKAGE__->register_method({ name => 'zfsscan', @@ -420,30 +432,31 @@ __PACKAGE__->register_method({ protected => 1, proxyto => "node", permissions => { - check => ['perm', '/storage', ['Datastore.Allocate']], + check => ['perm', '/storage', ['Datastore.Allocate']], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - pool => { - description => "ZFS pool name.", - type => 'string', - }, - }, - }, + type => 'array', + items => { + type => "object", + properties => { + pool => { + description => "ZFS pool name.", + type => 'string', + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - return PVE::Storage::scan_zfs(); - }}); + return PVE::Storage::scan_zfs(); + }, +}); 1; diff --git a/src/PVE/API2/Storage/Status.pm b/src/PVE/API2/Storage/Status.pm index 14915ae..f7c3519 100644 --- a/src/PVE/API2/Storage/Status.pm +++ b/src/PVE/API2/Storage/Status.pm @@ -23,12 +23,12 @@ use PVE::Storage; use base qw(PVE::RESTHandler); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ subclass => "PVE::API2::Storage::PruneBackups", path => '{storage}/prunebackups', }); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ subclass => "PVE::API2::Storage::Content", # set fragment delimiter (no subdirs) - we need that, because volume # IDs may contain a slash '/' @@ -36,9 +36,9 @@ __PACKAGE__->register_method ({ path => '{storage}/content', }); -__PACKAGE__->register_method ({ - subclass => "PVE::API2::Storage::FileRestore", - path => '{storage}/file-restore', +__PACKAGE__->register_method({ + subclass => "PVE::API2::Storage::FileRestore", + path => '{storage}/file-restore', }); my sub assert_ova_contents { @@ -46,543 +46,601 @@ my sub assert_ova_contents { # test if it's really a tar file with an ovf file inside my $hasOvf = 0; - run_command(['tar', '-t', '-f', $file], outfunc => sub { - my ($line) = @_; + run_command( + ['tar', '-t', '-f', $file], + outfunc => sub { + my ($line) = @_; - if ($line =~ m/\.ovf$/) { - $hasOvf = 1; - } - }); + if ($line =~ m/\.ovf$/) { + $hasOvf = 1; + } + }, + ); die "ova archive has no .ovf file inside\n" if !$hasOvf; return 1; } -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'index', path => '', method => 'GET', description => "Get status for all datastores.", permissions => { - description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/'", - user => 'all', + description => + "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/'", + user => 'all', }, 
protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - description => "Only list status for specified storage", - optional => 1, - completion => \&PVE::Storage::complete_storage_enabled, - }), - content => { - description => "Only list stores which support this content type.", - type => 'string', format => 'pve-storage-content-list', - optional => 1, - completion => \&PVE::Storage::complete_content_type, - }, - enabled => { - description => "Only list stores which are enabled (not disabled in config).", - type => 'boolean', - optional => 1, - default => 0, - }, - target => get_standard_option('pve-node', { - description => "If target is different to 'node', we only lists shared storages which " . - "content is accessible on this 'node' and the specified 'target' node.", - optional => 1, - completion => \&PVE::Cluster::get_nodelist, - }), - 'format' => { - description => "Include information about formats", - type => 'boolean', - optional => 1, - default => 0, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + description => "Only list status for specified storage", + optional => 1, + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + content => { + description => "Only list stores which support this content type.", + type => 'string', + format => 'pve-storage-content-list', + optional => 1, + completion => \&PVE::Storage::complete_content_type, + }, + enabled => { + description => "Only list stores which are enabled (not disabled in config).", + type => 'boolean', + optional => 1, + default => 0, + }, + target => get_standard_option( + 'pve-node', + { + description => + "If target is different to 'node', we only lists shared storages which " + . 
"content is accessible on this 'node' and the specified 'target' node.", + optional => 1, + completion => \&PVE::Cluster::get_nodelist, + }, + ), + 'format' => { + description => "Include information about formats", + type => 'boolean', + optional => 1, + default => 0, + }, + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - storage => get_standard_option('pve-storage-id'), - type => { - description => "Storage type.", - type => 'string', - }, - content => { - description => "Allowed storage content types.", - type => 'string', format => 'pve-storage-content-list', - }, - enabled => { - description => "Set when storage is enabled (not disabled).", - type => 'boolean', - optional => 1, - }, - active => { - description => "Set when storage is accessible.", - type => 'boolean', - optional => 1, - }, - shared => { - description => "Shared flag from storage configuration.", - type => 'boolean', - optional => 1, - }, - total => { - description => "Total storage space in bytes.", - type => 'integer', - renderer => 'bytes', - optional => 1, - }, - used => { - description => "Used storage space in bytes.", - type => 'integer', - renderer => 'bytes', - optional => 1, - }, - avail => { - description => "Available storage space in bytes.", - type => 'integer', - renderer => 'bytes', - optional => 1, - }, - used_fraction => { - description => "Used fraction (used/total).", - type => 'number', - renderer => 'fraction_as_percentage', - optional => 1, - }, - }, - }, - links => [ { rel => 'child', href => "{storage}" } ], + type => 'array', + items => { + type => "object", + properties => { + storage => get_standard_option('pve-storage-id'), + type => { + description => "Storage type.", + type => 'string', + }, + content => { + description => "Allowed storage content types.", + type => 'string', + format => 'pve-storage-content-list', + }, + enabled => { + description => "Set when storage is enabled (not disabled).", + type => 'boolean', + optional => 1, + }, + active => { + description => "Set when storage is accessible.", + type => 'boolean', + optional => 1, + }, + shared => { + description => "Shared flag from storage configuration.", + type => 'boolean', + optional => 1, + }, + total => { + description => "Total storage space in bytes.", + type => 'integer', + renderer => 'bytes', + optional => 1, + }, + used => { + description => "Used storage space in bytes.", + type => 'integer', + renderer => 'bytes', + optional => 1, + }, + avail => { + description => "Available storage space in bytes.", + type => 'integer', + renderer => 'bytes', + optional => 1, + }, + used_fraction => { + description => "Used fraction (used/total).", + type => 'number', + renderer => 'fraction_as_percentage', + optional => 1, + }, + }, + }, + links => [{ rel => 'child', href => "{storage}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $localnode = PVE::INotify::nodename(); + my $localnode = PVE::INotify::nodename(); - my $target = $param->{target}; + my $target = $param->{target}; - undef $target if $target && ($target eq $localnode || $target eq 'localhost'); + undef $target if $target && ($target eq $localnode || $target eq 'localhost'); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $info = PVE::Storage::storage_info($cfg, $param->{content}, $param->{format}); + my $info = 
PVE::Storage::storage_info($cfg, $param->{content}, $param->{format}); - raise_param_exc({ storage => "No such storage." }) - if $param->{storage} && !defined($info->{$param->{storage}}); + raise_param_exc({ storage => "No such storage." }) + if $param->{storage} && !defined($info->{ $param->{storage} }); - my $res = {}; - my @sids = PVE::Storage::storage_ids($cfg); - foreach my $storeid (@sids) { - my $data = $info->{$storeid}; - next if !$data; - my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ]; - next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1); - next if $param->{storage} && $param->{storage} ne $storeid; + my $res = {}; + my @sids = PVE::Storage::storage_ids($cfg); + foreach my $storeid (@sids) { + my $data = $info->{$storeid}; + next if !$data; + my $privs = ['Datastore.Audit', 'Datastore.AllocateSpace']; + next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1); + next if $param->{storage} && $param->{storage} ne $storeid; - my $scfg = PVE::Storage::storage_config($cfg, $storeid); + my $scfg = PVE::Storage::storage_config($cfg, $storeid); - next if $param->{enabled} && $scfg->{disable}; + next if $param->{enabled} && $scfg->{disable}; - if ($target) { - # check if storage content is accessible on local node and specified target node - # we use this on the Clone GUI + if ($target) { + # check if storage content is accessible on local node and specified target node + # we use this on the Clone GUI - next if !$scfg->{shared}; - next if !PVE::Storage::storage_check_node($cfg, $storeid, undef, 1); - next if !PVE::Storage::storage_check_node($cfg, $storeid, $target, 1); - } + next if !$scfg->{shared}; + next if !PVE::Storage::storage_check_node($cfg, $storeid, undef, 1); + next if !PVE::Storage::storage_check_node($cfg, $storeid, $target, 1); + } - if ($data->{total}) { - $data->{used_fraction} = ($data->{used} // 0) / $data->{total}; - } + if ($data->{total}) { + $data->{used_fraction} = ($data->{used} // 0) / $data->{total}; + } - $res->{$storeid} = $data; - } + $res->{$storeid} = $data; + } - return PVE::RESTHandler::hash_to_array($res, 'storage'); - }}); + return PVE::RESTHandler::hash_to_array($res, 'storage'); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'diridx', path => '{storage}', method => 'GET', description => "", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + }, }, returns => { - type => 'array', - items => { - type => "object", - properties => { - subdir => { type => 'string' }, - }, - }, - links => [ { rel => 'child', href => "{subdir}" } ], + type => 'array', + items => { + type => "object", + properties => { + subdir => { type => 'string' }, + }, + }, + links => [{ rel => 'child', href => "{subdir}" }], }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $res = [ - { subdir => 'content' }, - { subdir => 'download-url' }, - { subdir => 'file-restore' }, - { subdir => 'import-metadata' }, - { subdir => 'prunebackups' }, - { subdir => 'rrd' }, - { subdir => 'rrddata' }, - { subdir => 'status' 
}, - { subdir => 'upload' }, - ]; + my $res = [ + { subdir => 'content' }, + { subdir => 'download-url' }, + { subdir => 'file-restore' }, + { subdir => 'import-metadata' }, + { subdir => 'prunebackups' }, + { subdir => 'rrd' }, + { subdir => 'rrddata' }, + { subdir => 'status' }, + { subdir => 'upload' }, + ]; - return $res; - }}); + return $res; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'read_status', path => '{storage}/status', method => 'GET', description => "Read storage status.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + }, }, returns => { - type => "object", - properties => {}, + type => "object", + properties => {}, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $info = PVE::Storage::storage_info($cfg, $param->{content}); + my $info = PVE::Storage::storage_info($cfg, $param->{content}); - my $data = $info->{$param->{storage}}; + my $data = $info->{ $param->{storage} }; - raise_param_exc({ storage => "No such storage." }) - if !defined($data); + raise_param_exc({ storage => "No such storage." }) + if !defined($data); - return $data; - }}); + return $data; + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'rrd', path => '{storage}/rrd', method => 'GET', description => "Read storage RRD statistics (returns PNG).", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - timeframe => { - description => "Specify the time frame you are interested in.", - type => 'string', - enum => [ 'hour', 'day', 'week', 'month', 'year' ], - }, - ds => { - description => "The list of datasources you want to display.", - type => 'string', format => 'pve-configid-list', - }, - cf => { - description => "The RRD consolidation function", - type => 'string', - enum => [ 'AVERAGE', 'MAX' ], - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + timeframe => { + description => "Specify the time frame you are interested in.", + type => 'string', + enum => ['hour', 'day', 'week', 'month', 'year'], + }, + ds => { + description => "The list of datasources you want to display.", + type => 'string', + format => 'pve-configid-list', + }, + cf => { + description => "The RRD consolidation function", + type => 'string', + enum => ['AVERAGE', 'MAX'], + optional => 1, + }, + }, }, returns => { - type => "object", - properties => { - filename => { type => 'string' }, - }, + type => "object", + properties => { + filename => { type => 'string' }, + }, }, code => 
sub { - my ($param) = @_; + my ($param) = @_; - return PVE::RRD::create_rrd_graph( - "pve2-storage/$param->{node}/$param->{storage}", - $param->{timeframe}, $param->{ds}, $param->{cf}); - }}); + return PVE::RRD::create_rrd_graph("pve2-storage/$param->{node}/$param->{storage}", + $param->{timeframe}, $param->{ds}, $param->{cf}); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'rrddata', path => '{storage}/rrddata', method => 'GET', description => "Read storage RRD statistics.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1], + check => [ + 'perm', + '/storage/{storage}', + ['Datastore.Audit', 'Datastore.AllocateSpace'], + any => 1, + ], }, protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - timeframe => { - description => "Specify the time frame you are interested in.", - type => 'string', - enum => [ 'hour', 'day', 'week', 'month', 'year' ], - }, - cf => { - description => "The RRD consolidation function", - type => 'string', - enum => [ 'AVERAGE', 'MAX' ], - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + timeframe => { + description => "Specify the time frame you are interested in.", + type => 'string', + enum => ['hour', 'day', 'week', 'month', 'year'], + }, + cf => { + description => "The RRD consolidation function", + type => 'string', + enum => ['AVERAGE', 'MAX'], + optional => 1, + }, + }, }, returns => { - type => "array", - items => { - type => "object", - properties => {}, - }, + type => "array", + items => { + type => "object", + properties => {}, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - return PVE::RRD::create_rrd_data( - "pve2-storage/$param->{node}/$param->{storage}", - $param->{timeframe}, $param->{cf}); - }}); + return PVE::RRD::create_rrd_data( + "pve2-storage/$param->{node}/$param->{storage}", + $param->{timeframe}, + $param->{cf}, + ); + }, +}); # makes no sense for big images and backup files (because it # create a copy of the file). -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'upload', path => '{storage}/upload', method => 'POST', description => "Upload templates, ISO images, OVAs and VM images.", permissions => { - check => ['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']], + check => ['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']], }, protected => 1, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - content => { - description => "Content type.", - type => 'string', format => 'pve-storage-content', - enum => ['iso', 'vztmpl', 'import'], - }, - filename => { - description => "The name of the file to create. 
Caution: This will be normalized!", - maxLength => 255, - type => 'string', - }, - checksum => { - description => "The expected checksum of the file.", - type => 'string', - requires => 'checksum-algorithm', - optional => 1, - }, - 'checksum-algorithm' => { - description => "The algorithm to calculate the checksum of the file.", - type => 'string', - enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'], - requires => 'checksum', - optional => 1, - }, - tmpfilename => { - description => "The source file name. This parameter is usually set by the REST handler. You can only overwrite it when connecting to the trusted port on localhost.", - type => 'string', - optional => 1, - pattern => '/var/tmp/pveupload-[0-9a-f]+', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + content => { + description => "Content type.", + type => 'string', + format => 'pve-storage-content', + enum => ['iso', 'vztmpl', 'import'], + }, + filename => { + description => + "The name of the file to create. Caution: This will be normalized!", + maxLength => 255, + type => 'string', + }, + checksum => { + description => "The expected checksum of the file.", + type => 'string', + requires => 'checksum-algorithm', + optional => 1, + }, + 'checksum-algorithm' => { + description => "The algorithm to calculate the checksum of the file.", + type => 'string', + enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'], + requires => 'checksum', + optional => 1, + }, + tmpfilename => { + description => + "The source file name. This parameter is usually set by the REST handler. You can only overwrite it when connecting to the trusted port on localhost.", + type => 'string', + optional => 1, + pattern => '/var/tmp/pveupload-[0-9a-f]+', + }, + }, }, returns => { type => "string" }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); + my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $user = $rpcenv->get_user(); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my ($node, $storage) = $param->@{qw(node storage)}; - my $scfg = PVE::Storage::storage_check_enabled($cfg, $storage, $node); + my ($node, $storage) = $param->@{qw(node storage)}; + my $scfg = PVE::Storage::storage_check_enabled($cfg, $storage, $node); - die "can't upload to storage type '$scfg->{type}'\n" - if !defined($scfg->{path}); + die "can't upload to storage type '$scfg->{type}'\n" + if !defined($scfg->{path}); - my $content = $param->{content}; + my $content = $param->{content}; - my $tmpfilename = $param->{tmpfilename}; - die "missing temporary file name\n" if !$tmpfilename; + my $tmpfilename = $param->{tmpfilename}; + die "missing temporary file name\n" if !$tmpfilename; - my $size = -s $tmpfilename; - die "temporary file '$tmpfilename' does not exist\n" if !defined($size); + my $size = -s $tmpfilename; + die "temporary file '$tmpfilename' does not exist\n" if !defined($size); - my $filename = PVE::Storage::normalize_content_filename($param->{filename}); + my $filename = PVE::Storage::normalize_content_filename($param->{filename}); - my $path; - my $is_ova = 0; - my $image_format; + my $path; + my $is_ova = 0; + my $image_format; - if ($content eq 'iso') { - if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) 
{ - raise_param_exc({ filename => "wrong file extension" }); - } - $path = PVE::Storage::get_iso_dir($cfg, $storage); - } elsif ($content eq 'vztmpl') { - if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) { - raise_param_exc({ filename => "wrong file extension" }); - } - $path = PVE::Storage::get_vztmpl_dir($cfg, $storage); - } elsif ($content eq 'import') { - if ($filename !~ m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!) { - raise_param_exc({ filename => "invalid filename or wrong extension" }); - } - my $format = $1; + if ($content eq 'iso') { + if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) { + raise_param_exc({ filename => "wrong file extension" }); + } + $path = PVE::Storage::get_iso_dir($cfg, $storage); + } elsif ($content eq 'vztmpl') { + if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) { + raise_param_exc({ filename => "wrong file extension" }); + } + $path = PVE::Storage::get_vztmpl_dir($cfg, $storage); + } elsif ($content eq 'import') { + if ($filename !~ + m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$! + ) { + raise_param_exc({ filename => "invalid filename or wrong extension" }); + } + my $format = $1; - if ($format eq 'ova') { - $is_ova = 1; - } else { - $image_format = $format; - } + if ($format eq 'ova') { + $is_ova = 1; + } else { + $image_format = $format; + } - $path = PVE::Storage::get_import_dir($cfg, $storage); - } else { - raise_param_exc({ content => "upload content type '$content' not allowed" }); - } + $path = PVE::Storage::get_import_dir($cfg, $storage); + } else { + raise_param_exc({ content => "upload content type '$content' not allowed" }); + } - die "storage '$storage' does not support '$content' content\n" - if !$scfg->{content}->{$content}; + die "storage '$storage' does not support '$content' content\n" + if !$scfg->{content}->{$content}; - my $dest = "$path/$filename"; - my $dirname = dirname($dest); + my $dest = "$path/$filename"; + my $dirname = dirname($dest); - # best effort to match apl_download behaviour - chmod 0644, $tmpfilename; + # best effort to match apl_download behaviour + chmod 0644, $tmpfilename; - my $err_cleanup = sub { unlink $dest or $! == ENOENT or die "cleanup failed: $!\n" }; + my $err_cleanup = sub { unlink $dest or $! == ENOENT or die "cleanup failed: $!\n" }; - my $cmd; - if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { - my $remip = PVE::Cluster::remote_node_ip($node); + my $cmd; + if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { + my $remip = PVE::Cluster::remote_node_ip($node); - my $ssh_options = PVE::SSHInfo::ssh_info_to_ssh_opts({ ip => $remip, name => $node }); + my $ssh_options = + PVE::SSHInfo::ssh_info_to_ssh_opts({ ip => $remip, name => $node }); - my @remcmd = ('/usr/bin/ssh', $ssh_options->@*, $remip, '--'); + my @remcmd = ('/usr/bin/ssh', $ssh_options->@*, $remip, '--'); - eval { # activate remote storage - run_command([@remcmd, '/usr/sbin/pvesm', 'status', '--storage', $storage]); - }; - die "can't activate storage '$storage' on node '$node': $@\n" if $@; + eval { # activate remote storage + run_command([@remcmd, '/usr/sbin/pvesm', 'status', '--storage', $storage]); + }; + die "can't activate storage '$storage' on node '$node': $@\n" if $@; - run_command( - [@remcmd, '/bin/mkdir', '-p', '--', PVE::Tools::shell_quote($dirname)], - errmsg => "mkdir failed", - ); - - $cmd = ['/usr/bin/scp', $ssh_options->@*, '-p', '--', $tmpfilename, "[$remip]:" . 
PVE::Tools::shell_quote($dest)]; + run_command( + [@remcmd, '/bin/mkdir', '-p', '--', PVE::Tools::shell_quote($dirname)], + errmsg => "mkdir failed", + ); - $err_cleanup = sub { run_command([@remcmd, 'rm', '-f', '--', $dest]) }; - } else { - PVE::Storage::activate_storage($cfg, $storage); - File::Path::make_path($dirname); - $cmd = ['cp', '--', $tmpfilename, $dest]; - } + $cmd = [ + '/usr/bin/scp', + $ssh_options->@*, + '-p', + '--', + $tmpfilename, + "[$remip]:" . PVE::Tools::shell_quote($dest), + ]; - # NOTE: we simply overwrite the destination file if it already exists - my $worker = sub { - my $upid = shift; + $err_cleanup = sub { run_command([@remcmd, 'rm', '-f', '--', $dest]) }; + } else { + PVE::Storage::activate_storage($cfg, $storage); + File::Path::make_path($dirname); + $cmd = ['cp', '--', $tmpfilename, $dest]; + } - print "starting file import from: $tmpfilename\n"; + # NOTE: we simply overwrite the destination file if it already exists + my $worker = sub { + my $upid = shift; - eval { - my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'}; - if ($checksum_algorithm) { - print "calculating checksum..."; + print "starting file import from: $tmpfilename\n"; - my $checksum_got = PVE::Tools::get_file_hash($checksum_algorithm, $tmpfilename); + eval { + my ($checksum, $checksum_algorithm) = + $param->@{ 'checksum', 'checksum-algorithm' }; + if ($checksum_algorithm) { + print "calculating checksum..."; - if (lc($checksum_got) eq lc($checksum)) { - print "OK, checksum verified\n"; - } else { - print "\n"; # the front end expects the error to reside at the last line without any noise - die "checksum mismatch: got '$checksum_got' != expect '$checksum'\n"; - } - } + my $checksum_got = + PVE::Tools::get_file_hash($checksum_algorithm, $tmpfilename); - if ($content eq 'iso') { - PVE::Storage::assert_iso_content($tmpfilename); - } + if (lc($checksum_got) eq lc($checksum)) { + print "OK, checksum verified\n"; + } else { + print "\n"; # the front end expects the error to reside at the last line without any noise + die "checksum mismatch: got '$checksum_got' != expect '$checksum'\n"; + } + } - if ($is_ova) { - assert_ova_contents($tmpfilename); - } elsif (defined($image_format)) { - # checks untrusted image - PVE::Storage::file_size_info($tmpfilename, 10, $image_format, 1); - } - }; - if (my $err = $@) { - # unlinks only the temporary file from the http server - unlink $tmpfilename or $! == ENOENT - or warn "unable to clean up temporory file '$tmpfilename' - $!\n"; - die $err; - } + if ($content eq 'iso') { + PVE::Storage::assert_iso_content($tmpfilename); + } - print "target node: $node\n"; - print "target file: $dest\n"; - print "file size is: $size\n"; - print "command: " . join(' ', @$cmd) . "\n"; + if ($is_ova) { + assert_ova_contents($tmpfilename); + } elsif (defined($image_format)) { + # checks untrusted image + PVE::Storage::file_size_info($tmpfilename, 10, $image_format, 1); + } + }; + if (my $err = $@) { + # unlinks only the temporary file from the http server + unlink $tmpfilename + or $! == ENOENT + or warn "unable to clean up temporory file '$tmpfilename' - $!\n"; + die $err; + } - eval { run_command($cmd, errmsg => 'import failed'); }; + print "target node: $node\n"; + print "target file: $dest\n"; + print "file size is: $size\n"; + print "command: " . join(' ', @$cmd) . "\n"; - # the temporary file got only uploaded locally, no need to rm remote - unlink $tmpfilename or $! 
== ENOENT - or warn "unable to clean up temporary file '$tmpfilename' - $!\n"; + eval { run_command($cmd, errmsg => 'import failed'); }; - if (my $err = $@) { - eval { $err_cleanup->() }; - warn "$@" if $@; - die $err; - } - print "finished file import successfully\n"; - }; + # the temporary file got only uploaded locally, no need to rm remote + unlink $tmpfilename + or $! == ENOENT + or warn "unable to clean up temporary file '$tmpfilename' - $!\n"; - return $rpcenv->fork_worker('imgcopy', undef, $user, $worker); - }}); + if (my $err = $@) { + eval { $err_cleanup->() }; + warn "$@" if $@; + die $err; + } + print "finished file import successfully\n"; + }; + + return $rpcenv->fork_worker('imgcopy', undef, $user, $worker); + }, +}); __PACKAGE__->register_method({ name => 'download_url', @@ -591,278 +649,292 @@ __PACKAGE__->register_method({ description => "Download templates, ISO images, OVAs and VM images by using an URL.", proxyto => 'node', permissions => { - description => 'Requires allocation access on the storage and as this allows one to probe' - .' the (local!) host network indirectly it also requires one of Sys.Modify on / (for' - .' backwards compatibility) or the newer Sys.AccessNetwork privilege on the node.', - check => [ 'and', - ['perm', '/storage/{storage}', [ 'Datastore.AllocateTemplate' ]], - [ 'or', - ['perm', '/', [ 'Sys.Audit', 'Sys.Modify' ]], - ['perm', '/nodes/{node}', [ 'Sys.AccessNetwork' ]], - ], - ], + description => + 'Requires allocation access on the storage and as this allows one to probe' + . ' the (local!) host network indirectly it also requires one of Sys.Modify on / (for' + . ' backwards compatibility) or the newer Sys.AccessNetwork privilege on the node.', + check => [ + 'and', + ['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']], + [ + 'or', + ['perm', '/', ['Sys.Audit', 'Sys.Modify']], + ['perm', '/nodes/{node}', ['Sys.AccessNetwork']], + ], + ], }, protected => 1, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - url => { - description => "The URL to download the file from.", - type => 'string', - pattern => 'https?://.*', - }, - content => { - description => "Content type.", # TODO: could be optional & detected in most cases - type => 'string', format => 'pve-storage-content', - enum => ['iso', 'vztmpl', 'import'], - }, - filename => { - description => "The name of the file to create. 
Caution: This will be normalized!", - maxLength => 255, - type => 'string', - }, - checksum => { - description => "The expected checksum of the file.", - type => 'string', - requires => 'checksum-algorithm', - optional => 1, - }, - compression => { - description => - "Decompress the downloaded file using the specified compression algorithm.", - type => 'string', - enum => $PVE::Storage::Plugin::KNOWN_COMPRESSION_FORMATS, - optional => 1, - }, - 'checksum-algorithm' => { - description => "The algorithm to calculate the checksum of the file.", - type => 'string', - enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'], - requires => 'checksum', - optional => 1, - }, - 'verify-certificates' => { - description => "If false, no SSL/TLS certificates will be verified.", - type => 'boolean', - optional => 1, - default => 1, - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + url => { + description => "The URL to download the file from.", + type => 'string', + pattern => 'https?://.*', + }, + content => { + description => "Content type.", # TODO: could be optional & detected in most cases + type => 'string', + format => 'pve-storage-content', + enum => ['iso', 'vztmpl', 'import'], + }, + filename => { + description => + "The name of the file to create. Caution: This will be normalized!", + maxLength => 255, + type => 'string', + }, + checksum => { + description => "The expected checksum of the file.", + type => 'string', + requires => 'checksum-algorithm', + optional => 1, + }, + compression => { + description => + "Decompress the downloaded file using the specified compression algorithm.", + type => 'string', + enum => $PVE::Storage::Plugin::KNOWN_COMPRESSION_FORMATS, + optional => 1, + }, + 'checksum-algorithm' => { + description => "The algorithm to calculate the checksum of the file.", + type => 'string', + enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'], + requires => 'checksum', + optional => 1, + }, + 'verify-certificates' => { + description => "If false, no SSL/TLS certificates will be verified.", + type => 'boolean', + optional => 1, + default => 1, + }, + }, }, returns => { - type => "string" + type => "string", }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $user = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my ($node, $storage, $compression) = $param->@{qw(node storage compression)}; - my $scfg = PVE::Storage::storage_check_enabled($cfg, $storage, $node); + my ($node, $storage, $compression) = $param->@{qw(node storage compression)}; + my $scfg = PVE::Storage::storage_check_enabled($cfg, $storage, $node); - die "can't upload to storage type '$scfg->{type}', not a file based storage!\n" - if !defined($scfg->{path}); + die "can't upload to storage type '$scfg->{type}', not a file based storage!\n" + if !defined($scfg->{path}); - my ($content, $url) = $param->@{'content', 'url'}; + my ($content, $url) = $param->@{ 'content', 'url' }; - die "storage '$storage' is not configured for content-type '$content'\n" - if !$scfg->{content}->{$content}; + die "storage '$storage' is not configured for content-type '$content'\n" + if !$scfg->{content}->{$content}; - my $filename = PVE::Storage::normalize_content_filename($param->{filename}); + my $filename = 
PVE::Storage::normalize_content_filename($param->{filename}); - my $path; - my $is_ova = 0; - my $image_format; + my $path; + my $is_ova = 0; + my $image_format; - if ($content eq 'iso') { - if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) { - raise_param_exc({ filename => "wrong file extension" }); - } - $path = PVE::Storage::get_iso_dir($cfg, $storage); - } elsif ($content eq 'vztmpl') { - if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) { - raise_param_exc({ filename => "wrong file extension" }); - } - $path = PVE::Storage::get_vztmpl_dir($cfg, $storage); - } elsif ($content eq 'import') { - if ($filename !~ m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!) { - raise_param_exc({ filename => "invalid filename or wrong extension" }); - } - my $format = $1; + if ($content eq 'iso') { + if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) { + raise_param_exc({ filename => "wrong file extension" }); + } + $path = PVE::Storage::get_iso_dir($cfg, $storage); + } elsif ($content eq 'vztmpl') { + if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) { + raise_param_exc({ filename => "wrong file extension" }); + } + $path = PVE::Storage::get_vztmpl_dir($cfg, $storage); + } elsif ($content eq 'import') { + if ($filename !~ + m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$! + ) { + raise_param_exc({ filename => "invalid filename or wrong extension" }); + } + my $format = $1; - if ($format eq 'ova') { - $is_ova = 1; - } else { - $image_format = $format; - } + if ($format eq 'ova') { + $is_ova = 1; + } else { + $image_format = $format; + } - $path = PVE::Storage::get_import_dir($cfg, $storage); - } else { - raise_param_exc({ content => "upload content-type '$content' is not allowed" }); - } + $path = PVE::Storage::get_import_dir($cfg, $storage); + } else { + raise_param_exc({ content => "upload content-type '$content' is not allowed" }); + } - PVE::Storage::activate_storage($cfg, $storage); - File::Path::make_path($path); + PVE::Storage::activate_storage($cfg, $storage); + File::Path::make_path($path); - my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg'); - my $opts = { - hash_required => 0, - verify_certificates => $param->{'verify-certificates'} // 1, - http_proxy => $dccfg->{http_proxy}, - https_proxy => $dccfg->{http_proxy}, - }; + my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg'); + my $opts = { + hash_required => 0, + verify_certificates => $param->{'verify-certificates'} // 1, + http_proxy => $dccfg->{http_proxy}, + https_proxy => $dccfg->{http_proxy}, + }; - my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'}; - if ($checksum) { - $opts->{"${checksum_algorithm}sum"} = $checksum; - $opts->{hash_required} = 1; - } + my ($checksum, $checksum_algorithm) = $param->@{ 'checksum', 'checksum-algorithm' }; + if ($checksum) { + $opts->{"${checksum_algorithm}sum"} = $checksum; + $opts->{hash_required} = 1; + } - $opts->{assert_file_validity} = sub { - my ($tmp_path) = @_; + $opts->{assert_file_validity} = sub { + my ($tmp_path) = @_; - if ($content eq 'iso') { - PVE::Storage::assert_iso_content($tmp_path); - } + if ($content eq 'iso') { + PVE::Storage::assert_iso_content($tmp_path); + } - if ($is_ova) { - assert_ova_contents($tmp_path); - } elsif (defined($image_format)) { - # checks untrusted image - PVE::Storage::file_size_info($tmp_path, 10, $image_format, 1); - } - }; + if ($is_ova) { + assert_ova_contents($tmp_path); + } elsif (defined($image_format)) { + # checks untrusted 
image + PVE::Storage::file_size_info($tmp_path, 10, $image_format, 1); + } + }; - my $worker = sub { - if ($compression) { - die "decompression not supported for $content\n" if $content ne 'iso'; - my $info = PVE::Storage::decompressor_info('iso', $compression); - die "no decompression method found\n" if !$info->{decompressor}; - $opts->{decompression_command} = $info->{decompressor}; - } + my $worker = sub { + if ($compression) { + die "decompression not supported for $content\n" if $content ne 'iso'; + my $info = PVE::Storage::decompressor_info('iso', $compression); + die "no decompression method found\n" if !$info->{decompressor}; + $opts->{decompression_command} = $info->{decompressor}; + } - PVE::Tools::download_file_from_url("$path/$filename", $url, $opts); - }; + PVE::Tools::download_file_from_url("$path/$filename", $url, $opts); + }; - my $worker_id = PVE::Tools::encode_text($filename); # must not pass : or the like as w-ID + my $worker_id = PVE::Tools::encode_text($filename); # must not pass : or the like as w-ID - return $rpcenv->fork_worker('download', $worker_id, $user, $worker); - }}); + return $rpcenv->fork_worker('download', $worker_id, $user, $worker); + }, +}); __PACKAGE__->register_method({ name => 'get_import_metadata', path => '{storage}/import-metadata', method => 'GET', description => - "Get the base parameters for creating a guest which imports data from a foreign importable" - ." guest, like an ESXi VM", + "Get the base parameters for creating a guest which imports data from a foreign importable" + . " guest, like an ESXi VM", proxyto => 'node', permissions => { - description => "You need read access for the volume.", - user => 'all', + description => "You need read access for the volume.", + user => 'all', }, protected => 1, parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id'), - volume => { - description => "Volume identifier for the guest archive/entry.", - type => 'string', - }, - }, + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + storage => get_standard_option('pve-storage-id'), + volume => { + description => "Volume identifier for the guest archive/entry.", + type => 'string', + }, + }, }, returns => { - type => "object", - description => 'Information about how to import a guest.', - additionalProperties => 0, - properties => { - type => { - type => 'string', - enum => [ 'vm' ], - description => 'The type of guest this is going to produce.', - }, - source => { - type => 'string', - enum => [ 'esxi' ], - description => 'The type of the import-source of this guest volume.', - }, - 'create-args' => { - type => 'object', - additionalProperties => 1, - description => 'Parameters which can be used in a call to create a VM or container.', - }, - 'disks' => { - type => 'object', - additionalProperties => 1, - optional => 1, - description => 'Recognised disk volumes as `$bus$id` => `$storeid:$path` map.', - }, - 'net' => { - type => 'object', - additionalProperties => 1, - optional => 1, - description => 'Recognised network interfaces as `net$id` => { ...params } object.', - }, - 'warnings' => { - type => 'array', - description => 'List of known issues that can affect the import of a guest.' - .' 
Note that lack of warning does not imply that there cannot be any problems.', - optional => 1, - items => { - type => "object", - additionalProperties => 1, - properties => { - 'type' => { - description => 'What this warning is about.', - enum => [ - 'cdrom-image-ignored', - 'efi-state-lost', - 'guest-is-running', - 'nvme-unsupported', - 'ova-needs-extracting', - 'ovmf-with-lsi-unsupported', - 'serial-port-socket-only', - ], - type => 'string', - }, - 'key' => { - description => 'Related subject (config) key of warning.', - optional => 1, - type => 'string', - }, - 'value' => { - description => 'Related subject (config) value of warning.', - optional => 1, - type => 'string', - }, - }, - }, - }, - }, + type => "object", + description => 'Information about how to import a guest.', + additionalProperties => 0, + properties => { + type => { + type => 'string', + enum => ['vm'], + description => 'The type of guest this is going to produce.', + }, + source => { + type => 'string', + enum => ['esxi'], + description => 'The type of the import-source of this guest volume.', + }, + 'create-args' => { + type => 'object', + additionalProperties => 1, + description => + 'Parameters which can be used in a call to create a VM or container.', + }, + 'disks' => { + type => 'object', + additionalProperties => 1, + optional => 1, + description => 'Recognised disk volumes as `$bus$id` => `$storeid:$path` map.', + }, + 'net' => { + type => 'object', + additionalProperties => 1, + optional => 1, + description => + 'Recognised network interfaces as `net$id` => { ...params } object.', + }, + 'warnings' => { + type => 'array', + description => 'List of known issues that can affect the import of a guest.' + . ' Note that lack of warning does not imply that there cannot be any problems.', + optional => 1, + items => { + type => "object", + additionalProperties => 1, + properties => { + 'type' => { + description => 'What this warning is about.', + enum => [ + 'cdrom-image-ignored', + 'efi-state-lost', + 'guest-is-running', + 'nvme-unsupported', + 'ova-needs-extracting', + 'ovmf-with-lsi-unsupported', + 'serial-port-socket-only', + ], + type => 'string', + }, + 'key' => { + description => 'Related subject (config) key of warning.', + optional => 1, + type => 'string', + }, + 'value' => { + description => 'Related subject (config) value of warning.', + optional => 1, + type => 'string', + }, + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my ($storeid, $volume) = $param->@{qw(storage volume)}; - my $volid = "$storeid:$volume"; + my ($storeid, $volume) = $param->@{qw(storage volume)}; + my $volid = "$storeid:$volume"; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); + PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid); - return PVE::Tools::run_with_timeout(30, sub { - return PVE::Storage::get_import_metadata($cfg, $volid); - }); - }}); + return PVE::Tools::run_with_timeout( + 30, + sub { + return PVE::Storage::get_import_metadata($cfg, $volid); + }, + ); + }, +}); 1; diff --git a/src/PVE/BackupProvider/Plugin/Base.pm b/src/PVE/BackupProvider/Plugin/Base.pm index e382d57..fd583fe 100644 --- a/src/PVE/BackupProvider/Plugin/Base.pm +++ b/src/PVE/BackupProvider/Plugin/Base.pm @@ -168,6 +168,7 @@ The message to 
be printed. =back =cut + sub new { my ($class, $storage_plugin, $scfg, $storeid, $log_function) = @_; @@ -183,6 +184,7 @@ Returns the name of the backup provider. It will be printed in some log lines. =back =cut + sub provider_name { my ($self) = @_; @@ -211,6 +213,7 @@ Unix time-stamp of when the job started. =back =cut + sub job_init { my ($self, $start_time) = @_; @@ -227,6 +230,7 @@ the backup server. Called in both, success and failure scenarios. =back =cut + sub job_cleanup { my ($self) = @_; @@ -271,6 +275,7 @@ Unix time-stamp of when the guest backup started. =back =cut + sub backup_init { my ($self, $vmid, $vmtype, $start_time) = @_; @@ -326,6 +331,7 @@ Present if there was a failure. The error message indicating the failure. =back =cut + sub backup_cleanup { my ($self, $vmid, $vmtype, $success, $info) = @_; @@ -366,6 +372,7 @@ The type of the guest being backed up. Currently, either C or C. =back =cut + sub backup_get_mechanism { my ($self, $vmid, $vmtype) = @_; @@ -396,6 +403,7 @@ Path to the file with the backup log. =back =cut + sub backup_handle_log_file { my ($self, $vmid, $filename) = @_; @@ -462,6 +470,7 @@ bitmap and existing ones will be discarded. =back =cut + sub backup_vm_query_incremental { my ($self, $vmid, $volumes) = @_; @@ -619,6 +628,7 @@ configuration as raw data. =back =cut + sub backup_vm { my ($self, $vmid, $guest_config, $volumes, $info) = @_; @@ -652,6 +662,7 @@ description there. =back =cut + sub backup_container_prepare { my ($self, $vmid, $info) = @_; @@ -752,6 +763,7 @@ for unprivileged containers by default. =back =cut + sub backup_container { my ($self, $vmid, $guest_config, $exclude_patterns, $info) = @_; @@ -797,6 +809,7 @@ The volume ID of the archive being restored. =back =cut + sub restore_get_mechanism { my ($self, $volname) = @_; @@ -824,6 +837,7 @@ The volume ID of the archive being restored. =back =cut + sub archive_get_guest_config { my ($self, $volname) = @_; @@ -853,6 +867,7 @@ The volume ID of the archive being restored. =back =cut + sub archive_get_firewall_config { my ($self, $volname) = @_; @@ -901,6 +916,7 @@ The volume ID of the archive being restored. =back =cut + sub restore_vm_init { my ($self, $volname) = @_; @@ -927,6 +943,7 @@ The volume ID of the archive being restored. =back =cut + sub restore_vm_cleanup { my ($self, $volname) = @_; @@ -984,6 +1001,7 @@ empty. =back =cut + sub restore_vm_volume_init { my ($self, $volname, $device_name, $info) = @_; @@ -1020,6 +1038,7 @@ empty. =back =cut + sub restore_vm_volume_cleanup { my ($self, $volname, $device_name, $info) = @_; @@ -1086,6 +1105,7 @@ empty. =back =cut + sub restore_container_init { my ($self, $volname, $info) = @_; @@ -1117,6 +1137,7 @@ empty. 
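# Illustrative sketch only, not part of this patch: a minimal provider module
# built on the PVE::BackupProvider::Plugin::Base API whose POD is reformatted
# in the hunks around here. The new() and provider_name() signatures follow
# the context lines of this diff; the package name 'Example' and the fields
# stored in the blessed hash are assumptions made for this example.
package PVE::BackupProvider::Plugin::Example;

use strict;
use warnings;

use base qw(PVE::BackupProvider::Plugin::Base);

sub new {
    my ($class, $storage_plugin, $scfg, $storeid, $log_function) = @_;

    my $self = bless {
        'storage-plugin' => $storage_plugin,
        scfg => $scfg,
        storeid => $storeid,
        'log-function' => $log_function,
    }, $class;

    return $self;
}

sub provider_name {
    my ($self) = @_;

    return 'example'; # shows up in some log lines, see the POD above
}

1;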
=back =cut + sub restore_container_cleanup { my ($self, $volname, $info) = @_; diff --git a/src/PVE/CLI/pvesm.pm b/src/PVE/CLI/pvesm.pm index fd5f788..b759cf1 100755 --- a/src/PVE/CLI/pvesm.pm +++ b/src/PVE/CLI/pvesm.pm @@ -35,48 +35,51 @@ my $nodename = PVE::INotify::nodename(); sub param_mapping { my ($name) = @_; - my $password_map = PVE::CLIHandler::get_standard_mapping('pve-password', { - func => sub { - my ($value) = @_; - return $value if $value; - return PVE::PTY::read_password("Enter Password: "); - }, - }); + my $password_map = PVE::CLIHandler::get_standard_mapping( + 'pve-password', + { + func => sub { + my ($value) = @_; + return $value if $value; + return PVE::PTY::read_password("Enter Password: "); + }, + }, + ); my $enc_key_map = { - name => 'encryption-key', - desc => 'a file containing an encryption key, or the special value "autogen"', - func => sub { - my ($value) = @_; - return $value if $value eq 'autogen'; - return PVE::Tools::file_get_contents($value); - } + name => 'encryption-key', + desc => 'a file containing an encryption key, or the special value "autogen"', + func => sub { + my ($value) = @_; + return $value if $value eq 'autogen'; + return PVE::Tools::file_get_contents($value); + }, }; my $master_key_map = { - name => 'master-pubkey', - desc => 'a file containing a PEM-formatted master public key', - func => sub { - my ($value) = @_; - return encode_base64(PVE::Tools::file_get_contents($value), ''); - } + name => 'master-pubkey', + desc => 'a file containing a PEM-formatted master public key', + func => sub { + my ($value) = @_; + return encode_base64(PVE::Tools::file_get_contents($value), ''); + }, }; my $keyring_map = { - name => 'keyring', - desc => 'file containing the keyring to authenticate in the Ceph cluster', - func => sub { - my ($value) = @_; - return PVE::Tools::file_get_contents($value); - }, + name => 'keyring', + desc => 'file containing the keyring to authenticate in the Ceph cluster', + func => sub { + my ($value) = @_; + return PVE::Tools::file_get_contents($value); + }, }; my $mapping = { - 'cifsscan' => [ $password_map ], - 'cifs' => [ $password_map ], - 'pbs' => [ $password_map ], - 'create' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ], - 'update' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ], + 'cifsscan' => [$password_map], + 'cifs' => [$password_map], + 'pbs' => [$password_map], + 'create' => [$password_map, $enc_key_map, $master_key_map, $keyring_map], + 'update' => [$password_map, $enc_key_map, $master_key_map, $keyring_map], }; return $mapping->{$name}; } @@ -85,118 +88,117 @@ sub setup_environment { PVE::RPCEnvironment->setup_default_cli_env(); } -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'apiinfo', path => 'apiinfo', method => 'GET', description => "Returns APIVER and APIAGE.", parameters => { - additionalProperties => 0, - properties => {}, + additionalProperties => 0, + properties => {}, }, returns => { - type => 'object', - properties => { - apiver => { type => 'integer' }, - apiage => { type => 'integer' }, - }, + type => 'object', + properties => { + apiver => { type => 'integer' }, + apiage => { type => 'integer' }, + }, }, code => sub { - return { - apiver => PVE::Storage::APIVER, - apiage => PVE::Storage::APIAGE, - }; - } + return { + apiver => PVE::Storage::APIVER, + apiage => PVE::Storage::APIAGE, + }; + }, }); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'path', path => 'path', method => 'GET', description => "Get 
filesystem path for specified volume", parameters => { - additionalProperties => 0, - properties => { - volume => { - description => "Volume identifier", - type => 'string', format => 'pve-volume-id', - completion => \&PVE::Storage::complete_volume, - }, - }, + additionalProperties => 0, + properties => { + volume => { + description => "Volume identifier", + type => 'string', + format => 'pve-volume-id', + completion => \&PVE::Storage::complete_volume, + }, + }, }, returns => { type => 'null' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $cfg = PVE::Storage::config(); + my $cfg = PVE::Storage::config(); - my $path = PVE::Storage::path ($cfg, $param->{volume}); + my $path = PVE::Storage::path($cfg, $param->{volume}); - print "$path\n"; + print "$path\n"; - return undef; + return undef; - }}); + }, +}); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'extractconfig', path => 'extractconfig', method => 'GET', description => "Extract configuration from vzdump backup archive.", permissions => { - description => "The user needs 'VM.Backup' permissions on the backed up guest ID, and 'Datastore.AllocateSpace' on the backup storage.", - user => 'all', + description => + "The user needs 'VM.Backup' permissions on the backed up guest ID, and 'Datastore.AllocateSpace' on the backup storage.", + user => 'all', }, protected => 1, parameters => { - additionalProperties => 0, - properties => { - volume => { - description => "Volume identifier", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - }, + additionalProperties => 0, + properties => { + volume => { + description => "Volume identifier", + type => 'string', + completion => \&PVE::Storage::complete_volume, + }, + }, }, returns => { type => 'null' }, code => sub { - my ($param) = @_; - my $volume = $param->{volume}; + my ($param) = @_; + my $volume = $param->{volume}; - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - my $storage_cfg = PVE::Storage::config(); - PVE::Storage::check_volume_access( - $rpcenv, - $authuser, - $storage_cfg, - undef, - $volume, - 'backup', - ); + my $storage_cfg = PVE::Storage::config(); + PVE::Storage::check_volume_access( + $rpcenv, $authuser, $storage_cfg, undef, $volume, 'backup', + ); - if (PVE::Storage::parse_volume_id($volume, 1)) { - my (undef, undef, $ownervm) = PVE::Storage::parse_volname($storage_cfg, $volume); - $rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']); - } + if (PVE::Storage::parse_volume_id($volume, 1)) { + my (undef, undef, $ownervm) = PVE::Storage::parse_volname($storage_cfg, $volume); + $rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']); + } - my $config_raw = PVE::Storage::extract_vzdump_config($storage_cfg, $volume); + my $config_raw = PVE::Storage::extract_vzdump_config($storage_cfg, $volume); - print "$config_raw\n"; - return; - }}); + print "$config_raw\n"; + return; + }, +}); my $print_content = sub { my ($list) = @_; my ($maxlenname, $maxsize) = (0, 0); foreach my $info (@$list) { - my $volid = $info->{volid}; - my $sidlen = length ($volid); - $maxlenname = $sidlen if $sidlen > $maxlenname; - $maxsize = $info->{size} if ($info->{size} // 0) > $maxsize; + my $volid = $info->{volid}; + my $sidlen = length($volid); + $maxlenname = $sidlen if $sidlen > $maxlenname; + $maxsize = $info->{size} if ($info->{size} // 0) > $maxsize; } my $sizemaxdigits = length($maxsize); @@ -204,17 +206,18 @@ my $print_content 
= sub { printf "$basefmt %s\n", "Volid", "Format", "Type", "Size", "VMID"; foreach my $info (@$list) { - next if !$info->{vmid}; - my $volid = $info->{volid}; + next if !$info->{vmid}; + my $volid = $info->{volid}; - printf "$basefmt %d\n", $volid, $info->{format}, $info->{content}, $info->{size}, $info->{vmid}; + printf "$basefmt %d\n", $volid, $info->{format}, $info->{content}, $info->{size}, + $info->{vmid}; } foreach my $info (sort { $a->{format} cmp $b->{format} } @$list) { - next if $info->{vmid}; - my $volid = $info->{volid}; + next if $info->{vmid}; + my $volid = $info->{volid}; - printf "$basefmt\n", $volid, $info->{format}, $info->{content}, $info->{size}; + printf "$basefmt\n", $volid, $info->{format}, $info->{content}, $info->{size}; } }; @@ -223,363 +226,388 @@ my $print_status = sub { my $maxlen = 0; foreach my $res (@$res) { - my $storeid = $res->{storage}; - $maxlen = length ($storeid) if length ($storeid) > $maxlen; + my $storeid = $res->{storage}; + $maxlen = length($storeid) if length($storeid) > $maxlen; } - $maxlen+=1; + $maxlen += 1; printf "%-${maxlen}s %10s %10s %15s %15s %15s %8s\n", 'Name', 'Type', - 'Status', 'Total', 'Used', 'Available', '%'; + 'Status', 'Total', 'Used', 'Available', '%'; foreach my $res (sort { $a->{storage} cmp $b->{storage} } @$res) { - my $storeid = $res->{storage}; + my $storeid = $res->{storage}; - my $active = $res->{active} ? 'active' : 'inactive'; - my ($per, $per_fmt) = (0, '% 7.2f%%'); - $per = ($res->{used}*100)/$res->{total} if $res->{total} > 0; + my $active = $res->{active} ? 'active' : 'inactive'; + my ($per, $per_fmt) = (0, '% 7.2f%%'); + $per = ($res->{used} * 100) / $res->{total} if $res->{total} > 0; - if (!$res->{enabled}) { - $per = 'N/A'; - $per_fmt = '% 8s'; - $active = 'disabled'; - } + if (!$res->{enabled}) { + $per = 'N/A'; + $per_fmt = '% 8s'; + $active = 'disabled'; + } - printf "%-${maxlen}s %10s %10s %15d %15d %15d $per_fmt\n", $storeid, - $res->{type}, $active, $res->{total}/1024, $res->{used}/1024, - $res->{avail}/1024, $per; + printf "%-${maxlen}s %10s %10s %15d %15d %15d $per_fmt\n", $storeid, + $res->{type}, $active, $res->{total} / 1024, $res->{used} / 1024, + $res->{avail} / 1024, $per; } }; -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'export', path => 'export', method => 'GET', description => "Used internally to export a volume.", protected => 1, parameters => { - additionalProperties => 0, - properties => { - volume => { - description => "Volume identifier", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - format => { - description => "Export stream format", - type => 'string', - enum => $PVE::Storage::KNOWN_EXPORT_FORMATS, - }, - filename => { - description => "Destination file name", - type => 'string', - }, - base => { - description => "Snapshot to start an incremental stream from", - type => 'string', - pattern => qr/[a-z0-9_\-]{1,40}/i, - maxLength => 40, - optional => 1, - }, - snapshot => { - description => "Snapshot to export", - type => 'string', - pattern => qr/[a-z0-9_\-]{1,40}/i, - maxLength => 40, - optional => 1, - }, - 'with-snapshots' => { - description => - "Whether to include intermediate snapshots in the stream", - type => 'boolean', - optional => 1, - default => 0, - }, - 'snapshot-list' => { - description => "Ordered list of snapshots to transfer", - type => 'string', - format => 'string-list', - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + volume => { + description => "Volume identifier", + type => 
'string', + completion => \&PVE::Storage::complete_volume, + }, + format => { + description => "Export stream format", + type => 'string', + enum => $PVE::Storage::KNOWN_EXPORT_FORMATS, + }, + filename => { + description => "Destination file name", + type => 'string', + }, + base => { + description => "Snapshot to start an incremental stream from", + type => 'string', + pattern => qr/[a-z0-9_\-]{1,40}/i, + maxLength => 40, + optional => 1, + }, + snapshot => { + description => "Snapshot to export", + type => 'string', + pattern => qr/[a-z0-9_\-]{1,40}/i, + maxLength => 40, + optional => 1, + }, + 'with-snapshots' => { + description => "Whether to include intermediate snapshots in the stream", + type => 'boolean', + optional => 1, + default => 0, + }, + 'snapshot-list' => { + description => "Ordered list of snapshots to transfer", + type => 'string', + format => 'string-list', + optional => 1, + }, + }, }, returns => { type => 'null' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $with_snapshots = $param->{'with-snapshots'}; - if (defined(my $list = $param->{'snapshot-list'})) { - $with_snapshots = [PVE::Tools::split_list($list)]; - } + my $with_snapshots = $param->{'with-snapshots'}; + if (defined(my $list = $param->{'snapshot-list'})) { + $with_snapshots = [PVE::Tools::split_list($list)]; + } - my $filename = $param->{filename}; + my $filename = $param->{filename}; - my $outfh; - if ($filename eq '-') { - # No other messages must go to STDOUT if it's used for the export stream! - open($outfh, '>&', STDOUT) or die "unable to dup() STDOUT - $!\n"; - close(STDOUT); - open(STDOUT, '>', '/dev/null'); - } else { - sysopen($outfh, $filename, O_CREAT|O_WRONLY|O_TRUNC) - or die "open($filename): $!\n"; - } + my $outfh; + if ($filename eq '-') { + # No other messages must go to STDOUT if it's used for the export stream! + open($outfh, '>&', STDOUT) or die "unable to dup() STDOUT - $!\n"; + close(STDOUT); + open(STDOUT, '>', '/dev/null'); + } else { + sysopen($outfh, $filename, O_CREAT | O_WRONLY | O_TRUNC) + or die "open($filename): $!\n"; + } - eval { - my $cfg = PVE::Storage::config(); - PVE::Storage::volume_export($cfg, $outfh, $param->{volume}, $param->{format}, - $param->{snapshot}, $param->{base}, $with_snapshots); - }; - my $err = $@; - if ($filename ne '-') { - close($outfh); - unlink($filename) if $err; - } - die $err if $err; - return; - } + eval { + my $cfg = PVE::Storage::config(); + PVE::Storage::volume_export( + $cfg, + $outfh, + $param->{volume}, + $param->{format}, + $param->{snapshot}, + $param->{base}, + $with_snapshots, + ); + }; + my $err = $@; + if ($filename ne '-') { + close($outfh); + unlink($filename) if $err; + } + die $err if $err; + return; + }, }); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'import', path => 'import', method => 'PUT', description => "Used internally to import a volume.", protected => 1, parameters => { - additionalProperties => 0, - properties => { - volume => { - description => "Volume identifier", - type => 'string', - completion => \&PVE::Storage::complete_volume, - }, - format => { - description => "Import stream format", - type => 'string', - enum => $PVE::Storage::KNOWN_EXPORT_FORMATS, - }, - filename => { - description => "Source file name. For '-' stdin is used, the " . - "tcp:// format allows to use a TCP connection, " . - "the unix://PATH-TO-SOCKET format a UNIX socket as input." . 
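# Illustrative sketch only, not part of this patch: a possible client-side
# counterpart to the tcp:// import mode described in this parameter. The
# import handler prints the chosen IP and port on stdout and waits up to 30
# seconds for a connection; a sender could then stream the export data like
# this. The subroutine name and the 64 KiB chunk size are assumptions made
# for the example.
use strict;
use warnings;

use IO::Socket::IP;
use Socket qw(SOCK_STREAM);

sub stream_export_to_import {
    my ($ip, $port, $export_fh) = @_;

    my $sock = IO::Socket::IP->new(
        PeerHost => $ip,
        PeerPort => $port,
        Type => SOCK_STREAM,
        Timeout => 30,
    ) or die "failed to connect to [$ip]:$port - $!\n";

    # copy the export stream to the socket in 64 KiB chunks
    my $buf;
    while (read($export_fh, $buf, 64 * 1024)) {
        print {$sock} $buf or die "writing to socket failed - $!\n";
    }

    close($sock) or die "closing socket failed - $!\n";
    return;
}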
- "Else, the file is treated as common file.", - type => 'string', - }, - base => { - description => "Base snapshot of an incremental stream", - type => 'string', - pattern => qr/[a-z0-9_\-]{1,40}/i, - maxLength => 40, - optional => 1, - }, - 'with-snapshots' => { - description => - "Whether the stream includes intermediate snapshots", - type => 'boolean', - optional => 1, - default => 0, - }, - 'delete-snapshot' => { - description => "A snapshot to delete on success", - type => 'string', - pattern => qr/[a-z0-9_\-]{1,80}/i, - maxLength => 80, - optional => 1, - }, - 'allow-rename' => { - description => "Choose a new volume ID if the requested " . - "volume ID already exists, instead of throwing an error.", - type => 'boolean', - optional => 1, - default => 0, - }, - snapshot => { - description => "The current-state snapshot if the stream contains snapshots", - type => 'string', - pattern => qr/[a-z0-9_\-]{1,40}/i, - maxLength => 40, - optional => 1, - }, - }, + additionalProperties => 0, + properties => { + volume => { + description => "Volume identifier", + type => 'string', + completion => \&PVE::Storage::complete_volume, + }, + format => { + description => "Import stream format", + type => 'string', + enum => $PVE::Storage::KNOWN_EXPORT_FORMATS, + }, + filename => { + description => "Source file name. For '-' stdin is used, the " + . "tcp:// format allows to use a TCP connection, " + . "the unix://PATH-TO-SOCKET format a UNIX socket as input." + . "Else, the file is treated as common file.", + type => 'string', + }, + base => { + description => "Base snapshot of an incremental stream", + type => 'string', + pattern => qr/[a-z0-9_\-]{1,40}/i, + maxLength => 40, + optional => 1, + }, + 'with-snapshots' => { + description => "Whether the stream includes intermediate snapshots", + type => 'boolean', + optional => 1, + default => 0, + }, + 'delete-snapshot' => { + description => "A snapshot to delete on success", + type => 'string', + pattern => qr/[a-z0-9_\-]{1,80}/i, + maxLength => 80, + optional => 1, + }, + 'allow-rename' => { + description => "Choose a new volume ID if the requested " + . "volume ID already exists, instead of throwing an error.", + type => 'boolean', + optional => 1, + default => 0, + }, + snapshot => { + description => "The current-state snapshot if the stream contains snapshots", + type => 'string', + pattern => qr/[a-z0-9_\-]{1,40}/i, + maxLength => 40, + optional => 1, + }, + }, }, returns => { type => 'string' }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $filename = $param->{filename}; + my $filename = $param->{filename}; - my $infh; - if ($filename eq '-') { - $infh = \*STDIN; - } elsif ($filename =~ m!^tcp://(([^/]+)(/\d+)?)$!) { - my ($cidr, $ip, $subnet) = ($1, $2, $3); - if ($subnet) { # got real CIDR notation, not just IP - my $ips = PVE::Network::get_local_ip_from_cidr($cidr); - die "Unable to get any local IP address in network '$cidr'\n" - if scalar(@$ips) < 1; - die "Got multiple local IP address in network '$cidr'\n" - if scalar(@$ips) > 1; + my $infh; + if ($filename eq '-') { + $infh = \*STDIN; + } elsif ($filename =~ m!^tcp://(([^/]+)(/\d+)?)$!) 
{ + my ($cidr, $ip, $subnet) = ($1, $2, $3); + if ($subnet) { # got real CIDR notation, not just IP + my $ips = PVE::Network::get_local_ip_from_cidr($cidr); + die "Unable to get any local IP address in network '$cidr'\n" + if scalar(@$ips) < 1; + die "Got multiple local IP address in network '$cidr'\n" + if scalar(@$ips) > 1; - $ip = $ips->[0]; - } - my $family = PVE::Tools::get_host_address_family($ip); - my $port = PVE::Tools::next_migrate_port($family, $ip); + $ip = $ips->[0]; + } + my $family = PVE::Tools::get_host_address_family($ip); + my $port = PVE::Tools::next_migrate_port($family, $ip); - my $sock_params = { - Listen => 1, - ReuseAddr => 1, - Proto => &Socket::IPPROTO_TCP, - GetAddrInfoFlags => 0, - LocalAddr => $ip, - LocalPort => $port, - }; - my $socket = IO::Socket::IP->new(%$sock_params) - or die "failed to open socket: $!\n"; + my $sock_params = { + Listen => 1, + ReuseAddr => 1, + Proto => &Socket::IPPROTO_TCP, + GetAddrInfoFlags => 0, + LocalAddr => $ip, + LocalPort => $port, + }; + my $socket = IO::Socket::IP->new(%$sock_params) + or die "failed to open socket: $!\n"; - print "$ip\n$port\n"; # tell remote where to connect - *STDOUT->flush(); + print "$ip\n$port\n"; # tell remote where to connect + *STDOUT->flush(); - my $prev_alarm = alarm 0; - local $SIG{ALRM} = sub { die "timed out waiting for client\n" }; - alarm 30; - my $client = $socket->accept; # Wait for a client - alarm $prev_alarm; - close($socket); + my $prev_alarm = alarm 0; + local $SIG{ALRM} = sub { die "timed out waiting for client\n" }; + alarm 30; + my $client = $socket->accept; # Wait for a client + alarm $prev_alarm; + close($socket); - $infh = \*$client; - } elsif ($filename =~ m!^unix://(.*)$!) { - my $socket_path = $1; - my $socket = IO::Socket::UNIX->new( - Type => SOCK_STREAM(), - Local => $socket_path, - Listen => 1, - ) or die "failed to open socket: $!\n"; + $infh = \*$client; + } elsif ($filename =~ m!^unix://(.*)$!) 
{ + my $socket_path = $1; + my $socket = IO::Socket::UNIX->new( + Type => SOCK_STREAM(), + Local => $socket_path, + Listen => 1, + ) or die "failed to open socket: $!\n"; - print "ready\n"; - *STDOUT->flush(); + print "ready\n"; + *STDOUT->flush(); - my $prev_alarm = alarm 0; - local $SIG{ALRM} = sub { die "timed out waiting for client\n" }; - alarm 30; - my $client = $socket->accept; # Wait for a client - alarm $prev_alarm; - close($socket); + my $prev_alarm = alarm 0; + local $SIG{ALRM} = sub { die "timed out waiting for client\n" }; + alarm 30; + my $client = $socket->accept; # Wait for a client + alarm $prev_alarm; + close($socket); - $infh = \*$client; - } else { - sysopen($infh, $filename, O_RDONLY) - or die "open($filename): $!\n"; - } + $infh = \*$client; + } else { + sysopen($infh, $filename, O_RDONLY) + or die "open($filename): $!\n"; + } - my $cfg = PVE::Storage::config(); - my $volume = $param->{volume}; - my $delete = $param->{'delete-snapshot'}; - my $imported_volid = PVE::Storage::volume_import($cfg, $infh, $volume, $param->{format}, - $param->{snapshot}, $param->{base}, $param->{'with-snapshots'}, - $param->{'allow-rename'}); - PVE::Storage::volume_snapshot_delete($cfg, $imported_volid, $delete) - if defined($delete); - return $imported_volid; - } + my $cfg = PVE::Storage::config(); + my $volume = $param->{volume}; + my $delete = $param->{'delete-snapshot'}; + my $imported_volid = PVE::Storage::volume_import( + $cfg, + $infh, + $volume, + $param->{format}, + $param->{snapshot}, + $param->{base}, + $param->{'with-snapshots'}, + $param->{'allow-rename'}, + ); + PVE::Storage::volume_snapshot_delete($cfg, $imported_volid, $delete) + if defined($delete); + return $imported_volid; + }, }); -__PACKAGE__->register_method ({ +__PACKAGE__->register_method({ name => 'prunebackups', path => 'prunebackups', method => 'GET', - description => "Prune backups. Only those using the standard naming scheme are considered. " . - "If no keep options are specified, those from the storage configuration are used.", + description => "Prune backups. Only those using the standard naming scheme are considered. " + . "If no keep options are specified, those from the storage configuration are used.", protected => 1, proxyto => 'node', parameters => { - additionalProperties => 0, - properties => { - 'dry-run' => { - description => "Only show what would be pruned, don't delete anything.", - type => 'boolean', - optional => 1, - }, - node => get_standard_option('pve-node'), - storage => get_standard_option('pve-storage-id', { - completion => \&PVE::Storage::complete_storage_enabled, - }), - %{$PVE::Storage::Plugin::prune_backups_format}, - type => { - description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.", - type => 'string', - optional => 1, - enum => ['qemu', 'lxc'], - }, - vmid => get_standard_option('pve-vmid', { - description => "Only consider backups for this guest.", - optional => 1, - completion => \&PVE::Cluster::complete_vmid, - }), - }, + additionalProperties => 0, + properties => { + 'dry-run' => { + description => "Only show what would be pruned, don't delete anything.", + type => 'boolean', + optional => 1, + }, + node => get_standard_option('pve-node'), + storage => get_standard_option( + 'pve-storage-id', + { + completion => \&PVE::Storage::complete_storage_enabled, + }, + ), + %{$PVE::Storage::Plugin::prune_backups_format}, + type => { + description => + "Either 'qemu' or 'lxc'. 
Only consider backups for guests of this type.", + type => 'string', + optional => 1, + enum => ['qemu', 'lxc'], + }, + vmid => get_standard_option( + 'pve-vmid', + { + description => "Only consider backups for this guest.", + optional => 1, + completion => \&PVE::Cluster::complete_vmid, + }, + ), + }, }, returns => { - type => 'object', - properties => { - dryrun => { - description => 'If it was a dry run or not. The list will only be defined in that case.', - type => 'boolean', - }, - list => { - type => 'array', - items => { - type => 'object', - properties => { - volid => { - description => "Backup volume ID.", - type => 'string', - }, - 'ctime' => { - description => "Creation time of the backup (seconds since the UNIX epoch).", - type => 'integer', - }, - 'mark' => { - description => "Whether the backup would be kept or removed. For backups that don't " . - "use the standard naming scheme, it's 'protected'.", - type => 'string', - }, - type => { - description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.", - type => 'string', - }, - 'vmid' => { - description => "The VM the backup belongs to.", - type => 'integer', - optional => 1, - }, - }, - }, - }, - }, + type => 'object', + properties => { + dryrun => { + description => + 'If it was a dry run or not. The list will only be defined in that case.', + type => 'boolean', + }, + list => { + type => 'array', + items => { + type => 'object', + properties => { + volid => { + description => "Backup volume ID.", + type => 'string', + }, + 'ctime' => { + description => + "Creation time of the backup (seconds since the UNIX epoch).", + type => 'integer', + }, + 'mark' => { + description => + "Whether the backup would be kept or removed. For backups that don't " + . "use the standard naming scheme, it's 'protected'.", + type => 'string', + }, + type => { + description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.", + type => 'string', + }, + 'vmid' => { + description => "The VM the backup belongs to.", + type => 'integer', + optional => 1, + }, + }, + }, + }, + }, }, code => sub { - my ($param) = @_; + my ($param) = @_; - my $dryrun = extract_param($param, 'dry-run') ? 1 : 0; + my $dryrun = extract_param($param, 'dry-run') ? 
1 : 0; - my $keep_opts; - foreach my $keep (keys %{$PVE::Storage::Plugin::prune_backups_format}) { - $keep_opts->{$keep} = extract_param($param, $keep) if defined($param->{$keep}); - } - $param->{'prune-backups'} = PVE::JSONSchema::print_property_string( - $keep_opts, $PVE::Storage::Plugin::prune_backups_format) if $keep_opts; + my $keep_opts; + foreach my $keep (keys %{$PVE::Storage::Plugin::prune_backups_format}) { + $keep_opts->{$keep} = extract_param($param, $keep) if defined($param->{$keep}); + } + $param->{'prune-backups'} = PVE::JSONSchema::print_property_string( + $keep_opts, + $PVE::Storage::Plugin::prune_backups_format, + ) if $keep_opts; - my $list = []; - if ($dryrun) { - $list = PVE::API2::Storage::PruneBackups->dryrun($param); - } else { - PVE::API2::Storage::PruneBackups->delete($param); - } + my $list = []; + if ($dryrun) { + $list = PVE::API2::Storage::PruneBackups->dryrun($param); + } else { + PVE::API2::Storage::PruneBackups->delete($param); + } - return { - dryrun => $dryrun, - list => $list, - }; - }}); + return { + dryrun => $dryrun, + list => $list, + }; + }, +}); my $print_api_result = sub { my ($data, $schema, $options) = @_; @@ -587,91 +615,141 @@ my $print_api_result = sub { }; our $cmddef = { - add => [ "PVE::API2::Storage::Config", 'create', ['type', 'storage'] ], - set => [ "PVE::API2::Storage::Config", 'update', ['storage'] ], - remove => [ "PVE::API2::Storage::Config", 'delete', ['storage'] ], - status => [ "PVE::API2::Storage::Status", 'index', [], - { node => $nodename }, $print_status ], - list => [ "PVE::API2::Storage::Content", 'index', ['storage'], - { node => $nodename }, $print_content ], - alloc => [ "PVE::API2::Storage::Content", 'create', ['storage', 'vmid', 'filename', 'size'], - { node => $nodename }, sub { - my $volid = shift; - print "successfully created '$volid'\n"; - }], - free => [ "PVE::API2::Storage::Content", 'delete', ['volume'], - { node => $nodename } ], + add => ["PVE::API2::Storage::Config", 'create', ['type', 'storage']], + set => ["PVE::API2::Storage::Config", 'update', ['storage']], + remove => ["PVE::API2::Storage::Config", 'delete', ['storage']], + status => ["PVE::API2::Storage::Status", 'index', [], { node => $nodename }, $print_status], + list => [ + "PVE::API2::Storage::Content", + 'index', + ['storage'], + { node => $nodename }, + $print_content, + ], + alloc => [ + "PVE::API2::Storage::Content", + 'create', + ['storage', 'vmid', 'filename', 'size'], + { node => $nodename }, + sub { + my $volid = shift; + print "successfully created '$volid'\n"; + }, + ], + free => ["PVE::API2::Storage::Content", 'delete', ['volume'], { node => $nodename }], scan => { - nfs => [ "PVE::API2::Storage::Scan", 'nfsscan', ['server'], { node => $nodename }, sub { - my $res = shift; + nfs => [ + "PVE::API2::Storage::Scan", + 'nfsscan', + ['server'], + { node => $nodename }, + sub { + my $res = shift; - my $maxlen = 0; - foreach my $rec (@$res) { - my $len = length ($rec->{path}); - $maxlen = $len if $len > $maxlen; - } - foreach my $rec (@$res) { - printf "%-${maxlen}s %s\n", $rec->{path}, $rec->{options}; - } - }], - cifs => [ "PVE::API2::Storage::Scan", 'cifsscan', ['server'], { node => $nodename }, sub { - my $res = shift; + my $maxlen = 0; + foreach my $rec (@$res) { + my $len = length($rec->{path}); + $maxlen = $len if $len > $maxlen; + } + foreach my $rec (@$res) { + printf "%-${maxlen}s %s\n", $rec->{path}, $rec->{options}; + } + }, + ], + cifs => [ + "PVE::API2::Storage::Scan", + 'cifsscan', + ['server'], + { node => $nodename }, + sub { 
+ my $res = shift; - my $maxlen = 0; - foreach my $rec (@$res) { - my $len = length ($rec->{share}); - $maxlen = $len if $len > $maxlen; - } - foreach my $rec (@$res) { - printf "%-${maxlen}s %s\n", $rec->{share}, $rec->{description}; - } - }], - glusterfs => [ "PVE::API2::Storage::Scan", 'glusterfsscan', ['server'], { node => $nodename }, sub { - my $res = shift; + my $maxlen = 0; + foreach my $rec (@$res) { + my $len = length($rec->{share}); + $maxlen = $len if $len > $maxlen; + } + foreach my $rec (@$res) { + printf "%-${maxlen}s %s\n", $rec->{share}, $rec->{description}; + } + }, + ], + glusterfs => [ + "PVE::API2::Storage::Scan", + 'glusterfsscan', + ['server'], + { node => $nodename }, + sub { + my $res = shift; - foreach my $rec (@$res) { - printf "%s\n", $rec->{volname}; - } - }], - iscsi => [ "PVE::API2::Storage::Scan", 'iscsiscan', ['portal'], { node => $nodename }, sub { - my $res = shift; + foreach my $rec (@$res) { + printf "%s\n", $rec->{volname}; + } + }, + ], + iscsi => [ + "PVE::API2::Storage::Scan", + 'iscsiscan', + ['portal'], + { node => $nodename }, + sub { + my $res = shift; - my $maxlen = 0; - foreach my $rec (@$res) { - my $len = length ($rec->{target}); - $maxlen = $len if $len > $maxlen; - } - foreach my $rec (@$res) { - printf "%-${maxlen}s %s\n", $rec->{target}, $rec->{portal}; - } - }], - lvm => [ "PVE::API2::Storage::Scan", 'lvmscan', [], { node => $nodename }, sub { - my $res = shift; - foreach my $rec (@$res) { - printf "$rec->{vg}\n"; - } - }], - lvmthin => [ "PVE::API2::Storage::Scan", 'lvmthinscan', ['vg'], { node => $nodename }, sub { - my $res = shift; - foreach my $rec (@$res) { - printf "$rec->{lv}\n"; - } - }], - pbs => [ - "PVE::API2::Storage::Scan", - 'pbsscan', - ['server', 'username'], - { node => $nodename }, - $print_api_result, - $PVE::RESTHandler::standard_output_options, - ], - zfs => [ "PVE::API2::Storage::Scan", 'zfsscan', [], { node => $nodename }, sub { - my $res = shift; + my $maxlen = 0; + foreach my $rec (@$res) { + my $len = length($rec->{target}); + $maxlen = $len if $len > $maxlen; + } + foreach my $rec (@$res) { + printf "%-${maxlen}s %s\n", $rec->{target}, $rec->{portal}; + } + }, + ], + lvm => [ + "PVE::API2::Storage::Scan", + 'lvmscan', + [], + { node => $nodename }, + sub { + my $res = shift; + foreach my $rec (@$res) { + printf "$rec->{vg}\n"; + } + }, + ], + lvmthin => [ + "PVE::API2::Storage::Scan", + 'lvmthinscan', + ['vg'], + { node => $nodename }, + sub { + my $res = shift; + foreach my $rec (@$res) { + printf "$rec->{lv}\n"; + } + }, + ], + pbs => [ + "PVE::API2::Storage::Scan", + 'pbsscan', + ['server', 'username'], + { node => $nodename }, + $print_api_result, + $PVE::RESTHandler::standard_output_options, + ], + zfs => [ + "PVE::API2::Storage::Scan", + 'zfsscan', + [], + { node => $nodename }, + sub { + my $res = shift; - foreach my $rec (@$res) { - printf "$rec->{pool}\n"; - } - }], + foreach my $rec (@$res) { + printf "$rec->{pool}\n"; + } + }, + ], }, nfsscan => { alias => 'scan nfs' }, cifsscan => { alias => 'scan cifs' }, @@ -680,55 +758,79 @@ our $cmddef = { lvmscan => { alias => 'scan lvm' }, lvmthinscan => { alias => 'scan lvmthin' }, zfsscan => { alias => 'scan zfs' }, - path => [ __PACKAGE__, 'path', ['volume']], + path => [__PACKAGE__, 'path', ['volume']], extractconfig => [__PACKAGE__, 'extractconfig', ['volume']], - export => [ __PACKAGE__, 'export', ['volume', 'format', 'filename']], - import => [ __PACKAGE__, 'import', ['volume', 'format', 'filename'], {}, sub { - my $volid = shift; - print 
PVE::Storage::volume_imported_message($volid); - }], - apiinfo => [ __PACKAGE__, 'apiinfo', [], {}, sub { - my $res = shift; + export => [__PACKAGE__, 'export', ['volume', 'format', 'filename']], + import => [ + __PACKAGE__, + 'import', + ['volume', 'format', 'filename'], + {}, + sub { + my $volid = shift; + print PVE::Storage::volume_imported_message($volid); + }, + ], + apiinfo => [ + __PACKAGE__, + 'apiinfo', + [], + {}, + sub { + my $res = shift; - print "APIVER $res->{apiver}\n"; - print "APIAGE $res->{apiage}\n"; - }], - 'prune-backups' => [ __PACKAGE__, 'prunebackups', ['storage'], { node => $nodename }, sub { - my $res = shift; + print "APIVER $res->{apiver}\n"; + print "APIAGE $res->{apiage}\n"; + }, + ], + 'prune-backups' => [ + __PACKAGE__, + 'prunebackups', + ['storage'], + { node => $nodename }, + sub { + my $res = shift; - my ($dryrun, $list) = ($res->{dryrun}, $res->{list}); + my ($dryrun, $list) = ($res->{dryrun}, $res->{list}); - return if !$dryrun; + return if !$dryrun; - if (!scalar(@{$list})) { - print "No backups found\n"; - return; - } + if (!scalar(@{$list})) { + print "No backups found\n"; + return; + } - print "NOTE: this is only a preview and might not be what a subsequent\n" . - "prune call does if backups are removed/added in the meantime.\n\n"; + print "NOTE: this is only a preview and might not be what a subsequent\n" + . "prune call does if backups are removed/added in the meantime.\n\n"; - my @sorted = sort { - my $vmcmp = PVE::Tools::safe_compare($a->{vmid}, $b->{vmid}, sub { $_[0] <=> $_[1] }); - return $vmcmp if $vmcmp ne 0; - return $a->{ctime} <=> $b->{ctime}; - } @{$list}; + my @sorted = sort { + my $vmcmp = + PVE::Tools::safe_compare($a->{vmid}, $b->{vmid}, sub { $_[0] <=> $_[1] }); + return $vmcmp if $vmcmp ne 0; + return $a->{ctime} <=> $b->{ctime}; + } @{$list}; - my $maxlen = 0; - foreach my $backup (@sorted) { - my $volid = $backup->{volid}; - $maxlen = length($volid) if length($volid) > $maxlen; - } - $maxlen+=1; + my $maxlen = 0; + foreach my $backup (@sorted) { + my $volid = $backup->{volid}; + $maxlen = length($volid) if length($volid) > $maxlen; + } + $maxlen += 1; - printf("%-${maxlen}s %15s %10s\n", 'Backup', 'Backup-ID', 'Prune-Mark'); - foreach my $backup (@sorted) { - my $type = $backup->{type}; - my $vmid = $backup->{vmid}; - my $backup_id = defined($vmid) ? "$type/$vmid" : "$type"; - printf("%-${maxlen}s %15s %10s\n", $backup->{volid}, $backup_id, $backup->{mark}); - } - }], + printf("%-${maxlen}s %15s %10s\n", 'Backup', 'Backup-ID', 'Prune-Mark'); + foreach my $backup (@sorted) { + my $type = $backup->{type}; + my $vmid = $backup->{vmid}; + my $backup_id = defined($vmid) ? 
"$type/$vmid" : "$type"; + printf( + "%-${maxlen}s %15s %10s\n", + $backup->{volid}, + $backup_id, + $backup->{mark}, + ); + } + }, + ], }; 1; diff --git a/src/PVE/CephConfig.pm b/src/PVE/CephConfig.pm index 56e7989..5347781 100644 --- a/src/PVE/CephConfig.pm +++ b/src/PVE/CephConfig.pm @@ -6,9 +6,7 @@ use Net::IP; use PVE::Tools qw(run_command); use PVE::Cluster qw(cfs_register_file); -cfs_register_file('ceph.conf', - \&parse_ceph_config, - \&write_ceph_config); +cfs_register_file('ceph.conf', \&parse_ceph_config, \&write_ceph_config); # For more information on how the Ceph parser works and how its grammar is # defined, see: @@ -77,177 +75,177 @@ sub parse_ceph_config { my @lines = split(/\n/, $raw); my $parse_section_header = sub { - my ($section_line) = @_; + my ($section_line) = @_; - # continued lines in section headers are allowed - while ($section_line =~ s/$re_continue_marker$//) { - $section_line .= shift(@lines); - } + # continued lines in section headers are allowed + while ($section_line =~ s/$re_continue_marker$//) { + $section_line .= shift(@lines); + } - my $remainder = $section_line; + my $remainder = $section_line; - $remainder =~ s/$re_section_header//; - my $parsed_header = $1; + $remainder =~ s/$re_section_header//; + my $parsed_header = $1; - # Un-escape comment literals - $parsed_header =~ s/\\($re_comment_class)/$1/g; + # Un-escape comment literals + $parsed_header =~ s/\\($re_comment_class)/$1/g; - if (!$parsed_header) { - die "failed to parse section - skip: $section_line\n"; - } + if (!$parsed_header) { + die "failed to parse section - skip: $section_line\n"; + } - # preserve Ceph's behaviour and disallow anything after the section header - # that's not whitespace or a comment - $remainder =~ s/$re_leading_ws//; - $remainder =~ s/^$re_comment_class.*$//; + # preserve Ceph's behaviour and disallow anything after the section header + # that's not whitespace or a comment + $remainder =~ s/$re_leading_ws//; + $remainder =~ s/^$re_comment_class.*$//; - if ($remainder) { - die "unexpected remainder after section - skip: $section_line\n"; - } + if ($remainder) { + die "unexpected remainder after section - skip: $section_line\n"; + } - return $parsed_header; + return $parsed_header; }; my $parse_key = sub { - my ($line) = @_; + my ($line) = @_; - my $remainder = $line; + my $remainder = $line; - my $key = ''; - while ($remainder =~ s/$re_key//) { - $key .= $1; + my $key = ''; + while ($remainder =~ s/$re_key//) { + $key .= $1; - while ($key =~ s/$re_continue_marker$//) { - $remainder = shift(@lines); - } - } + while ($key =~ s/$re_continue_marker$//) { + $remainder = shift(@lines); + } + } - $key =~ s/$re_trailing_ws//; - $key =~ s/$re_leading_ws//; + $key =~ s/$re_trailing_ws//; + $key =~ s/$re_leading_ws//; - $key =~ s/\s/ /; - while ($key =~ s/\s\s/ /) {} # squeeze repeated whitespace + $key =~ s/\s/ /; + while ($key =~ s/\s\s/ /) { } # squeeze repeated whitespace - # Ceph treats *single* spaces in keys the same as underscores, - # but we'll just use underscores for readability - $key =~ s/ /_/g; + # Ceph treats *single* spaces in keys the same as underscores, + # but we'll just use underscores for readability + $key =~ s/ /_/g; - # Un-escape comment literals - $key =~ s/\\($re_comment_class)/$1/g; + # Un-escape comment literals + $key =~ s/\\($re_comment_class)/$1/g; - if ($key eq '') { - die "failed to parse key from line - skip: $line\n"; - } + if ($key eq '') { + die "failed to parse key from line - skip: $line\n"; + } - my $had_equals = $remainder =~ 
s/^$re_kv_separator//; + my $had_equals = $remainder =~ s/^$re_kv_separator//; - if (!$had_equals) { - die "expected '=' after key - skip: $line\n"; - } + if (!$had_equals) { + die "expected '=' after key - skip: $line\n"; + } - while ($remainder =~ s/^$re_continue_marker$//) { - # Whitespace and continuations after equals sign can be arbitrary - $remainder = shift(@lines); - $remainder =~ s/$re_leading_ws//; - } + while ($remainder =~ s/^$re_continue_marker$//) { + # Whitespace and continuations after equals sign can be arbitrary + $remainder = shift(@lines); + $remainder =~ s/$re_leading_ws//; + } - return ($key, $remainder); + return ($key, $remainder); }; my $parse_value = sub { - my ($line, $remainder) = @_; + my ($line, $remainder) = @_; - my $starts_with_quote = $remainder =~ m/^['"]/; - $remainder =~ s/$re_value//; - my $value = $1 // ''; + my $starts_with_quote = $remainder =~ m/^['"]/; + $remainder =~ s/$re_value//; + my $value = $1 // ''; - if ($value eq '') { - die "failed to parse value - skip: $line\n"; - } + if ($value eq '') { + die "failed to parse value - skip: $line\n"; + } - if ($starts_with_quote) { - # If it started with a quote, the parsed value MUST end with a quote - my $is_single_quoted = $value =~ m/$re_single_quoted_value/; - $value = $1 if $is_single_quoted; - my $is_double_quoted = !$is_single_quoted && $value =~ m/$re_double_quoted_value/; - $value = $1 if $is_double_quoted; + if ($starts_with_quote) { + # If it started with a quote, the parsed value MUST end with a quote + my $is_single_quoted = $value =~ m/$re_single_quoted_value/; + $value = $1 if $is_single_quoted; + my $is_double_quoted = !$is_single_quoted && $value =~ m/$re_double_quoted_value/; + $value = $1 if $is_double_quoted; - if (!($is_single_quoted || $is_double_quoted)) { - die "failed to parse quoted value - skip: $line\n"; - } + if (!($is_single_quoted || $is_double_quoted)) { + die "failed to parse quoted value - skip: $line\n"; + } - # Optionally, *only* line continuations may *only* follow right after - while ($remainder =~ s/^$re_continue_marker$//) { - $remainder .= shift(@lines); - } + # Optionally, *only* line continuations may *only* follow right after + while ($remainder =~ s/^$re_continue_marker$//) { + $remainder .= shift(@lines); + } - # Nothing but whitespace or a comment may follow - $remainder =~ s/$re_leading_ws//; - $remainder =~ s/^$re_comment_class.*$//; + # Nothing but whitespace or a comment may follow + $remainder =~ s/$re_leading_ws//; + $remainder =~ s/^$re_comment_class.*$//; - if ($remainder) { - die "unexpected remainder after value - skip: $line\n"; - } + if ($remainder) { + die "unexpected remainder after value - skip: $line\n"; + } - } else { - while ($value =~ s/$re_continue_marker$//) { - my $next_line = shift(@lines); + } else { + while ($value =~ s/$re_continue_marker$//) { + my $next_line = shift(@lines); - $next_line =~ s/$re_unquoted_value//; - my $value_part = $1 // ''; - $value .= $value_part; - } + $next_line =~ s/$re_unquoted_value//; + my $value_part = $1 // ''; + $value .= $value_part; + } - $value =~ s/$re_trailing_ws//; - } + $value =~ s/$re_trailing_ws//; + } - # Un-escape comment literals - $value =~ s/\\($re_comment_class)/$1/g; + # Un-escape comment literals + $value =~ s/\\($re_comment_class)/$1/g; - return $value; + return $value; }; while (scalar(@lines)) { - my $line = shift(@lines); + my $line = shift(@lines); - $line =~ s/^\s*(?($line) }; - if ($@) { - warn "$@\n"; - } + eval { $section = $parse_section_header->($line) }; + if ($@) { + 
warn "$@\n"; + } - if (defined($section)) { - $cfg->{$section} = {} if !exists($cfg->{$section}); - } + if (defined($section)) { + $cfg->{$section} = {} if !exists($cfg->{$section}); + } - next; - } + next; + } - if (!defined($section)) { - warn "no section header - skip: $line\n"; - next; - } + if (!defined($section)) { + warn "no section header - skip: $line\n"; + next; + } - my ($key, $remainder) = eval { $parse_key->($line) }; - if ($@) { - warn "$@\n"; - next; - } + my ($key, $remainder) = eval { $parse_key->($line) }; + if ($@) { + warn "$@\n"; + next; + } - my $value = eval { $parse_value->($line, $remainder) }; - if ($@) { - warn "$@\n"; - next; - } + my $value = eval { $parse_value->($line, $remainder) }; + if ($@) { + warn "$@\n"; + next; + } - $cfg->{$section}->{$key} = $value; + $cfg->{$section}->{$key} = $value; } return $cfg; @@ -258,7 +256,7 @@ my $parse_ceph_file = sub { my $cfg = {}; - return $cfg if ! -f $filename; + return $cfg if !-f $filename; my $content = PVE::Tools::file_get_contents($filename); @@ -272,45 +270,45 @@ sub write_ceph_config { my $out = ''; my $cond_write_sec = sub { - my $re = shift; + my $re = shift; - for my $section (sort keys $cfg->%*) { - next if $section !~ m/^$re$/; - next if exists($written_sections->{$section}); + for my $section (sort keys $cfg->%*) { + next if $section !~ m/^$re$/; + next if exists($written_sections->{$section}); - $out .= "[$section]\n"; - for my $key (sort keys $cfg->{$section}->%*) { - $out .= "\t$key = $cfg->{$section}->{$key}\n"; - } - $out .= "\n"; + $out .= "[$section]\n"; + for my $key (sort keys $cfg->{$section}->%*) { + $out .= "\t$key = $cfg->{$section}->{$key}\n"; + } + $out .= "\n"; - $written_sections->{$section} = 1; - } + $written_sections->{$section} = 1; + } }; my @rexprs = ( - qr/global/, + qr/global/, - qr/client/, - qr/client\..*/, + qr/client/, + qr/client\..*/, - qr/mds/, - qr/mds\..*/, + qr/mds/, + qr/mds\..*/, - qr/mon/, - qr/mon\..*/, + qr/mon/, + qr/mon\..*/, - qr/osd/, - qr/osd\..*/, + qr/osd/, + qr/osd\..*/, - qr/mgr/, - qr/mgr\..*/, + qr/mgr/, + qr/mgr\..*/, - qr/.*/, + qr/.*/, ); for my $re (@rexprs) { - $cond_write_sec->($re); + $cond_write_sec->($re); } # Escape comment literals that aren't escaped already @@ -332,7 +330,7 @@ my $get_host = sub { my ($hostport) = @_; my ($host, $port) = PVE::Tools::parse_host_and_port($hostport); if (!defined($host)) { - return ""; + return ""; } $port = defined($port) ? ":$port" : ''; $host = "[$host]" if Net::IP::ip_is_ipv6($host); @@ -343,8 +341,8 @@ sub get_monaddr_list { my ($configfile) = shift; if (!defined($configfile)) { - warn "No ceph config specified\n"; - return; + warn "No ceph config specified\n"; + return; } my $config = $parse_ceph_file->($configfile); @@ -352,24 +350,24 @@ sub get_monaddr_list { my $monhostlist = {}; # get all ip addresses from mon_host - my $monhosts = [ split (/[ ,;]+/, $config->{global}->{mon_host} // "") ]; + my $monhosts = [split(/[ ,;]+/, $config->{global}->{mon_host} // "")]; foreach my $monhost (@$monhosts) { - $monhost =~ s/^\[?v\d\://; # remove beginning of vector - $monhost =~ s|/\d+\]?||; # remove end of vector - my $host = $get_host->($monhost); - if ($host ne "") { - $monhostlist->{$host} = 1; - } + $monhost =~ s/^\[?v\d\://; # remove beginning of vector + $monhost =~ s|/\d+\]?||; # remove end of vector + my $host = $get_host->($monhost); + if ($host ne "") { + $monhostlist->{$host} = 1; + } } # then get all addrs from mon. 
sections - for my $section ( keys %$config ) { - next if $section !~ m/^mon\./; + for my $section (keys %$config) { + next if $section !~ m/^mon\./; - if (my $addr = $config->{$section}->{mon_addr}) { - $monhostlist->{$addr} = 1; - } + if (my $addr = $config->{$section}->{mon_addr}) { + $monhostlist->{$addr} = 1; + } } return join(',', sort keys %$monhostlist); @@ -385,17 +383,17 @@ sub hostlist { my $ceph_check_keyfile = sub { my ($filename, $type) = @_; - return if ! -f $filename; + return if !-f $filename; my $content = PVE::Tools::file_get_contents($filename); eval { - die if !$content; + die if !$content; - if ($type eq 'rbd') { - die if $content !~ /\s*\[\S+\]\s*key\s*=\s*\S+==\s*$/m; - } elsif ($type eq 'cephfs') { - die if $content !~ /\S+==\s*$/; - } + if ($type eq 'rbd') { + die if $content !~ /\s*\[\S+\]\s*key\s*=\s*\S+==\s*$/m; + } elsif ($type eq 'cephfs') { + die if $content !~ /\S+==\s*$/; + } }; die "Not a proper $type authentication file: $filename\n" if $@; @@ -415,23 +413,24 @@ sub ceph_connect_option { $ceph_check_keyfile->($keyfile, $scfg->{type}); if (-e "/etc/pve/priv/ceph/${storeid}.conf") { - # allow custom ceph configuration for external clusters - if ($pveceph_managed) { - warn "ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n"; - } else { - $cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf"; - } + # allow custom ceph configuration for external clusters + if ($pveceph_managed) { + warn + "ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n"; + } else { + $cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf"; + } } $cmd_option->{keyring} = $keyfile if (-e $keyfile); $cmd_option->{auth_supported} = (defined $cmd_option->{keyring}) ? 'cephx' : 'none'; - $cmd_option->{userid} = $scfg->{username} ? $scfg->{username} : 'admin'; + $cmd_option->{userid} = $scfg->{username} ? 
$scfg->{username} : 'admin'; $cmd_option->{mon_host} = hostlist($scfg->{monhost}, ',') if (defined($scfg->{monhost})); if (%options) { - foreach my $k (keys %options) { - $cmd_option->{$k} = $options{$k}; - } + foreach my $k (keys %options) { + $cmd_option->{$k} = $options{$k}; + } } return $cmd_option; @@ -448,30 +447,31 @@ sub ceph_create_keyfile { my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension"; die "ceph authx keyring file for storage '$storeid' already exists!\n" - if -e $ceph_storage_keyring && !defined($secret); + if -e $ceph_storage_keyring && !defined($secret); if (-e $ceph_admin_keyring || defined($secret)) { - eval { - if (defined($secret)) { - mkdir '/etc/pve/priv/ceph'; - chomp $secret; - PVE::Tools::file_set_contents($ceph_storage_keyring, "${secret}\n", 0400); - } elsif ($type eq 'rbd') { - mkdir '/etc/pve/priv/ceph'; - PVE::Tools::file_copy($ceph_admin_keyring, $ceph_storage_keyring); - } elsif ($type eq 'cephfs') { - my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin'); - mkdir '/etc/pve/priv/ceph'; - chomp $cephfs_secret; - PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n", 0400); - } - }; - if (my $err = $@) { - unlink $ceph_storage_keyring; - die "failed to copy ceph authx $extension for storage '$storeid': $err\n"; - } + eval { + if (defined($secret)) { + mkdir '/etc/pve/priv/ceph'; + chomp $secret; + PVE::Tools::file_set_contents($ceph_storage_keyring, "${secret}\n", 0400); + } elsif ($type eq 'rbd') { + mkdir '/etc/pve/priv/ceph'; + PVE::Tools::file_copy($ceph_admin_keyring, $ceph_storage_keyring); + } elsif ($type eq 'cephfs') { + my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin'); + mkdir '/etc/pve/priv/ceph'; + chomp $cephfs_secret; + PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n", + 0400); + } + }; + if (my $err = $@) { + unlink $ceph_storage_keyring; + die "failed to copy ceph authx $extension for storage '$storeid': $err\n"; + } } else { - warn "$ceph_admin_keyring not found, authentication is disabled.\n"; + warn "$ceph_admin_keyring not found, authentication is disabled.\n"; } } @@ -483,7 +483,7 @@ sub ceph_remove_keyfile { my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension"; if (-f $ceph_storage_keyring) { - unlink($ceph_storage_keyring) or warn "removing keyring of storage failed: $!\n"; + unlink($ceph_storage_keyring) or warn "removing keyring of storage failed: $!\n"; } } @@ -491,10 +491,10 @@ my $ceph_version_parser = sub { my $ceph_version = shift; # FIXME this is the same as pve-manager PVE::Ceph::Tools get_local_version if ($ceph_version =~ /^ceph.*\sv?(\d+(?:\.\d+)+(?:-pve\d+)?)\s+(?:\(([a-zA-Z0-9]+)\))?/) { - my ($version, $buildcommit) = ($1, $2); - my $subversions = [ split(/\.|-/, $version) ]; + my ($version, $buildcommit) = ($1, $2); + my $subversions = [split(/\.|-/, $version)]; - return ($subversions, $version, $buildcommit); + return ($subversions, $version, $buildcommit); } warn "Could not parse Ceph version: '$ceph_version'\n"; }; @@ -504,9 +504,12 @@ sub local_ceph_version { my $version_string = $cache; if (!defined($version_string)) { - run_command('ceph --version', outfunc => sub { - $version_string = shift; - }); + run_command( + 'ceph --version', + outfunc => sub { + $version_string = shift; + }, + ); } return undef if !defined($version_string); # subversion is an array ref. 
with the version parts from major to minor diff --git a/src/PVE/Diskmanage.pm b/src/PVE/Diskmanage.pm index 4272668..1062bfd 100644 --- a/src/PVE/Diskmanage.pm +++ b/src/PVE/Diskmanage.pm @@ -11,7 +11,8 @@ use File::Basename; use File::stat; use JSON; -use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach trim); +use PVE::Tools + qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach trim); my $SMARTCTL = "/usr/sbin/smartctl"; my $ZPOOL = "/sbin/zpool"; @@ -20,7 +21,7 @@ my $PVS = "/sbin/pvs"; my $LVS = "/sbin/lvs"; my $LSBLK = "/bin/lsblk"; -my sub strip_dev :prototype($) { +my sub strip_dev : prototype($) { my ($devpath) = @_; $devpath =~ s|^/dev/||; return $devpath; @@ -38,7 +39,7 @@ sub verify_blockdev_path { my $path = abs_path($rel_path); die "failed to get absolute path to $rel_path\n" if !$path; - die "got unusual device path '$path'\n" if $path !~ m|^/dev/(.*)$|; + die "got unusual device path '$path'\n" if $path !~ m|^/dev/(.*)$|; $path = "/dev/$1"; # untaint @@ -51,8 +52,8 @@ sub assert_blockdev { my ($dev, $noerr) = @_; if ($dev !~ m|^/dev/| || !(-b $dev)) { - return if $noerr; - die "not a valid block device\n"; + return if $noerr; + die "not a valid block device\n"; } return 1; @@ -98,61 +99,72 @@ sub get_smart_data { push @$cmd, $disk; my $returncode = eval { - run_command($cmd, noerr => 1, outfunc => sub { - my ($line) = @_; + run_command( + $cmd, + noerr => 1, + outfunc => sub { + my ($line) = @_; -# ATA SMART attributes, e.g.: -# ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE -# 1 Raw_Read_Error_Rate POSR-K 100 100 000 - 0 -# -# SAS and NVME disks, e.g.: -# Data Units Written: 5,584,952 [2.85 TB] -# Accumulated start-stop cycles: 34 + # ATA SMART attributes, e.g.: + # ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + # 1 Raw_Read_Error_Rate POSR-K 100 100 000 - 0 + # + # SAS and NVME disks, e.g.: + # Data Units Written: 5,584,952 [2.85 TB] + # Accumulated start-stop cycles: 34 - if (defined($type) && $type eq 'ata' && $line =~ m/^([ \d]{2}\d)\s+(\S+)\s+(\S{6})\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(.*)$/) { - my $entry = {}; + if ( + defined($type) + && $type eq 'ata' + && $line =~ + m/^([ \d]{2}\d)\s+(\S+)\s+(\S{6})\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(.*)$/ + ) { + my $entry = {}; - $entry->{name} = $2 if defined $2; - $entry->{flags} = $3 if defined $3; - # the +0 makes a number out of the strings - # FIXME: 'value' is depreacated by 'normalized'; remove with PVE 7.0 - $entry->{value} = $4+0 if defined $4; - $entry->{normalized} = $4+0 if defined $4; - $entry->{worst} = $5+0 if defined $5; - # some disks report the default threshold as --- instead of 000 - if (defined($6) && $6 eq '---') { - $entry->{threshold} = 0; - } else { - $entry->{threshold} = $6+0 if defined $6; - } - $entry->{fail} = $7 if defined $7; - $entry->{raw} = $8 if defined $8; - $entry->{id} = $1 if defined $1; - push @{$smartdata->{attributes}}, $entry; - } elsif ($line =~ m/(?:Health Status|self\-assessment test result): (.*)$/ ) { - $smartdata->{health} = $1; - } elsif ($line =~ m/Vendor Specific SMART Attributes with Thresholds:/) { - $type = 'ata'; - delete $smartdata->{text}; - } elsif ($line =~ m/=== START OF (READ )?SMART DATA SECTION ===/) { - $type = 'text'; - } elsif (defined($type) && $type eq 'text') { - $smartdata->{text} = '' if !defined $smartdata->{text}; - $smartdata->{text} .= "$line\n"; - # extract wearout from nvme/sas text, allow for decimal values - if ($line 
=~ m/Percentage Used(?: endurance indicator)?:\s*(\d+(?:\.\d+)?)\%/i) { - $smartdata->{wearout} = 100 - $1; - } - } elsif ($line =~ m/SMART Disabled/) { - $smartdata->{health} = "SMART Disabled"; - } - }) + $entry->{name} = $2 if defined $2; + $entry->{flags} = $3 if defined $3; + # the +0 makes a number out of the strings + # FIXME: 'value' is depreacated by 'normalized'; remove with PVE 7.0 + $entry->{value} = $4 + 0 if defined $4; + $entry->{normalized} = $4 + 0 if defined $4; + $entry->{worst} = $5 + 0 if defined $5; + # some disks report the default threshold as --- instead of 000 + if (defined($6) && $6 eq '---') { + $entry->{threshold} = 0; + } else { + $entry->{threshold} = $6 + 0 if defined $6; + } + $entry->{fail} = $7 if defined $7; + $entry->{raw} = $8 if defined $8; + $entry->{id} = $1 if defined $1; + push @{ $smartdata->{attributes} }, $entry; + } elsif ($line =~ m/(?:Health Status|self\-assessment test result): (.*)$/) { + $smartdata->{health} = $1; + } elsif ($line =~ m/Vendor Specific SMART Attributes with Thresholds:/) { + $type = 'ata'; + delete $smartdata->{text}; + } elsif ($line =~ m/=== START OF (READ )?SMART DATA SECTION ===/) { + $type = 'text'; + } elsif (defined($type) && $type eq 'text') { + $smartdata->{text} = '' if !defined $smartdata->{text}; + $smartdata->{text} .= "$line\n"; + # extract wearout from nvme/sas text, allow for decimal values + if ($line =~ + m/Percentage Used(?: endurance indicator)?:\s*(\d+(?:\.\d+)?)\%/i + ) { + $smartdata->{wearout} = 100 - $1; + } + } elsif ($line =~ m/SMART Disabled/) { + $smartdata->{health} = "SMART Disabled"; + } + }, + ); }; my $err = $@; # bit 0 and 1 mark a fatal error, other bits are for disk status -> ignore (see man 8 smartctl) if ((defined($returncode) && ($returncode & 0b00000011)) || $err) { - die "Error getting S.M.A.R.T. data: Exit code: $returncode\n"; + die "Error getting S.M.A.R.T. 
data: Exit code: $returncode\n"; } $smartdata->{type} = $type; @@ -163,7 +175,9 @@ sub get_smart_data { sub get_lsblk_info { my $cmd = [$LSBLK, '--json', '-o', 'path,parttype,fstype']; my $output = ""; - eval { run_command($cmd, outfunc => sub { $output .= "$_[0]\n"; }) }; + eval { + run_command($cmd, outfunc => sub { $output .= "$_[0]\n"; }); + }; warn "$@\n" if $@; return {} if $output eq ''; @@ -172,12 +186,12 @@ sub get_lsblk_info { my $list = $parsed->{blockdevices} // []; return { - map { - $_->{path} => { - parttype => $_->{parttype}, - fstype => $_->{fstype} - } - } @{$list} + map { + $_->{path} => { + parttype => $_->{parttype}, + fstype => $_->{fstype}, + } + } @{$list} }; } @@ -187,9 +201,9 @@ my sub get_devices_by_partuuid { $res = {} if !defined($res); foreach my $dev (sort keys %{$lsblk_info}) { - my $uuid = $lsblk_info->{$dev}->{parttype}; - next if !defined($uuid) || !defined($uuids->{$uuid}); - $res->{$dev} = $uuids->{$uuid}; + my $uuid = $lsblk_info->{$dev}->{parttype}; + next if !defined($uuid) || !defined($uuids->{$uuid}); + $res->{$dev} = $uuids->{$uuid}; } return $res; @@ -203,23 +217,25 @@ sub get_zfs_devices { # use zpool and parttype uuid, because log and cache do not have zfs type uuid eval { - run_command([$ZPOOL, 'list', '-HPLv'], outfunc => sub { - my ($line) = @_; - if ($line =~ m|^\t([^\t]+)\t|) { - $res->{$1} = 1; - } - }); + run_command( + [$ZPOOL, 'list', '-HPLv'], + outfunc => sub { + my ($line) = @_; + if ($line =~ m|^\t([^\t]+)\t|) { + $res->{$1} = 1; + } + }, + ); }; # only warn here, because maybe zfs tools are not installed warn "$@\n" if $@; my $uuids = { - "6a898cc3-1dd2-11b2-99a6-080020736631" => 1, # apple - "516e7cba-6ecf-11d6-8ff8-00022d09712b" => 1, # bsd + "6a898cc3-1dd2-11b2-99a6-080020736631" => 1, # apple + "516e7cba-6ecf-11d6-8ff8-00022d09712b" => 1, # bsd }; - $res = get_devices_by_partuuid($lsblk_info, $uuids, $res); return $res; @@ -229,20 +245,23 @@ sub get_lvm_devices { my ($lsblk_info) = @_; my $res = {}; eval { - run_command([$PVS, '--noheadings', '--readonly', '-o', 'pv_name'], outfunc => sub{ - my ($line) = @_; - $line = trim($line); - if ($line =~ m|^/dev/|) { - $res->{$line} = 1; - } - }); + run_command( + [$PVS, '--noheadings', '--readonly', '-o', 'pv_name'], + outfunc => sub { + my ($line) = @_; + $line = trim($line); + if ($line =~ m|^/dev/|) { + $res->{$line} = 1; + } + }, + ); }; # if something goes wrong, we do not want to give up, but indicate an error has occurred warn "$@\n" if $@; my $uuids = { - "e6d6d379-f507-44c2-a23c-238f2a3df928" => 1, + "e6d6d379-f507-44c2-a23c-238f2a3df928" => 1, }; $res = get_devices_by_partuuid($lsblk_info, $uuids, $res); @@ -255,10 +274,10 @@ sub get_ceph_journals { my $res = {}; my $uuids = { - '45b0969e-9b03-4f30-b4c6-b4b80ceff106' => 1, # journal - '30cd0809-c2b2-499c-8879-2d6b78529876' => 2, # db - '5ce17fce-4087-4169-b7ff-056cc58473f9' => 3, # wal - 'cafecafe-9b03-4f30-b4c6-b4b80ceff106' => 4, # block + '45b0969e-9b03-4f30-b4c6-b4b80ceff106' => 1, # journal + '30cd0809-c2b2-499c-8879-2d6b78529876' => 2, # db + '5ce17fce-4087-4169-b7ff-056cc58473f9' => 3, # wal + 'cafecafe-9b03-4f30-b4c6-b4b80ceff106' => 4, # block }; $res = get_devices_by_partuuid($lsblk_info, $uuids, $res); @@ -270,36 +289,51 @@ sub get_ceph_journals { sub get_ceph_volume_infos { my $result = {}; - my $cmd = [ $LVS, '-S', 'lv_name=~^osd-', '-o', 'devices,lv_name,lv_tags', - '--noheadings', '--readonly', '--separator', ';' ]; + my $cmd = [ + $LVS, + '-S', + 'lv_name=~^osd-', + '-o', + 'devices,lv_name,lv_tags', + 
'--noheadings', + '--readonly', + '--separator', + ';', + ]; - run_command($cmd, outfunc => sub { - my $line = shift; - $line =~ s/(?:^\s+)|(?:\s+$)//g; # trim whitespaces + run_command( + $cmd, + outfunc => sub { + my $line = shift; + $line =~ s/(?:^\s+)|(?:\s+$)//g; # trim whitespaces - my $fields = [ split(';', $line) ]; + my $fields = [split(';', $line)]; - # lvs syntax is /dev/sdX(Y) where Y is the start (which we do not need) - my ($dev) = $fields->[0] =~ m|^(/dev/[a-z]+[^(]*)|; - if ($fields->[1] =~ m|^osd-([^-]+)-|) { - my $type = $1; - # $result autovivification is wanted, to not creating empty hashes - if (($type eq 'block' || $type eq 'data') && $fields->[2] =~ m/ceph.osd_id=([^,]+)/) { - $result->{$dev}->{osdid} = $1; - if ( !defined($result->{$dev}->{'osdid-list'}) ) { - $result->{$dev}->{'osdid-list'} = []; - } - push($result->{$dev}->{'osdid-list'}->@*, $1); - $result->{$dev}->{bluestore} = ($type eq 'block'); - if ($fields->[2] =~ m/ceph\.encrypted=1/) { - $result->{$dev}->{encrypted} = 1; - } - } else { - # undef++ becomes '1' (see `perldoc perlop`: Auto-increment) - $result->{$dev}->{$type}++; - } - } - }); + # lvs syntax is /dev/sdX(Y) where Y is the start (which we do not need) + my ($dev) = $fields->[0] =~ m|^(/dev/[a-z]+[^(]*)|; + if ($fields->[1] =~ m|^osd-([^-]+)-|) { + my $type = $1; + # $result autovivification is wanted, to not creating empty hashes + if ( + ($type eq 'block' || $type eq 'data') + && $fields->[2] =~ m/ceph.osd_id=([^,]+)/ + ) { + $result->{$dev}->{osdid} = $1; + if (!defined($result->{$dev}->{'osdid-list'})) { + $result->{$dev}->{'osdid-list'} = []; + } + push($result->{$dev}->{'osdid-list'}->@*, $1); + $result->{$dev}->{bluestore} = ($type eq 'block'); + if ($fields->[2] =~ m/ceph\.encrypted=1/) { + $result->{$dev}->{encrypted} = 1; + } + } else { + # undef++ becomes '1' (see `perldoc perlop`: Auto-increment) + $result->{$dev}->{$type}++; + } + } + }, + ); return $result; } @@ -310,10 +344,13 @@ sub get_udev_info { my $info = ""; my $data = {}; eval { - run_command(['udevadm', 'info', '-p', $dev, '--query', 'all'], outfunc => sub { - my ($line) = @_; - $info .= "$line\n"; - }); + run_command( + ['udevadm', 'info', '-p', $dev, '--query', 'all'], + outfunc => sub { + my ($line) = @_; + $info .= "$line\n"; + }, + ); }; warn $@ if $@; return if !$info; @@ -323,7 +360,7 @@ sub get_udev_info { # we use this, because some disks are not simply in /dev e.g. /dev/cciss/c0d0 if ($info =~ m/^E: DEVNAME=(\S+)$/m) { - $data->{devpath} = $1; + $data->{devpath} = $1; } return if !defined($data->{devpath}); @@ -343,8 +380,8 @@ sub get_udev_info { $data->{wwn} = $1 if $info =~ m/^E: ID_WWN=(.*)$/m; if ($info =~ m/^E: DEVLINKS=(.+)$/m) { - my @devlinks = grep(m#^/dev/disk/by-id/(ata|scsi|nvme(?!-eui))#, split (/ /, $1)); - $data->{by_id_link} = $devlinks[0] if defined($devlinks[0]); + my @devlinks = grep(m#^/dev/disk/by-id/(ata|scsi|nvme(?!-eui))#, split(/ /, $1)); + $data->{by_id_link} = $devlinks[0] if defined($devlinks[0]); } return $data; @@ -363,7 +400,7 @@ sub get_sysdir_size { sub get_sysdir_info { my ($sysdir) = @_; - return if ! 
-d "$sysdir/device"; + return if !-d "$sysdir/device"; my $data = {}; @@ -383,7 +420,7 @@ sub get_wear_leveling_info { my $attributes = $smartdata->{attributes}; if (defined($smartdata->{wearout})) { - return $smartdata->{wearout}; + return $smartdata->{wearout}; } my $wearout; @@ -391,29 +428,29 @@ sub get_wear_leveling_info { # Common register names that represent percentage values of potential failure indicators used # in drivedb.h of smartmontool's. Order matters, as some drives may have multiple definitions my @wearoutregisters = ( - "Media_Wearout_Indicator", - "SSD_Life_Left", - "Wear_Leveling_Count", - "Perc_Write\/Erase_Ct_BC", - "Perc_Rated_Life_Remain", - "Remaining_Lifetime_Perc", - "Percent_Lifetime_Remain", - "Lifetime_Left", - "PCT_Life_Remaining", - "Lifetime_Remaining", - "Percent_Life_Remaining", - "Percent_Lifetime_Used", - "Perc_Rated_Life_Used" + "Media_Wearout_Indicator", + "SSD_Life_Left", + "Wear_Leveling_Count", + "Perc_Write\/Erase_Ct_BC", + "Perc_Rated_Life_Remain", + "Remaining_Lifetime_Perc", + "Percent_Lifetime_Remain", + "Lifetime_Left", + "PCT_Life_Remaining", + "Lifetime_Remaining", + "Percent_Life_Remaining", + "Percent_Lifetime_Used", + "Perc_Rated_Life_Used", ); # Search for S.M.A.R.T. attributes for known register foreach my $register (@wearoutregisters) { - last if defined $wearout; - foreach my $attr (@$attributes) { - next if $attr->{name} !~ m/$register/; - $wearout = $attr->{value}; - last; - } + last if defined $wearout; + foreach my $attr (@$attributes) { + next if $attr->{name} !~ m/$register/; + $wearout = $attr->{value}; + last; + } } return $wearout; @@ -422,13 +459,13 @@ sub get_wear_leveling_info { sub dir_is_empty { my ($dir) = @_; - my $dh = IO::Dir->new ($dir); + my $dh = IO::Dir->new($dir); return 1 if !$dh; while (defined(my $tmp = $dh->read)) { - next if $tmp eq '.' || $tmp eq '..'; - $dh->close; - return 0; + next if $tmp eq '.' || $tmp eq '..'; + $dh->close; + return 0; } $dh->close; return 1; @@ -438,7 +475,7 @@ sub is_iscsi { my ($sysdir) = @_; if (-l $sysdir && readlink($sysdir) =~ m|host[^/]*/session[^/]*|) { - return 1; + return 1; } return 0; @@ -455,9 +492,9 @@ sub mounted_blockdevs { my $mounts = PVE::ProcFSTools::parse_proc_mounts(); foreach my $mount (@$mounts) { - next if $mount->[0] !~ m|^/dev/|; - $mounted->{abs_path($mount->[0])} = $mount->[1]; - }; + next if $mount->[0] !~ m|^/dev/|; + $mounted->{ abs_path($mount->[0]) } = $mount->[1]; + } return $mounted; } @@ -469,8 +506,8 @@ sub mounted_paths { my $mounts = PVE::ProcFSTools::parse_proc_mounts(); foreach my $mount (@$mounts) { - $mounted->{abs_path($mount->[1])} = $mount->[0]; - }; + $mounted->{ abs_path($mount->[1]) } = $mount->[0]; + } return $mounted; } @@ -492,227 +529,238 @@ sub get_disks { my $disk_regex = ".*"; if (defined($disks)) { - if (!ref($disks)) { - $disks = [ $disks ]; - } elsif (ref($disks) ne 'ARRAY') { - die "disks is not a string or array reference\n"; - } - # we get cciss/c0d0 but need cciss!c0d0 - $_ =~ s|cciss/|cciss!| for @$disks; + if (!ref($disks)) { + $disks = [$disks]; + } elsif (ref($disks) ne 'ARRAY') { + die "disks is not a string or array reference\n"; + } + # we get cciss/c0d0 but need cciss!c0d0 + $_ =~ s|cciss/|cciss!| for @$disks; - if ($include_partitions) { - # Proper blockdevice is needed for the regex, use parent for partitions. 
- for my $disk ($disks->@*) { - next if !is_partition("/dev/$disk"); - $disk = strip_dev(get_blockdev("/dev/$disk")); - } - } + if ($include_partitions) { + # Proper blockdevice is needed for the regex, use parent for partitions. + for my $disk ($disks->@*) { + next if !is_partition("/dev/$disk"); + $disk = strip_dev(get_blockdev("/dev/$disk")); + } + } - $disk_regex = "(?:" . join('|', @$disks) . ")"; + $disk_regex = "(?:" . join('|', @$disks) . ")"; } - dir_glob_foreach('/sys/block', $disk_regex, sub { - my ($dev) = @_; - # whitelisting following devices - # - hdX ide block device - # - sdX scsi/sata block device - # - vdX virtIO block device - # - xvdX: xen virtual block device - # - nvmeXnY: nvme devices - # - cciss!cXnY cciss devices - return if $dev !~ m/^(h|s|x?v)d[a-z]+$/ && - $dev !~ m/^nvme\d+n\d+$/ && - $dev !~ m/^cciss\!c\d+d\d+$/; + dir_glob_foreach( + '/sys/block', + $disk_regex, + sub { + my ($dev) = @_; + # whitelisting following devices + # - hdX ide block device + # - sdX scsi/sata block device + # - vdX virtIO block device + # - xvdX: xen virtual block device + # - nvmeXnY: nvme devices + # - cciss!cXnY cciss devices + return + if $dev !~ m/^(h|s|x?v)d[a-z]+$/ + && $dev !~ m/^nvme\d+n\d+$/ + && $dev !~ m/^cciss\!c\d+d\d+$/; - my $data = get_udev_info("/sys/block/$dev") // return; - my $devpath = $data->{devpath}; + my $data = get_udev_info("/sys/block/$dev") // return; + my $devpath = $data->{devpath}; - my $sysdir = "/sys/block/$dev"; + my $sysdir = "/sys/block/$dev"; - # we do not want iscsi devices - return if is_iscsi($sysdir); + # we do not want iscsi devices + return if is_iscsi($sysdir); - my $sysdata = get_sysdir_info($sysdir); - return if !defined($sysdata); + my $sysdata = get_sysdir_info($sysdir); + return if !defined($sysdata); - my $type = 'unknown'; + my $type = 'unknown'; - if ($sysdata->{rotational} == 0) { - $type = 'ssd'; - $type = 'nvme' if $dev =~ m/^nvme\d+n\d+$/; - $data->{rpm} = 0; - } elsif ($sysdata->{rotational} == 1) { - if ($data->{rpm} != -1) { - $type = 'hdd'; - } elsif ($data->{usb}) { - $type = 'usb'; - $data->{rpm} = 0; - } - } + if ($sysdata->{rotational} == 0) { + $type = 'ssd'; + $type = 'nvme' if $dev =~ m/^nvme\d+n\d+$/; + $data->{rpm} = 0; + } elsif ($sysdata->{rotational} == 1) { + if ($data->{rpm} != -1) { + $type = 'hdd'; + } elsif ($data->{usb}) { + $type = 'usb'; + $data->{rpm} = 0; + } + } - my ($health, $wearout) = ('UNKNOWN', 'N/A'); - if (!$nosmart) { - eval { - my $smartdata = get_smart_data($devpath, !is_ssdlike($type)); - $health = $smartdata->{health} if $smartdata->{health}; + my ($health, $wearout) = ('UNKNOWN', 'N/A'); + if (!$nosmart) { + eval { + my $smartdata = get_smart_data($devpath, !is_ssdlike($type)); + $health = $smartdata->{health} if $smartdata->{health}; - if (is_ssdlike($type)) { # if we have an ssd we try to get the wearout indicator - my $wear_level = get_wear_leveling_info($smartdata); - $wearout = $wear_level if defined($wear_level); - } - }; - } + if (is_ssdlike($type)) { # if we have an ssd we try to get the wearout indicator + my $wear_level = get_wear_leveling_info($smartdata); + $wearout = $wear_level if defined($wear_level); + } + }; + } - # we replaced cciss/ with cciss! above, but in the result we need cciss/ again because the - # caller might want to check the result again with the original parameter - if ($dev =~ m|^cciss!|) { - $dev =~ s|^cciss!|cciss/|; - } + # we replaced cciss/ with cciss! 
above, but in the result we need cciss/ again because the + # caller might want to check the result again with the original parameter + if ($dev =~ m|^cciss!|) { + $dev =~ s|^cciss!|cciss/|; + } - $disklist->{$dev} = { - vendor => $sysdata->{vendor}, - model => $data->{model} || $sysdata->{model}, - size => $sysdata->{size}, - serial => $data->{serial}, - gpt => $data->{gpt}, - rpm => $data->{rpm}, - type => $type, - wwn => $data->{wwn}, - health => $health, - devpath => $devpath, - wearout => $wearout, - }; - $disklist->{$dev}->{mounted} = 1 if exists $mounted->{$devpath}; + $disklist->{$dev} = { + vendor => $sysdata->{vendor}, + model => $data->{model} || $sysdata->{model}, + size => $sysdata->{size}, + serial => $data->{serial}, + gpt => $data->{gpt}, + rpm => $data->{rpm}, + type => $type, + wwn => $data->{wwn}, + health => $health, + devpath => $devpath, + wearout => $wearout, + }; + $disklist->{$dev}->{mounted} = 1 if exists $mounted->{$devpath}; - my $by_id_link = $data->{by_id_link}; - $disklist->{$dev}->{by_id_link} = $by_id_link if defined($by_id_link); + my $by_id_link = $data->{by_id_link}; + $disklist->{$dev}->{by_id_link} = $by_id_link if defined($by_id_link); - my ($osdid, $bluestore, $osdencrypted) = (-1, 0, 0); - my $osdid_list; - my ($journal_count, $db_count, $wal_count) = (0, 0, 0); + my ($osdid, $bluestore, $osdencrypted) = (-1, 0, 0); + my $osdid_list; + my ($journal_count, $db_count, $wal_count) = (0, 0, 0); - my $partpath = $devpath; - # remove trailing part to get the partition base path, e.g. /dev/cciss/c0d0 -> /dev/cciss - $partpath =~ s/\/[^\/]+$//; + my $partpath = $devpath; + # remove trailing part to get the partition base path, e.g. /dev/cciss/c0d0 -> /dev/cciss + $partpath =~ s/\/[^\/]+$//; - my $determine_usage = sub { - my ($devpath, $sysdir, $is_partition) = @_; + my $determine_usage = sub { + my ($devpath, $sysdir, $is_partition) = @_; - return 'LVM' if $lvmhash->{$devpath}; - return 'ZFS' if $zfshash->{$devpath}; + return 'LVM' if $lvmhash->{$devpath}; + return 'ZFS' if $zfshash->{$devpath}; - my $info = $lsblk_info->{$devpath} // {}; + my $info = $lsblk_info->{$devpath} // {}; - if (defined(my $parttype = $info->{parttype})) { - return 'BIOS boot'if $parttype eq '21686148-6449-6e6f-744e-656564454649'; - return 'EFI' if $parttype eq 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'; - return 'ZFS reserved' if $parttype eq '6a945a3b-1dd2-11b2-99a6-080020736631'; - } + if (defined(my $parttype = $info->{parttype})) { + return 'BIOS boot' if $parttype eq '21686148-6449-6e6f-744e-656564454649'; + return 'EFI' if $parttype eq 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'; + return 'ZFS reserved' + if $parttype eq '6a945a3b-1dd2-11b2-99a6-080020736631'; + } - return "$info->{fstype}" if defined($info->{fstype}); - return 'mounted' if $mounted->{$devpath}; + return "$info->{fstype}" if defined($info->{fstype}); + return 'mounted' if $mounted->{$devpath}; - return if !$is_partition; + return if !$is_partition; - # for devices, this check is done explicitly later - return 'Device Mapper' if !dir_is_empty("$sysdir/holders"); + # for devices, this check is done explicitly later + return 'Device Mapper' if !dir_is_empty("$sysdir/holders"); - return; # unused partition - }; + return; # unused partition + }; - my $collect_ceph_info = sub { - my ($devpath) = @_; + my $collect_ceph_info = sub { + my ($devpath) = @_; - my $ceph_volume = $ceph_volume_infos->{$devpath} or return; - $journal_count += $ceph_volume->{journal} // 0; - $db_count += $ceph_volume->{db} // 0; - $wal_count += 
$ceph_volume->{wal} // 0; - if (defined($ceph_volume->{osdid})) { - $osdid = $ceph_volume->{osdid}; - $osdid_list = $ceph_volume->{'osdid-list'}; - $bluestore = 1 if $ceph_volume->{bluestore}; - $osdencrypted = 1 if $ceph_volume->{encrypted}; - } + my $ceph_volume = $ceph_volume_infos->{$devpath} or return; + $journal_count += $ceph_volume->{journal} // 0; + $db_count += $ceph_volume->{db} // 0; + $wal_count += $ceph_volume->{wal} // 0; + if (defined($ceph_volume->{osdid})) { + $osdid = $ceph_volume->{osdid}; + $osdid_list = $ceph_volume->{'osdid-list'}; + $bluestore = 1 if $ceph_volume->{bluestore}; + $osdencrypted = 1 if $ceph_volume->{encrypted}; + } - my $result = { %{$ceph_volume} }; - $result->{journals} = delete $result->{journal} if $result->{journal}; - return $result; - }; + my $result = { %{$ceph_volume} }; + $result->{journals} = delete $result->{journal} if $result->{journal}; + return $result; + }; - my $partitions = {}; - dir_glob_foreach("$sysdir", "$dev.+", sub { - my ($part) = @_; + my $partitions = {}; + dir_glob_foreach( + "$sysdir", + "$dev.+", + sub { + my ($part) = @_; - $partitions->{$part} = $collect_ceph_info->("$partpath/$part"); - my $lvm_based_osd = defined($partitions->{$part}); + $partitions->{$part} = $collect_ceph_info->("$partpath/$part"); + my $lvm_based_osd = defined($partitions->{$part}); - $partitions->{$part}->{devpath} = "$partpath/$part"; - $partitions->{$part}->{parent} = "$devpath"; - $partitions->{$part}->{mounted} = 1 if exists $mounted->{"$partpath/$part"}; - $partitions->{$part}->{gpt} = $data->{gpt}; - $partitions->{$part}->{type} = 'partition'; - $partitions->{$part}->{size} = get_sysdir_size("$sysdir/$part") // 0; - $partitions->{$part}->{used} = $determine_usage->("$partpath/$part", "$sysdir/$part", 1); - $partitions->{$part}->{osdid} //= -1; - $partitions->{$part}->{'osdid-list'} //= undef; + $partitions->{$part}->{devpath} = "$partpath/$part"; + $partitions->{$part}->{parent} = "$devpath"; + $partitions->{$part}->{mounted} = 1 if exists $mounted->{"$partpath/$part"}; + $partitions->{$part}->{gpt} = $data->{gpt}; + $partitions->{$part}->{type} = 'partition'; + $partitions->{$part}->{size} = get_sysdir_size("$sysdir/$part") // 0; + $partitions->{$part}->{used} = + $determine_usage->("$partpath/$part", "$sysdir/$part", 1); + $partitions->{$part}->{osdid} //= -1; + $partitions->{$part}->{'osdid-list'} //= undef; - # avoid counting twice (e.g. partition with the LVM for the DB OSD is in $journalhash) - return if $lvm_based_osd; + # avoid counting twice (e.g. 
partition with the LVM for the DB OSD is in $journalhash) + return if $lvm_based_osd; - # Legacy handling for non-LVM based OSDs - if (my $mp = $mounted->{"$partpath/$part"}) { - if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) { - $osdid = $1; - $partitions->{$part}->{osdid} = $osdid; - $osdid_list = [$1]; # assuming only one OSD per disk - $partitions->{$part}->{'osdid-list'} = $osdid_list; - } - } + # Legacy handling for non-LVM based OSDs + if (my $mp = $mounted->{"$partpath/$part"}) { + if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) { + $osdid = $1; + $partitions->{$part}->{osdid} = $osdid; + $osdid_list = [$1]; # assuming only one OSD per disk + $partitions->{$part}->{'osdid-list'} = $osdid_list; + } + } - if (my $journal_part = $journalhash->{"$partpath/$part"}) { - $journal_count++ if $journal_part == 1; - $db_count++ if $journal_part == 2; - $wal_count++ if $journal_part == 3; - $bluestore = 1 if $journal_part == 4; + if (my $journal_part = $journalhash->{"$partpath/$part"}) { + $journal_count++ if $journal_part == 1; + $db_count++ if $journal_part == 2; + $wal_count++ if $journal_part == 3; + $bluestore = 1 if $journal_part == 4; - $partitions->{$part}->{journals} = 1 if $journal_part == 1; - $partitions->{$part}->{db} = 1 if $journal_part == 2; - $partitions->{$part}->{wal} = 1 if $journal_part == 3; - $partitions->{$part}->{bluestore} = 1 if $journal_part == 4; - } - }); + $partitions->{$part}->{journals} = 1 if $journal_part == 1; + $partitions->{$part}->{db} = 1 if $journal_part == 2; + $partitions->{$part}->{wal} = 1 if $journal_part == 3; + $partitions->{$part}->{bluestore} = 1 if $journal_part == 4; + } + }, + ); - my $used = $determine_usage->($devpath, $sysdir, 0); - if (!$include_partitions) { - foreach my $part (sort keys %{$partitions}) { - $used //= $partitions->{$part}->{used}; - } - } else { - # fstype might be set even if there are partitions, but showing that is confusing - $used = 'partitions' if scalar(keys %{$partitions}); - } - $used //= 'partitions' if scalar(keys %{$partitions}); - # multipath, software raid, etc. - # this check comes in last, to show more specific info - # if we have it - $used //= 'Device Mapper' if !dir_is_empty("$sysdir/holders"); + my $used = $determine_usage->($devpath, $sysdir, 0); + if (!$include_partitions) { + foreach my $part (sort keys %{$partitions}) { + $used //= $partitions->{$part}->{used}; + } + } else { + # fstype might be set even if there are partitions, but showing that is confusing + $used = 'partitions' if scalar(keys %{$partitions}); + } + $used //= 'partitions' if scalar(keys %{$partitions}); + # multipath, software raid, etc. 
+ # this check comes in last, to show more specific info + # if we have it + $used //= 'Device Mapper' if !dir_is_empty("$sysdir/holders"); - $disklist->{$dev}->{used} = $used if $used; + $disklist->{$dev}->{used} = $used if $used; - $collect_ceph_info->($devpath); + $collect_ceph_info->($devpath); - $disklist->{$dev}->{osdid} = $osdid; - $disklist->{$dev}->{'osdid-list'} = $osdid_list; - $disklist->{$dev}->{journals} = $journal_count if $journal_count; - $disklist->{$dev}->{bluestore} = $bluestore if $osdid != -1; - $disklist->{$dev}->{osdencrypted} = $osdencrypted if $osdid != -1; - $disklist->{$dev}->{db} = $db_count if $db_count; - $disklist->{$dev}->{wal} = $wal_count if $wal_count; + $disklist->{$dev}->{osdid} = $osdid; + $disklist->{$dev}->{'osdid-list'} = $osdid_list; + $disklist->{$dev}->{journals} = $journal_count if $journal_count; + $disklist->{$dev}->{bluestore} = $bluestore if $osdid != -1; + $disklist->{$dev}->{osdencrypted} = $osdencrypted if $osdid != -1; + $disklist->{$dev}->{db} = $db_count if $db_count; + $disklist->{$dev}->{wal} = $wal_count if $wal_count; - if ($include_partitions) { - $disklist->{$_} = $partitions->{$_} for keys %{$partitions}; - } - }); + if ($include_partitions) { + $disklist->{$_} = $partitions->{$_} for keys %{$partitions}; + } + }, + ); return $disklist; } @@ -723,7 +771,7 @@ sub get_partnum { my $st = stat($part_path); die "error detecting block device '$part_path'\n" - if !$st || !$st->mode || !S_ISBLK($st->mode) || !$st->rdev; + if !$st || !$st->mode || !S_ISBLK($st->mode) || !$st->rdev; my $major = PVE::Tools::dev_t_major($st->rdev); my $minor = PVE::Tools::dev_t_minor($st->rdev); @@ -743,9 +791,9 @@ sub get_blockdev { my ($dev, $block_dev); if ($part_path =~ m|^/dev/(.*)$|) { - $dev = $1; - my $link = readlink "/sys/class/block/$dev"; - $block_dev = $1 if $link =~ m|([^/]*)/$dev$|; + $dev = $1; + my $link = readlink "/sys/class/block/$dev"; + $block_dev = $1 if $link =~ m|([^/]*)/$dev$|; } die "Can't parse parent device\n" if !defined($block_dev); @@ -783,28 +831,38 @@ sub append_partition { $devname =~ s|^/dev/||; my $newpartid = 1; - dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*?(\d+)/, sub { - my ($part, $partid) = @_; + dir_glob_foreach( + "/sys/block/$devname", + qr/\Q$devname\E.*?(\d+)/, + sub { + my ($part, $partid) = @_; - if ($partid >= $newpartid) { - $newpartid = $partid + 1; - } - }); + if ($partid >= $newpartid) { + $newpartid = $partid + 1; + } + }, + ); $size = PVE::Tools::convert_size($size, 'b' => 'mb'); - run_command([ $SGDISK, '-n', "$newpartid:0:+${size}M", $dev ], - errmsg => "error creating partition '$newpartid' on '$dev'"); + run_command( + [$SGDISK, '-n', "$newpartid:0:+${size}M", $dev], + errmsg => "error creating partition '$newpartid' on '$dev'", + ); my $partition; # loop again to detect the real partition device which does not always follow # a strict $devname$partition scheme like /dev/nvme0n1 -> /dev/nvme0n1p1 - dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*$newpartid/, sub { - my ($part) = @_; + dir_glob_foreach( + "/sys/block/$devname", + qr/\Q$devname\E.*$newpartid/, + sub { + my ($part) = @_; - $partition = "/dev/$part"; - }); + $partition = "/dev/$part"; + }, + ); return $partition; } @@ -820,10 +878,14 @@ sub has_holder { return $devpath if !dir_is_empty("/sys/class/block/${dev}/holders"); my $found; - dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub { - my ($part) = @_; - $found = "/dev/${part}" if !dir_is_empty("/sys/class/block/${part}/holders"); - }); + dir_glob_foreach( 
+ "/sys/block/${dev}", + "${dev}.+", + sub { + my ($part) = @_; + $found = "/dev/${part}" if !dir_is_empty("/sys/class/block/${part}/holders"); + }, + ); return $found; } @@ -841,12 +903,16 @@ sub is_mounted { my $dev = strip_dev($devpath); my $found; - dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub { - my ($part) = @_; - my $partpath = "/dev/${part}"; + dir_glob_foreach( + "/sys/block/${dev}", + "${dev}.+", + sub { + my ($part) = @_; + my $partpath = "/dev/${part}"; - $found = $partpath if $mounted->{$partpath}; - }); + $found = $partpath if $mounted->{$partpath}; + }, + ); return $found; } @@ -884,13 +950,17 @@ sub wipe_blockdev { my $count = ($size < 200) ? $size : 200; my $to_wipe = []; - dir_glob_foreach("/sys/class/block/${devname}", "${devname}.+", sub { - my ($part) = @_; - push $to_wipe->@*, "/dev/${part}" if -b "/dev/${part}"; - }); + dir_glob_foreach( + "/sys/class/block/${devname}", + "${devname}.+", + sub { + my ($part) = @_; + push $to_wipe->@*, "/dev/${part}" if -b "/dev/${part}"; + }, + ); if (scalar($to_wipe->@*) > 0) { - print "found child partitions to wipe: ". join(', ', $to_wipe->@*) ."\n"; + print "found child partitions to wipe: " . join(', ', $to_wipe->@*) . "\n"; } push $to_wipe->@*, $devpath; # put actual device last @@ -899,13 +969,13 @@ sub wipe_blockdev { run_command(['wipefs', '--all', $to_wipe->@*], errmsg => "error wiping '${devpath}'"); run_command( - ['dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'conv=fdatasync', "count=${count}"], - errmsg => "error wiping '${devpath}'", + ['dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'conv=fdatasync', "count=${count}"], + errmsg => "error wiping '${devpath}'", ); if (is_partition($devpath)) { - eval { change_parttype($devpath, '8300'); }; - warn $@ if $@; + eval { change_parttype($devpath, '8300'); }; + warn $@ if $@; } } diff --git a/src/PVE/GuestImport.pm b/src/PVE/GuestImport.pm index 16099ca..3d59dcd 100644 --- a/src/PVE/GuestImport.pm +++ b/src/PVE/GuestImport.pm @@ -16,24 +16,24 @@ sub extract_disk_from_import_file { my $cfg = PVE::Storage::config(); my ($vtype, $name, undef, undef, undef, undef, $fmt) = - PVE::Storage::parse_volname($cfg, $volid); + PVE::Storage::parse_volname($cfg, $volid); die "only files with content type 'import' can be extracted\n" - if $vtype ne 'import'; + if $vtype ne 'import'; die "only files from 'ova' format can be extracted\n" - if $fmt !~ m/^ova\+/; + if $fmt !~ m/^ova\+/; # extract the inner file from the name my $archive_volid; my $inner_file; my $inner_fmt; if ($name =~ m!^(.*\.ova)/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$!) { - $archive_volid = "$source_storeid:import/$1"; - $inner_file = $2; - ($inner_fmt) = $fmt =~ /^ova\+(.*)$/; + $archive_volid = "$source_storeid:import/$1"; + $inner_file = $2; + ($inner_fmt) = $fmt =~ /^ova\+(.*)$/; } else { - die "cannot extract $volid - invalid volname $volname\n"; + die "cannot extract $volid - invalid volname $volname\n"; } die "cannot determine format of '$volid'\n" if !$inner_fmt; @@ -49,36 +49,40 @@ sub extract_disk_from_import_file { my $target_path; my $target_volid; eval { - run_command([ - 'tar', - '-x', - '--force-local', - '--no-same-owner', - '-C', $tmpdir, - '-f', $ova_path, - $inner_file, - ]); + run_command([ + 'tar', + '-x', + '--force-local', + '--no-same-owner', + '-C', + $tmpdir, + '-f', + $ova_path, + $inner_file, + ]); - # check for symlinks and other non regular files - if (-l $source_path || ! 
-f $source_path) { - die "extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n"; - } + # check for symlinks and other non regular files + if (-l $source_path || !-f $source_path) { + die + "extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n"; + } - # check potentially untrusted image file! - PVE::Storage::file_size_info($source_path, undef, $inner_fmt, 1); + # check potentially untrusted image file! + PVE::Storage::file_size_info($source_path, undef, $inner_fmt, 1); - # create temporary 1M image that will get overwritten by the rename - # to reserve the filename and take care of locking - $target_volid = PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024); - $target_path = PVE::Storage::path($cfg, $target_volid); + # create temporary 1M image that will get overwritten by the rename + # to reserve the filename and take care of locking + $target_volid = + PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024); + $target_path = PVE::Storage::path($cfg, $target_volid); - print "renaming $source_path to $target_path\n"; + print "renaming $source_path to $target_path\n"; - rename($source_path, $target_path) or die "unable to move - $!\n"; + rename($source_path, $target_path) or die "unable to move - $!\n"; }; if (my $err = $@) { - File::Path::remove_tree($tmpdir); - die "error during extraction: $err\n"; + File::Path::remove_tree($tmpdir); + die "error during extraction: $err\n"; } File::Path::remove_tree($tmpdir); diff --git a/src/PVE/GuestImport/OVF.pm b/src/PVE/GuestImport/OVF.pm index 1fae69b..f5200a7 100644 --- a/src/PVE/GuestImport/OVF.pm +++ b/src/PVE/GuestImport/OVF.pm @@ -36,7 +36,7 @@ my @resources = ( { id => 17, dtmf_name => 'Disk Drive' }, { id => 18, dtmf_name => 'Tape Drive' }, { id => 19, dtmf_name => 'Storage Extent' }, - { id => 20, dtmf_name => 'Other storage device', pve_type => 'sata'}, + { id => 20, dtmf_name => 'Other storage device', pve_type => 'sata' }, { id => 21, dtmf_name => 'Serial port' }, { id => 22, dtmf_name => 'Parallel port' }, { id => 23, dtmf_name => 'USB Controller' }, @@ -51,7 +51,7 @@ my @resources = ( { id => 32, dtmf_name => 'Storage Volume' }, { id => 33, dtmf_name => 'Ethernet Connection' }, { id => 34, dtmf_name => 'DMTF reserved' }, - { id => 35, dtmf_name => 'Vendor Reserved'} + { id => 35, dtmf_name => 'Vendor Reserved' }, ); # see https://schemas.dmtf.org/wbem/cim-html/2.55.0+/CIM_OperatingSystem.html @@ -120,17 +120,15 @@ sub get_ostype { } my $allowed_nic_models = [ - 'e1000', - 'e1000e', - 'vmxnet3', + 'e1000', 'e1000e', 'vmxnet3', ]; sub find_by { my ($key, $param) = @_; foreach my $resource (@resources) { - if ($resource->{$key} eq $param) { - return ($resource); - } + if ($resource->{$key} eq $param) { + return ($resource); + } } return; } @@ -139,9 +137,9 @@ sub dtmf_name_to_id { my ($dtmf_name) = @_; my $found = find_by('dtmf_name', $dtmf_name); if ($found) { - return $found->{id}; + return $found->{id}; } else { - return; + return; } } @@ -149,9 +147,9 @@ sub id_to_pve { my ($id) = @_; my $resource = find_by('id', $id); if ($resource) { - return $resource->{pve_type}; + return $resource->{pve_type}; } else { - return; + return; } } @@ -161,9 +159,9 @@ sub try_parse_capacity_unit { my ($unit_text) = @_; if ($unit_text =~ m/^\s*byte\s*\*\s*([0-9]+)\s*\^\s*([0-9]+)\s*$/) { - my $base = $1; - my $exp = $2; - return $base ** $exp; + my $base = $1; + my $exp = $2; + return $base**$exp; } return undef; @@ -176,25 +174,32 @@ sub 
parse_ovf { # we have to ignore missing disk images for ova my $dom; if ($isOva) { - my $raw = ""; - PVE::Tools::run_command(['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'], outfunc => sub { - my $line = shift; - $raw .= $line; - }); - $dom = XML::LibXML->load_xml(string => $raw, no_blanks => 1); + my $raw = ""; + PVE::Tools::run_command( + ['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'], + outfunc => sub { + my $line = shift; + $raw .= $line; + }, + ); + $dom = XML::LibXML->load_xml(string => $raw, no_blanks => 1); } else { - $dom = XML::LibXML->load_xml(location => $ovf, no_blanks => 1); + $dom = XML::LibXML->load_xml(location => $ovf, no_blanks => 1); } - # register the xml namespaces in a xpath context object # 'ovf' is the default namespace so it will prepended to each xml element my $xpc = XML::LibXML::XPathContext->new($dom); $xpc->registerNs('ovf', 'http://schemas.dmtf.org/ovf/envelope/1'); $xpc->registerNs('vmw', 'http://www.vmware.com/schema/ovf'); - $xpc->registerNs('rasd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'); - $xpc->registerNs('vssd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData'); - + $xpc->registerNs( + 'rasd', + 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData', + ); + $xpc->registerNs( + 'vssd', + 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData', + ); # hash to save qm.conf parameters my $qm; @@ -206,48 +211,55 @@ sub parse_ovf { # walk down the dom until we find the matching XML element my $ovf_name = $xpc->findvalue("/ovf:Envelope/ovf:VirtualSystem/ovf:Name"); if (!$ovf_name) { - # this is a bit of a hack, but best-effort and can only win here - my @nodes = $xpc->findnodes("/ovf:Envelope/ovf:VirtualSystem"); - if (my $virtual_system_node = shift @nodes) { - for my $attr ($virtual_system_node->attributes()) { - if ($attr->nodeName() eq 'ovf:id') { - $ovf_name = $attr->getValue(); - last; - } - } - } + # this is a bit of a hack, but best-effort and can only win here + my @nodes = $xpc->findnodes("/ovf:Envelope/ovf:VirtualSystem"); + if (my $virtual_system_node = shift @nodes) { + for my $attr ($virtual_system_node->attributes()) { + if ($attr->nodeName() eq 'ovf:id') { + $ovf_name = $attr->getValue(); + last; + } + } + } } if ($ovf_name) { - # PVE::QemuServer::confdesc requires a valid DNS name - $ovf_name =~ s/\s+/-/g; - ($qm->{name} = $ovf_name) =~ s/[^a-zA-Z0-9\-\.]//g; + # PVE::QemuServer::confdesc requires a valid DNS name + $ovf_name =~ s/\s+/-/g; + ($qm->{name} = $ovf_name) =~ s/[^a-zA-Z0-9\-\.]//g; } else { - warn "warning: unable to parse the VM name in this OVF manifest, generating a default value\n"; + warn + "warning: unable to parse the VM name in this OVF manifest, generating a default value\n"; } # middle level xpath # element[child] search the elements which have this [child] my $processor_id = dtmf_name_to_id('Processor'); - my $xpath_find_vcpu_count = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity"; + my $xpath_find_vcpu_count = + "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity"; $qm->{'cores'} = $xpc->findvalue($xpath_find_vcpu_count); my $memory_id = dtmf_name_to_id('Memory'); - my $xpath_find_memory = 
("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity"); + my $xpath_find_memory = ( + "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity" + ); $qm->{'memory'} = $xpc->findvalue($xpath_find_memory); # middle level xpath # here we expect multiple results, so we do not read the element value with # findvalue() but store multiple elements with findnodes() my $disk_id = dtmf_name_to_id('Disk Drive'); - my $xpath_find_disks = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]"; + my $xpath_find_disks = + "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]"; my @disk_items = $xpc->findnodes($xpath_find_disks); - my $xpath_find_ostype_id = "/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id"; + my $xpath_find_ostype_id = + "/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id"; my $ostype_id = $xpc->findvalue($xpath_find_ostype_id); $qm->{ostype} = get_ostype($ostype_id); # vmware specific firmware config, seems to not be standardized in ovf ? - my $xpath_find_firmware = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value"; + my $xpath_find_firmware = + "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value"; my $firmware = $xpc->findvalue($xpath_find_firmware) || 'seabios'; $qm->{bios} = 'ovmf' if $firmware eq 'efi'; @@ -265,130 +277,141 @@ sub parse_ovf { my $boot_order = []; for my $item_node (@disk_items) { - my ($disk_node, $file_node, $controller_node, $pve_disk); + my ($disk_node, $file_node, $controller_node, $pve_disk); - print "disk item:\n", $item_node->toString(1), "\n" if $debug; + print "disk item:\n", $item_node->toString(1), "\n" if $debug; - # from Item, find corresponding Disk node - # here the dot means the search should start from the current element in dom - my $host_resource = $xpc->findvalue('rasd:HostResource', $item_node); - my $disk_section_path; - my $disk_id; + # from Item, find corresponding Disk node + # here the dot means the search should start from the current element in dom + my $host_resource = $xpc->findvalue('rasd:HostResource', $item_node); + my $disk_section_path; + my $disk_id; - # RFC 3986 "2.3. Unreserved Characters" - my $valid_uripath_chars = qr/[[:alnum:]]|[\-\._~]/; + # RFC 3986 "2.3. 
Unreserved Characters" + my $valid_uripath_chars = qr/[[:alnum:]]|[\-\._~]/; - if ($host_resource =~ m|^(?:ovf:)?/(${valid_uripath_chars}+)/(${valid_uripath_chars}+)$|) { - $disk_section_path = $1; - $disk_id = $2; - } else { - warn "invalid host resource $host_resource, skipping\n"; - next; - } - printf "disk section path: $disk_section_path and disk id: $disk_id\n" if $debug; + if ($host_resource =~ m|^(?:ovf:)?/(${valid_uripath_chars}+)/(${valid_uripath_chars}+)$|) { + $disk_section_path = $1; + $disk_id = $2; + } else { + warn "invalid host resource $host_resource, skipping\n"; + next; + } + printf "disk section path: $disk_section_path and disk id: $disk_id\n" if $debug; - # tricky xpath - # @ means we filter the result query based on a the value of an item attribute ( @ = attribute) - # @ needs to be escaped to prevent Perl double quote interpolation - my $xpath_find_fileref = sprintf("/ovf:Envelope/ovf:DiskSection/\ -ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id); - my $xpath_find_capacity = sprintf("/ovf:Envelope/ovf:DiskSection/\ -ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id); - my $xpath_find_capacity_unit = sprintf("/ovf:Envelope/ovf:DiskSection/\ -ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id); - my $fileref = $xpc->findvalue($xpath_find_fileref); - my $capacity = $xpc->findvalue($xpath_find_capacity); - my $capacity_unit = $xpc->findvalue($xpath_find_capacity_unit); - my $virtual_size; - if (my $factor = try_parse_capacity_unit($capacity_unit)) { - $virtual_size = $capacity * $factor; - } + # tricky xpath + # @ means we filter the result query based on a the value of an item attribute ( @ = attribute) + # @ needs to be escaped to prevent Perl double quote interpolation + my $xpath_find_fileref = sprintf( + "/ovf:Envelope/ovf:DiskSection/\ +ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id, + ); + my $xpath_find_capacity = sprintf( + "/ovf:Envelope/ovf:DiskSection/\ +ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id, + ); + my $xpath_find_capacity_unit = sprintf( + "/ovf:Envelope/ovf:DiskSection/\ +ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id, + ); + my $fileref = $xpc->findvalue($xpath_find_fileref); + my $capacity = $xpc->findvalue($xpath_find_capacity); + my $capacity_unit = $xpc->findvalue($xpath_find_capacity_unit); + my $virtual_size; + if (my $factor = try_parse_capacity_unit($capacity_unit)) { + $virtual_size = $capacity * $factor; + } - my $valid_url_chars = qr@${valid_uripath_chars}|/@; - if (!$fileref || $fileref !~ m/^${valid_url_chars}+$/) { - warn "invalid host resource $host_resource, skipping\n"; - next; - } + my $valid_url_chars = qr@${valid_uripath_chars}|/@; + if (!$fileref || $fileref !~ m/^${valid_url_chars}+$/) { + warn "invalid host resource $host_resource, skipping\n"; + next; + } - # from Item, find owning Controller type - my $controller_id = $xpc->findvalue('rasd:Parent', $item_node); - my $xpath_find_parent_type = sprintf("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\ -ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id); - my $controller_type = $xpc->findvalue($xpath_find_parent_type); - if (!$controller_type) { - warn "invalid or missing controller: $controller_type, skipping\n"; - next; - } - print "owning controller type: $controller_type\n" if $debug; + # from Item, find owning Controller type + my $controller_id = $xpc->findvalue('rasd:Parent', $item_node); + my $xpath_find_parent_type = sprintf( + 
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\ +ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id, + ); + my $controller_type = $xpc->findvalue($xpath_find_parent_type); + if (!$controller_type) { + warn "invalid or missing controller: $controller_type, skipping\n"; + next; + } + print "owning controller type: $controller_type\n" if $debug; - # extract corresponding Controller node details - my $adress_on_controller = $xpc->findvalue('rasd:AddressOnParent', $item_node); - my $pve_disk_address = id_to_pve($controller_type) . $adress_on_controller; + # extract corresponding Controller node details + my $adress_on_controller = $xpc->findvalue('rasd:AddressOnParent', $item_node); + my $pve_disk_address = id_to_pve($controller_type) . $adress_on_controller; - # from Disk Node, find corresponding filepath - my $xpath_find_filepath = sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref); - my $filepath = $xpc->findvalue($xpath_find_filepath); - if (!$filepath) { - warn "invalid file reference $fileref, skipping\n"; - next; - } - print "file path: $filepath\n" if $debug; - my $original_filepath = $filepath; - ($filepath) = $filepath =~ m|^(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$|; # untaint & check no sub/parent dirs - die "referenced path '$original_filepath' is invalid\n" if !$filepath || $filepath eq "." || $filepath eq ".."; + # from Disk Node, find corresponding filepath + my $xpath_find_filepath = + sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref); + my $filepath = $xpc->findvalue($xpath_find_filepath); + if (!$filepath) { + warn "invalid file reference $fileref, skipping\n"; + next; + } + print "file path: $filepath\n" if $debug; + my $original_filepath = $filepath; + ($filepath) = $filepath =~ m|^(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$|; # untaint & check no sub/parent dirs + die "referenced path '$original_filepath' is invalid\n" + if !$filepath || $filepath eq "." 
|| $filepath eq ".."; - # resolve symlinks and relative path components - # and die if the diskimage is not somewhere under the $ovf path - my $ovf_dir = realpath(dirname(File::Spec->rel2abs($ovf))) - or die "could not get absolute path of $ovf: $!\n"; - my $backing_file_path = realpath(join ('/', $ovf_dir, $filepath)) - or die "could not get absolute path of $filepath: $!\n"; - if ($backing_file_path !~ /^\Q${ovf_dir}\E/) { - die "error parsing $filepath, are you using a symlink ?\n"; - } + # resolve symlinks and relative path components + # and die if the diskimage is not somewhere under the $ovf path + my $ovf_dir = realpath(dirname(File::Spec->rel2abs($ovf))) + or die "could not get absolute path of $ovf: $!\n"; + my $backing_file_path = realpath(join('/', $ovf_dir, $filepath)) + or die "could not get absolute path of $filepath: $!\n"; + if ($backing_file_path !~ /^\Q${ovf_dir}\E/) { + die "error parsing $filepath, are you using a symlink ?\n"; + } - ($backing_file_path) = $backing_file_path =~ m|^(/.*)|; # untaint + ($backing_file_path) = $backing_file_path =~ m|^(/.*)|; # untaint - if (!-e $backing_file_path && !$isOva) { - die "error parsing $filepath, file seems not to exist at $backing_file_path\n"; - } + if (!-e $backing_file_path && !$isOva) { + die "error parsing $filepath, file seems not to exist at $backing_file_path\n"; + } - if (!$isOva) { - my $size = PVE::Storage::file_size_info($backing_file_path, undef, 'auto-detect'); - die "error parsing $backing_file_path, cannot determine file size\n" - if !$size; + if (!$isOva) { + my $size = PVE::Storage::file_size_info($backing_file_path, undef, 'auto-detect'); + die "error parsing $backing_file_path, cannot determine file size\n" + if !$size; - $virtual_size = $size; - } - $pve_disk = { - disk_address => $pve_disk_address, - backing_file => $backing_file_path, - virtual_size => $virtual_size, - relative_path => $filepath, - }; - $pve_disk->{virtual_size} = $virtual_size if defined($virtual_size); - push @disks, $pve_disk; - push @$boot_order, $pve_disk_address; + $virtual_size = $size; + } + $pve_disk = { + disk_address => $pve_disk_address, + backing_file => $backing_file_path, + virtual_size => $virtual_size, + relative_path => $filepath, + }; + $pve_disk->{virtual_size} = $virtual_size if defined($virtual_size); + push @disks, $pve_disk; + push @$boot_order, $pve_disk_address; } $qm->{boot} = "order=" . join(';', @$boot_order) if scalar(@$boot_order) > 0; my $nic_id = dtmf_name_to_id('Ethernet Adapter'); - my $xpath_find_nics = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]"; + my $xpath_find_nics = + "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]"; my @nic_items = $xpc->findnodes($xpath_find_nics); my $net = {}; my $net_count = 0; for my $item_node (@nic_items) { - my $model = $xpc->findvalue('rasd:ResourceSubType', $item_node); - $model = lc($model); - $model = 'e1000' if ! 
grep { $_ eq $model } @$allowed_nic_models; - $net->{"net${net_count}"} = { model => $model }; - $net_count++; + my $model = $xpc->findvalue('rasd:ResourceSubType', $item_node); + $model = lc($model); + $model = 'e1000' if !grep { $_ eq $model } @$allowed_nic_models; + $net->{"net${net_count}"} = { model => $model }; + $net_count++; } - return {qm => $qm, disks => \@disks, net => $net}; + return { qm => $qm, disks => \@disks, net => $net }; } 1; diff --git a/src/PVE/Storage.pm b/src/PVE/Storage.pm index d0a696a..3962077 100755 --- a/src/PVE/Storage.pm +++ b/src/PVE/Storage.pm @@ -68,38 +68,45 @@ PVE::Storage::BTRFSPlugin->register(); PVE::Storage::ESXiPlugin->register(); # load third-party plugins -if ( -d '/usr/share/perl5/PVE/Storage/Custom' ) { - dir_glob_foreach('/usr/share/perl5/PVE/Storage/Custom', '.*\.pm$', sub { - my ($file) = @_; - my $modname = 'PVE::Storage::Custom::' . $file; - $modname =~ s!\.pm$!!; - $file = 'PVE/Storage/Custom/' . $file; +if (-d '/usr/share/perl5/PVE/Storage/Custom') { + dir_glob_foreach( + '/usr/share/perl5/PVE/Storage/Custom', + '.*\.pm$', + sub { + my ($file) = @_; + my $modname = 'PVE::Storage::Custom::' . $file; + $modname =~ s!\.pm$!!; + $file = 'PVE/Storage/Custom/' . $file; - eval { - require $file; + eval { + require $file; - # Check perl interface: - die "not derived from PVE::Storage::Plugin\n" if !$modname->isa('PVE::Storage::Plugin'); - die "does not provide an api() method\n" if !$modname->can('api'); - # Check storage API version and that file is really storage plugin. - my $version = $modname->api(); - die "implements an API version newer than current ($version > " . APIVER . ")\n" - if $version > APIVER; - my $min_version = (APIVER - APIAGE); - die "API version too old, please update the plugin ($version < $min_version)\n" - if $version < $min_version; - # all OK, do import and register (i.e., "use") - import $file; - $modname->register(); + # Check perl interface: + die "not derived from PVE::Storage::Plugin\n" + if !$modname->isa('PVE::Storage::Plugin'); + die "does not provide an api() method\n" if !$modname->can('api'); + # Check storage API version and that file is really storage plugin. + my $version = $modname->api(); + die "implements an API version newer than current ($version > " + . APIVER . ")\n" + if $version > APIVER; + my $min_version = (APIVER - APIAGE); + die "API version too old, please update the plugin ($version < $min_version)\n" + if $version < $min_version; + # all OK, do import and register (i.e., "use") + import $file; + $modname->register(); - # If we got this far and the API version is not the same, make some noise: - warn "Plugin \"$modname\" is implementing an older storage API, an upgrade is recommended\n" - if $version != APIVER; - }; - if ($@) { - warn "Error loading storage plugin \"$modname\": $@"; - } - }); + # If we got this far and the API version is not the same, make some noise: + warn + "Plugin \"$modname\" is implementing an older storage API, an upgrade is recommended\n" + if $version != APIVER; + }; + if ($@) { + warn "Error loading storage plugin \"$modname\": $@"; + } + }, + ); } # initialize all plugins @@ -144,7 +151,7 @@ sub lock_storage_config { cfs_lock_file("storage.cfg", undef, $code); my $err = $@; if ($err) { - $errmsg ? die "$errmsg: $err" : die $err; + $errmsg ? 
die "$errmsg: $err" : die $err; } } @@ -157,16 +164,15 @@ my $convert_maxfiles_to_prune_backups = sub { my $maxfiles = delete $scfg->{maxfiles}; if (!defined($scfg->{'prune-backups'}) && defined($maxfiles)) { - my $prune_backups; - if ($maxfiles) { - $prune_backups = { 'keep-last' => $maxfiles }; - } else { # maxfiles 0 means no limit - $prune_backups = { 'keep-all' => 1 }; - } - $scfg->{'prune-backups'} = PVE::JSONSchema::print_property_string( - $prune_backups, - 'prune-backups' - ); + my $prune_backups; + if ($maxfiles) { + $prune_backups = { 'keep-last' => $maxfiles }; + } else { # maxfiles 0 means no limit + $prune_backups = { 'keep-all' => 1 }; + } + $scfg->{'prune-backups'} = PVE::JSONSchema::print_property_string( + $prune_backups, 'prune-backups', + ); } }; @@ -190,11 +196,11 @@ sub storage_check_node { my $scfg = storage_config($cfg, $storeid); if ($scfg->{nodes}) { - $node = PVE::INotify::nodename() if !$node || ($node eq 'localhost'); - if (!$scfg->{nodes}->{$node}) { - die "storage '$storeid' is not available on node '$node'\n" if !$noerr; - return undef; - } + $node = PVE::INotify::nodename() if !$node || ($node eq 'localhost'); + if (!$scfg->{nodes}->{$node}) { + die "storage '$storeid' is not available on node '$node'\n" if !$noerr; + return undef; + } } return $scfg; @@ -206,8 +212,8 @@ sub storage_check_enabled { my $scfg = storage_config($cfg, $storeid); if ($scfg->{disable}) { - die "storage '$storeid' is disabled\n" if !$noerr; - return undef; + die "storage '$storeid' is disabled\n" if !$noerr; + return undef; } return storage_check_node($cfg, $storeid, $node, $noerr); @@ -246,7 +252,7 @@ sub get_max_protected_backups { sub storage_ids { my ($cfg) = @_; - return keys %{$cfg->{ids}}; + return keys %{ $cfg->{ids} }; } sub file_size_info { @@ -276,24 +282,25 @@ sub update_volume_attribute { my $max_protected_backups = get_max_protected_backups($scfg, $storeid); if ( - $vtype eq 'backup' - && $vmid - && $attribute eq 'protected' - && $value - && !$plugin->get_volume_attribute($scfg, $storeid, $volname, 'protected') - && $max_protected_backups > -1 # -1 is unlimited + $vtype eq 'backup' + && $vmid + && $attribute eq 'protected' + && $value + && !$plugin->get_volume_attribute($scfg, $storeid, $volname, 'protected') + && $max_protected_backups > -1 # -1 is unlimited ) { - my $backups = $plugin->list_volumes($storeid, $scfg, $vmid, ['backup']); - my ($backup_type) = map { $_->{subtype} } grep { $_->{volid} eq $volid } $backups->@*; + my $backups = $plugin->list_volumes($storeid, $scfg, $vmid, ['backup']); + my ($backup_type) = map { $_->{subtype} } grep { $_->{volid} eq $volid } $backups->@*; - my $protected_count = grep { - $_->{protected} && (!$backup_type || ($_->{subtype} && $_->{subtype} eq $backup_type)) - } $backups->@*; + my $protected_count = grep { + $_->{protected} + && (!$backup_type || ($_->{subtype} && $_->{subtype} eq $backup_type)) + } $backups->@*; - if ($max_protected_backups <= $protected_count) { - die "The number of protected backups per guest is limited to $max_protected_backups ". - "on storage '$storeid'\n"; - } + if ($max_protected_backups <= $protected_count) { + die "The number of protected backups per guest is limited to $max_protected_backups " + . 
"on storage '$storeid'\n"; + } } return $plugin->update_volume_attribute($scfg, $storeid, $volname, $attribute, $value); @@ -304,13 +311,13 @@ sub volume_size_info { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_size_info($scfg, $storeid, $volname, $timeout); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_size_info($scfg, $storeid, $volname, $timeout); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - return file_size_info($volid, $timeout, 'auto-detect'); + return file_size_info($volid, $timeout, 'auto-detect'); } else { - return 0; + return 0; } } @@ -322,13 +329,13 @@ sub volume_resize { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_resize($scfg, $storeid, $volname, $size, $running); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_resize($scfg, $storeid, $volname, $size, $running); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - die "resize file/device '$volid' is not possible\n"; + die "resize file/device '$volid' is not possible\n"; } else { - die "unable to parse volume ID '$volid'\n"; + die "unable to parse volume ID '$volid'\n"; } } @@ -337,13 +344,13 @@ sub volume_rollback_is_possible { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_rollback_is_possible($scfg, $storeid, $volname, $snap, $blockers); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_rollback_is_possible($scfg, $storeid, $volname, $snap, $blockers); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - die "snapshot rollback file/device '$volid' is not possible\n"; + die "snapshot rollback file/device '$volid' is not possible\n"; } else { - die "unable to parse volume ID '$volid'\n"; + die "unable to parse volume ID '$volid'\n"; } } @@ -352,13 +359,13 @@ sub volume_snapshot { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_snapshot($scfg, $storeid, $volname, $snap); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_snapshot($scfg, $storeid, $volname, $snap); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - die "snapshot file/device '$volid' is not possible\n"; + die "snapshot file/device '$volid' is not possible\n"; } else { - die "unable to parse volume ID '$volid'\n"; + die "unable to parse volume ID '$volid'\n"; } } @@ -367,14 +374,14 @@ sub volume_snapshot_rollback { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - $plugin->volume_rollback_is_possible($scfg, $storeid, $volname, $snap); - return $plugin->volume_snapshot_rollback($scfg, $storeid, $volname, $snap); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + 
$plugin->volume_rollback_is_possible($scfg, $storeid, $volname, $snap); + return $plugin->volume_snapshot_rollback($scfg, $storeid, $volname, $snap); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - die "snapshot rollback file/device '$volid' is not possible\n"; + die "snapshot rollback file/device '$volid' is not possible\n"; } else { - die "unable to parse volume ID '$volid'\n"; + die "unable to parse volume ID '$volid'\n"; } } @@ -384,13 +391,13 @@ sub volume_snapshot_delete { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_snapshot_delete($scfg, $storeid, $volname, $snap, $running); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_snapshot_delete($scfg, $storeid, $volname, $snap, $running); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - die "snapshot delete file/device '$volid' is not possible\n"; + die "snapshot delete file/device '$volid' is not possible\n"; } else { - die "unable to parse volume ID '$volid'\n"; + die "unable to parse volume ID '$volid'\n"; } } @@ -427,13 +434,14 @@ sub volume_has_feature { my ($storeid, $volname) = parse_volume_id($volid, 1); if ($storeid) { - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_has_feature($scfg, $feature, $storeid, $volname, $snap, $running, $opts); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + return $plugin->volume_has_feature($scfg, $feature, $storeid, $volname, $snap, $running, + $opts); } elsif ($volid =~ m|^(/.+)$| && -e $volid) { - return undef; + return undef; } else { - return undef; + return undef; } } @@ -543,31 +551,35 @@ sub check_volume_access { my ($sid, $volname) = parse_volume_id($volid, 1); if ($sid) { - my ($vtype, undef, $ownervm) = parse_volname($cfg, $volid); + my ($vtype, undef, $ownervm) = parse_volname($cfg, $volid); - # Need to allow 'images' when expecting 'rootdir' too - not cleanly separated in plugins. - die "unable to use volume $volid - content type needs to be '$type'\n" - if defined($type) && $vtype ne $type && ($type ne 'rootdir' || $vtype ne 'images'); + # Need to allow 'images' when expecting 'rootdir' too - not cleanly separated in plugins. 
+ die "unable to use volume $volid - content type needs to be '$type'\n" + if defined($type) && $vtype ne $type && ($type ne 'rootdir' || $vtype ne 'images'); - return if $rpcenv->check($user, "/storage/$sid", ['Datastore.Allocate'], 1); + return if $rpcenv->check($user, "/storage/$sid", ['Datastore.Allocate'], 1); - if ($vtype eq 'iso' || $vtype eq 'vztmpl' || $vtype eq 'import') { - # require at least read access to storage, (custom) templates/ISOs could be sensitive - $rpcenv->check_any($user, "/storage/$sid", ['Datastore.AllocateSpace', 'Datastore.Audit']); - } elsif (defined($ownervm) && defined($vmid) && ($ownervm == $vmid)) { - # we are owner - allow access - } elsif ($vtype eq 'backup' && $ownervm) { - $rpcenv->check($user, "/storage/$sid", ['Datastore.AllocateSpace']); - $rpcenv->check($user, "/vms/$ownervm", ['VM.Backup']); - } elsif (($vtype eq 'images' || $vtype eq 'rootdir') && $ownervm) { - $rpcenv->check($user, "/storage/$sid", ['Datastore.Audit']); - $rpcenv->check($user, "/vms/$ownervm", ['VM.Config.Disk']); - } else { - die "missing privileges to access $volid\n"; - } + if ($vtype eq 'iso' || $vtype eq 'vztmpl' || $vtype eq 'import') { + # require at least read access to storage, (custom) templates/ISOs could be sensitive + $rpcenv->check_any( + $user, + "/storage/$sid", + ['Datastore.AllocateSpace', 'Datastore.Audit'], + ); + } elsif (defined($ownervm) && defined($vmid) && ($ownervm == $vmid)) { + # we are owner - allow access + } elsif ($vtype eq 'backup' && $ownervm) { + $rpcenv->check($user, "/storage/$sid", ['Datastore.AllocateSpace']); + $rpcenv->check($user, "/vms/$ownervm", ['VM.Backup']); + } elsif (($vtype eq 'images' || $vtype eq 'rootdir') && $ownervm) { + $rpcenv->check($user, "/storage/$sid", ['Datastore.Audit']); + $rpcenv->check($user, "/vms/$ownervm", ['VM.Config.Disk']); + } else { + die "missing privileges to access $volid\n"; + } } else { - die "Only root can pass arbitrary filesystem paths." - if $user ne 'root@pam'; + die "Only root can pass arbitrary filesystem paths." 
+ if $user ne 'root@pam'; } return undef; @@ -583,25 +595,24 @@ sub volume_is_base_and_used { my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - my ($vtype, $name, $vmid, undef, undef, $isBase, undef) = - $plugin->parse_volname($volname); + my ($vtype, $name, $vmid, undef, undef, $isBase, undef) = $plugin->parse_volname($volname); if ($isBase) { - my $vollist = $plugin->list_images($storeid, $scfg); - foreach my $info (@$vollist) { - my (undef, $tmpvolname) = parse_volume_id($info->{volid}); - my $basename = undef; - my $basevmid = undef; + my $vollist = $plugin->list_images($storeid, $scfg); + foreach my $info (@$vollist) { + my (undef, $tmpvolname) = parse_volume_id($info->{volid}); + my $basename = undef; + my $basevmid = undef; - eval{ - (undef, undef, undef, $basename, $basevmid) = - $plugin->parse_volname($tmpvolname); - }; + eval { + (undef, undef, undef, $basename, $basevmid) = + $plugin->parse_volname($tmpvolname); + }; - if ($basename && defined($basevmid) && $basevmid == $vmid && $basename eq $name) { - return 1; - } - } + if ($basename && defined($basevmid) && $basevmid == $vmid && $basename eq $name) { + return 1; + } + } } return 0; } @@ -614,14 +625,14 @@ sub path_to_volume_id { my ($sid, $volname) = parse_volume_id($path, 1); if ($sid) { - if (my $scfg = $ids->{$sid}) { - if ($scfg->{path}) { - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - my ($vtype, $name, $vmid) = $plugin->parse_volname($volname); - return ($vtype, $path); - } - } - return (''); + if (my $scfg = $ids->{$sid}) { + if ($scfg->{path}) { + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + my ($vtype, $name, $vmid) = $plugin->parse_volname($volname); + return ($vtype, $path); + } + } + return (''); } # Note: abs_path() return undef if $path doesn not exist @@ -629,48 +640,48 @@ sub path_to_volume_id { $path = abs_path($path) || $path; foreach my $sid (keys %$ids) { - my $scfg = $ids->{$sid}; - next if !$scfg->{path}; - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - my $imagedir = $plugin->get_subdir($scfg, 'images'); - my $isodir = $plugin->get_subdir($scfg, 'iso'); - my $tmpldir = $plugin->get_subdir($scfg, 'vztmpl'); - my $backupdir = $plugin->get_subdir($scfg, 'backup'); - my $privatedir = $plugin->get_subdir($scfg, 'rootdir'); - my $snippetsdir = $plugin->get_subdir($scfg, 'snippets'); - my $importdir = $plugin->get_subdir($scfg, 'import'); + my $scfg = $ids->{$sid}; + next if !$scfg->{path}; + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + my $imagedir = $plugin->get_subdir($scfg, 'images'); + my $isodir = $plugin->get_subdir($scfg, 'iso'); + my $tmpldir = $plugin->get_subdir($scfg, 'vztmpl'); + my $backupdir = $plugin->get_subdir($scfg, 'backup'); + my $privatedir = $plugin->get_subdir($scfg, 'rootdir'); + my $snippetsdir = $plugin->get_subdir($scfg, 'snippets'); + my $importdir = $plugin->get_subdir($scfg, 'import'); - if ($path =~ m!^$imagedir/(\d+)/([^/\s]+)$!) { - my $vmid = $1; - my $name = $2; + if ($path =~ m!^$imagedir/(\d+)/([^/\s]+)$!) { + my $vmid = $1; + my $name = $2; - my $vollist = $plugin->list_images($sid, $scfg, $vmid); - foreach my $info (@$vollist) { - my ($storeid, $volname) = parse_volume_id($info->{volid}); - my $volpath = $plugin->path($scfg, $volname, $storeid); - if ($volpath eq $path) { - return ('images', $info->{volid}); - } - } - } elsif ($path =~ m!^$isodir/([^/]+$ISO_EXT_RE_0)$!) 
{ - my $name = $1; - return ('iso', "$sid:iso/$name"); - } elsif ($path =~ m!^$tmpldir/([^/]+$VZTMPL_EXT_RE_1)$!) { - my $name = $1; - return ('vztmpl', "$sid:vztmpl/$name"); - } elsif ($path =~ m!^$privatedir/(\d+)$!) { - my $vmid = $1; - return ('rootdir', "$sid:rootdir/$vmid"); - } elsif ($path =~ m!^$backupdir/([^/]+$BACKUP_EXT_RE_2)$!) { - my $name = $1; - return ('backup', "$sid:backup/$name"); - } elsif ($path =~ m!^$snippetsdir/([^/]+)$!) { - my $name = $1; - return ('snippets', "$sid:snippets/$name"); - } elsif ($path =~ m!^$importdir/(${SAFE_CHAR_CLASS_RE}+${IMPORT_EXT_RE_1})$!) { - my $name = $1; - return ('import', "$sid:import/$name"); - } + my $vollist = $plugin->list_images($sid, $scfg, $vmid); + foreach my $info (@$vollist) { + my ($storeid, $volname) = parse_volume_id($info->{volid}); + my $volpath = $plugin->path($scfg, $volname, $storeid); + if ($volpath eq $path) { + return ('images', $info->{volid}); + } + } + } elsif ($path =~ m!^$isodir/([^/]+$ISO_EXT_RE_0)$!) { + my $name = $1; + return ('iso', "$sid:iso/$name"); + } elsif ($path =~ m!^$tmpldir/([^/]+$VZTMPL_EXT_RE_1)$!) { + my $name = $1; + return ('vztmpl', "$sid:vztmpl/$name"); + } elsif ($path =~ m!^$privatedir/(\d+)$!) { + my $vmid = $1; + return ('rootdir', "$sid:rootdir/$vmid"); + } elsif ($path =~ m!^$backupdir/([^/]+$BACKUP_EXT_RE_2)$!) { + my $name = $1; + return ('backup', "$sid:backup/$name"); + } elsif ($path =~ m!^$snippetsdir/([^/]+)$!) { + my $name = $1; + return ('snippets', "$sid:snippets/$name"); + } elsif ($path =~ m!^$importdir/(${SAFE_CHAR_CLASS_RE}+${IMPORT_EXT_RE_1})$!) { + my $name = $1; + return ('import', "$sid:import/$name"); + } } # can't map path to volume id @@ -693,19 +704,19 @@ sub abs_filesystem_path { my ($cfg, $volid, $allow_blockdev) = @_; my $path; - if (parse_volume_id ($volid, 1)) { - activate_volumes($cfg, [ $volid ]); - $path = PVE::Storage::path($cfg, $volid); + if (parse_volume_id($volid, 1)) { + activate_volumes($cfg, [$volid]); + $path = PVE::Storage::path($cfg, $volid); } else { - if (-f $volid || ($allow_blockdev && -b $volid)) { - my $abspath = abs_path($volid); - if ($abspath && $abspath =~ m|^(/.+)$|) { - $path = $1; # untaint any path - } - } + if (-f $volid || ($allow_blockdev && -b $volid)) { + my $abspath = abs_path($volid); + if ($abspath && $abspath =~ m|^(/.+)$|) { + $path = $1; # untaint any path + } + } } die "can't find file '$volid'\n" - if !($path && (-f $path || ($allow_blockdev && -b $path))); + if !($path && (-f $path || ($allow_blockdev && -b $path))); return $path; } @@ -719,14 +730,14 @@ my $volname_for_storage = sub { my (undef, $valid_formats) = PVE::Storage::Plugin::default_format($scfg); my $format_is_valid = grep { $_ eq $format } @$valid_formats; die "unsupported format '$format' for storage type $scfg->{type}\n" - if !$format_is_valid; + if !$format_is_valid; (my $name_without_extension = $name) =~ s/\.$format$//; if ($scfg->{path}) { - return "$vmid/$name_without_extension.$format"; + return "$vmid/$name_without_extension.$format"; } else { - return "$name_without_extension"; + return "$name_without_extension"; } }; @@ -749,16 +760,16 @@ my $volume_import_prepare = sub { my $recv = ['pvesm', 'import', $volid, $format, $path, '-with-snapshots', $with_snapshots]; if (defined($snapshot)) { - push @$recv, '-snapshot', $snapshot; + push @$recv, '-snapshot', $snapshot; } if ($migration_snapshot) { - push @$recv, '-delete-snapshot', $snapshot; + push @$recv, '-delete-snapshot', $snapshot; } push @$recv, '-allow-rename', $allow_rename; if 
(defined($base_snapshot)) { - # Check if the snapshot exists on the remote side: - push @$recv, '-base', $base_snapshot; + # Check if the snapshot exists on the remote side: + push @$recv, '-base', $base_snapshot; } return $recv; @@ -774,27 +785,29 @@ my $volume_export_prepare = sub { my $send = ['pvesm', 'export', $volid, $format, '-', '-with-snapshots', $with_snapshots]; if (defined($snapshot)) { - push @$send, '-snapshot', $snapshot; + push @$send, '-snapshot', $snapshot; } if (defined($base_snapshot)) { - push @$send, '-base', $base_snapshot; + push @$send, '-base', $base_snapshot; } my $cstream; if (defined($ratelimit_bps)) { - $cstream = [ '/usr/bin/cstream', '-t', $ratelimit_bps ]; - $logfunc->("using a bandwidth limit of $ratelimit_bps bytes per second for transferring '$volid'") if $logfunc; + $cstream = ['/usr/bin/cstream', '-t', $ratelimit_bps]; + $logfunc->( + "using a bandwidth limit of $ratelimit_bps bytes per second for transferring '$volid'") + if $logfunc; } volume_snapshot($cfg, $volid, $snapshot) if $migration_snapshot; if (defined($snapshot)) { - activate_volumes($cfg, [$volid], $snapshot); + activate_volumes($cfg, [$volid], $snapshot); } else { - activate_volumes($cfg, [$volid]); + activate_volumes($cfg, [$volid]); } - return $cstream ? [ $send, $cstream ] : [ $send ]; + return $cstream ? [$send, $cstream] : [$send]; }; sub storage_migrate { @@ -813,12 +826,12 @@ sub storage_migrate { my $target_volname; if ($opts->{target_volname}) { - $target_volname = $opts->{target_volname}; + $target_volname = $opts->{target_volname}; } elsif ($scfg->{type} eq $tcfg->{type}) { - $target_volname = $volname; + $target_volname = $volname; } else { - my (undef, $name, $vmid, undef, undef, undef, $format) = parse_volname($cfg, $volid); - $target_volname = $volname_for_storage->($cfg, $target_storeid, $name, $vmid, $format); + my (undef, $name, $vmid, undef, undef, undef, $format) = parse_volname($cfg, $volid); + $target_volname = $volname_for_storage->($cfg, $target_storeid, $name, $vmid, $format); } my $target_volid = "${target_storeid}:${target_volname}"; @@ -830,119 +843,128 @@ sub storage_migrate { local $ENV{RSYNC_RSH} = PVE::Tools::cmd2string($ssh_base); if (!defined($opts->{snapshot})) { - $opts->{migration_snapshot} = storage_migrate_snapshot($cfg, $storeid, $opts->{with_snapshots}); - $opts->{snapshot} = '__migration__' if $opts->{migration_snapshot}; + $opts->{migration_snapshot} = + storage_migrate_snapshot($cfg, $storeid, $opts->{with_snapshots}); + $opts->{snapshot} = '__migration__' if $opts->{migration_snapshot}; } - my @formats = volume_transfer_formats($cfg, $volid, $target_volid, $opts->{snapshot}, $opts->{base_snapshot}, $opts->{with_snapshots}); + my @formats = volume_transfer_formats( + $cfg, + $volid, + $target_volid, + $opts->{snapshot}, + $opts->{base_snapshot}, + $opts->{with_snapshots}, + ); die "cannot migrate from storage type '$scfg->{type}' to '$tcfg->{type}'\n" if !@formats; my $format = $formats[0]; my $import_fn = '-'; # let pvesm import read from stdin per default if ($insecure) { - my $net = $target_sshinfo->{network} // $target_sshinfo->{ip}; - $import_fn = "tcp://$net"; + my $net = $target_sshinfo->{network} // $target_sshinfo->{ip}; + $import_fn = "tcp://$net"; } - my $recv = [ @$ssh, '--', $volume_import_prepare->($target_volid, $format, $import_fn, $opts)->@* ]; + my $recv = + [@$ssh, '--', $volume_import_prepare->($target_volid, $format, $import_fn, $opts)->@*]; my $new_volid; my $pattern = volume_imported_message(undef, 1); # Matches new volid 
and rate-limits dd output my $match_volid_and_log = sub { - my $line = shift; - my $show = 1; + my $line = shift; + my $show = 1; - if ($line =~ /(?:\d+ bytes)(?:.+?copied, )(\d+) s/) { # rate-limit dd logs - my $elapsed = int($1); - if ($elapsed < 60) { - $show = !($1 % 3); - } elsif ($elapsed < 600) { - $show = !($1 % 10); - } else { - $show = !($1 % 30); - } - } + if ($line =~ /(?:\d+ bytes)(?:.+?copied, )(\d+) s/) { # rate-limit dd logs + my $elapsed = int($1); + if ($elapsed < 60) { + $show = !($1 % 3); + } elsif ($elapsed < 600) { + $show = !($1 % 10); + } else { + $show = !($1 % 30); + } + } - $new_volid = $1 if ($line =~ $pattern); + $new_volid = $1 if ($line =~ $pattern); - if ($logfunc && $show) { - chomp($line); - $logfunc->($line); - } + if ($logfunc && $show) { + chomp($line); + $logfunc->($line); + } }; my $cmds = $volume_export_prepare->($cfg, $volid, $format, $logfunc, $opts); eval { - if ($insecure) { - my ($ip, $port, $socket); - my $send_error; + if ($insecure) { + my ($ip, $port, $socket); + my $send_error; - my $handle_insecure_migration = sub { - my $line = shift; + my $handle_insecure_migration = sub { + my $line = shift; - if (!$ip) { - ($ip) = $line =~ /^($PVE::Tools::IPRE)$/ # untaint - or die "no tunnel IP received, got '$line'\n"; - } elsif (!$port) { - ($port) = $line =~ /^(\d+)$/ # untaint - or die "no tunnel port received, got '$line'\n"; + if (!$ip) { + ($ip) = $line =~ /^($PVE::Tools::IPRE)$/ # untaint + or die "no tunnel IP received, got '$line'\n"; + } elsif (!$port) { + ($port) = $line =~ /^(\d+)$/ # untaint + or die "no tunnel port received, got '$line'\n"; - # create socket, run command - $socket = IO::Socket::IP->new( - PeerHost => $ip, - PeerPort => $port, - Type => SOCK_STREAM, - ); - die "failed to connect to tunnel at $ip:$port\n" if !$socket; - # we won't be reading from the socket - shutdown($socket, 0); + # create socket, run command + $socket = IO::Socket::IP->new( + PeerHost => $ip, + PeerPort => $port, + Type => SOCK_STREAM, + ); + die "failed to connect to tunnel at $ip:$port\n" if !$socket; + # we won't be reading from the socket + shutdown($socket, 0); - eval { - run_command( - $cmds, - output => '>&'.fileno($socket), - errfunc => $match_volid_and_log, - ); - }; - $send_error = $@; + eval { + run_command( + $cmds, + output => '>&' . 
fileno($socket), + errfunc => $match_volid_and_log, + ); + }; + $send_error = $@; - # don't close the connection entirely otherwise the receiving end - # might not get all buffered data (and fails with 'connection reset by peer') - shutdown($socket, 1); - } else { - $match_volid_and_log->("[$target_sshinfo->{name}] $line"); - } - }; + # don't close the connection entirely otherwise the receiving end + # might not get all buffered data (and fails with 'connection reset by peer') + shutdown($socket, 1); + } else { + $match_volid_and_log->("[$target_sshinfo->{name}] $line"); + } + }; - eval { - run_command( - $recv, - outfunc => $handle_insecure_migration, - errfunc => sub { - my $line = shift; - $match_volid_and_log->("[$target_sshinfo->{name}] $line"); - }, - ); - }; - my $recv_err = $@; - close($socket) if $socket; - die "failed to run insecure migration: $recv_err\n" if $recv_err; + eval { + run_command( + $recv, + outfunc => $handle_insecure_migration, + errfunc => sub { + my $line = shift; + $match_volid_and_log->("[$target_sshinfo->{name}] $line"); + }, + ); + }; + my $recv_err = $@; + close($socket) if $socket; + die "failed to run insecure migration: $recv_err\n" if $recv_err; - die $send_error if $send_error; - } else { - push @$cmds, $recv; - run_command($cmds, logfunc => $match_volid_and_log); - } + die $send_error if $send_error; + } else { + push @$cmds, $recv; + run_command($cmds, logfunc => $match_volid_and_log); + } - die "unable to get ID of the migrated volume\n" if !defined($new_volid); + die "unable to get ID of the migrated volume\n" if !defined($new_volid); }; my $err = $@; if ($opts->{migration_snapshot}) { - warn "send/receive failed, cleaning up snapshot(s)..\n" if $err; - eval { volume_snapshot_delete($cfg, $volid, $opts->{snapshot}, 0) }; - warn "could not remove source snapshot: $@\n" if $@; + warn "send/receive failed, cleaning up snapshot(s)..\n" if $err; + eval { volume_snapshot_delete($cfg, $volid, $opts->{snapshot}, 0) }; + warn "could not remove source snapshot: $@\n" if $@; } die $err if $err; @@ -961,10 +983,15 @@ sub vdisk_clone { activate_storage($cfg, $storeid); # lock shared storage - return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - my $volname = $plugin->clone_image($scfg, $storeid, $volname, $vmid, $snap); - return "$storeid:$volname"; - }); + return $plugin->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + my $volname = $plugin->clone_image($scfg, $storeid, $volname, $vmid, $snap); + return "$storeid:$volname"; + }, + ); } sub vdisk_create_base { @@ -979,10 +1006,15 @@ sub vdisk_create_base { activate_storage($cfg, $storeid); # lock shared storage - return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - my $volname = $plugin->create_base($storeid, $scfg, $volname); - return "$storeid:$volname"; - }); + return $plugin->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + my $volname = $plugin->create_base($storeid, $scfg, $volname); + return "$storeid:$volname"; + }, + ); } sub map_volume { @@ -1031,14 +1063,20 @@ sub vdisk_alloc { my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); # lock shared storage - return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - my $old_umask = umask(umask|0037); - my $volname = eval { $plugin->alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size) }; - my $err = $@; - umask $old_umask; - die $err if $err; - return "$storeid:$volname"; - }); + return $plugin->cluster_lock_storage( + $storeid, + 
$scfg->{shared}, + undef, + sub { + my $old_umask = umask(umask | 0037); + my $volname = + eval { $plugin->alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size) }; + my $err = $@; + umask $old_umask; + die $err if $err; + return "$storeid:$volname"; + }, + ); } sub vdisk_free { @@ -1053,15 +1091,20 @@ sub vdisk_free { my $cleanup_worker; # lock shared storage - $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - # LVM-thin allows deletion of still referenced base volumes! - die "base volume '$volname' is still in use by linked clones\n" - if volume_is_base_and_used($cfg, $volid); + $plugin->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + # LVM-thin allows deletion of still referenced base volumes! + die "base volume '$volname' is still in use by linked clones\n" + if volume_is_base_and_used($cfg, $volid); - my (undef, undef, undef, undef, undef, $isBase, $format) = - $plugin->parse_volname($volname); - $cleanup_worker = $plugin->free_image($storeid, $scfg, $volname, $isBase, $format); - }); + my (undef, undef, undef, undef, undef, $isBase, $format) = + $plugin->parse_volname($volname); + $cleanup_worker = $plugin->free_image($storeid, $scfg, $volname, $isBase, $format); + }, + ); return if !$cleanup_worker; @@ -1084,21 +1127,21 @@ sub vdisk_list { my $storage_list = []; if ($vollist) { - foreach my $volid (@$vollist) { - my ($sid, undef) = parse_volume_id($volid); - next if !defined($ids->{$sid}); - next if !storage_check_enabled($cfg, $sid, undef, 1); - push @$storage_list, $sid; - } + foreach my $volid (@$vollist) { + my ($sid, undef) = parse_volume_id($volid); + next if !defined($ids->{$sid}); + next if !storage_check_enabled($cfg, $sid, undef, 1); + push @$storage_list, $sid; + } } else { - foreach my $sid (keys %$ids) { - next if $storeid && $storeid ne $sid; - next if !storage_check_enabled($cfg, $sid, undef, 1); - my $content = $ids->{$sid}->{content}; - next if defined($ctype) && !$content->{$ctype}; - next if !($content->{rootdir} || $content->{images}); - push @$storage_list, $sid; - } + foreach my $sid (keys %$ids) { + next if $storeid && $storeid ne $sid; + next if !storage_check_enabled($cfg, $sid, undef, 1); + my $content = $ids->{$sid}->{content}; + next if defined($ctype) && !$content->{$ctype}; + next if !($content->{rootdir} || $content->{images}); + push @$storage_list, $sid; + } } my $cache = {}; @@ -1106,12 +1149,13 @@ sub vdisk_list { activate_storage_list($cfg, $storage_list, $cache); for my $sid ($storage_list->@*) { - next if $storeid && $storeid ne $sid; + next if $storeid && $storeid ne $sid; - my $scfg = $ids->{$sid}; - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - $res->{$sid} = $plugin->list_images($sid, $scfg, $vmid, $vollist, $cache); - @{$res->{$sid}} = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @{$res->{$sid}} if $res->{$sid}; + my $scfg = $ids->{$sid}; + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + $res->{$sid} = $plugin->list_images($sid, $scfg, $vmid, $vollist, $cache); + @{ $res->{$sid} } = sort { lc($a->{volid}) cmp lc($b->{volid}) } @{ $res->{$sid} } + if $res->{$sid}; } return $res; @@ -1121,7 +1165,7 @@ sub template_list { my ($cfg, $storeid, $tt) = @_; die "unknown template type '$tt'\n" - if !($tt eq 'iso' || $tt eq 'vztmpl' || $tt eq 'backup' || $tt eq 'snippets'); + if !($tt eq 'iso' || $tt eq 'vztmpl' || $tt eq 'backup' || $tt eq 'snippets'); my $ids = $cfg->{ids}; @@ -1131,16 +1175,16 @@ sub template_list { # query the storage foreach my $sid (keys %$ids) { - next if 
$storeid && $storeid ne $sid; + next if $storeid && $storeid ne $sid; - my $scfg = $ids->{$sid}; - my $type = $scfg->{type}; + my $scfg = $ids->{$sid}; + my $type = $scfg->{type}; - next if !$scfg->{content}->{$tt}; + next if !$scfg->{content}->{$tt}; - next if !storage_check_enabled($cfg, $sid, undef, 1); + next if !storage_check_enabled($cfg, $sid, undef, 1); - $res->{$sid} = volume_list($cfg, $sid, undef, $tt); + $res->{$sid} = volume_list($cfg, $sid, undef, $tt); } return $res; @@ -1151,11 +1195,11 @@ sub volume_list { my @ctypes = qw(rootdir images vztmpl iso backup snippets import); - my $cts = $content ? [ $content ] : [ @ctypes ]; + my $cts = $content ? [$content] : [@ctypes]; my $scfg = PVE::Storage::storage_config($cfg, $storeid); - $cts = [ grep { defined($scfg->{content}->{$_}) } @$cts ]; + $cts = [grep { defined($scfg->{content}->{$_}) } @$cts]; my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); @@ -1163,7 +1207,7 @@ sub volume_list { my $res = $plugin->list_volumes($storeid, $scfg, $vmid, $cts); - @$res = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @$res; + @$res = sort { lc($a->{volid}) cmp lc($b->{volid}) } @$res; return $res; } @@ -1174,11 +1218,11 @@ sub uevent_seqnum { my $seqnum = 0; if (my $fh = IO::File->new($filename, "r")) { - my $line = <$fh>; - if ($line =~ m/^(\d+)$/) { - $seqnum = int($1); - } - close ($fh); + my $line = <$fh>; + if ($line =~ m/^(\d+)$/) { + $seqnum = int($1); + } + close($fh); } return $seqnum; } @@ -1197,23 +1241,23 @@ sub activate_storage { my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); if ($scfg->{base}) { - my ($baseid, undef) = parse_volume_id ($scfg->{base}); - activate_storage($cfg, $baseid, $cache); + my ($baseid, undef) = parse_volume_id($scfg->{base}); + activate_storage($cfg, $baseid, $cache); } - if (! 
eval { $plugin->check_connection($storeid, $scfg) }) { - die "connection check for storage '$storeid' failed - $@\n" if $@; - die "storage '$storeid' is not online\n"; + if (!eval { $plugin->check_connection($storeid, $scfg) }) { + die "connection check for storage '$storeid' failed - $@\n" if $@; + die "storage '$storeid' is not online\n"; } $plugin->activate_storage($storeid, $scfg, $cache); - my $newseq = uevent_seqnum (); + my $newseq = uevent_seqnum(); # only call udevsettle if there are events if ($newseq > $cache->{uevent_seqnum}) { - system ("udevadm settle --timeout=30"); # ignore errors - $cache->{uevent_seqnum} = $newseq; + system("udevadm settle --timeout=30"); # ignore errors + $cache->{uevent_seqnum} = $newseq; } $cache->{activated}->{$storeid} = 1; @@ -1225,14 +1269,14 @@ sub activate_storage_list { $cache = {} if !$cache; foreach my $storeid (@$storeid_list) { - activate_storage($cfg, $storeid, $cache); + activate_storage($cfg, $storeid, $cache); } } sub deactivate_storage { my ($cfg, $storeid) = @_; - my $scfg = storage_config ($cfg, $storeid); + my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); my $cache = {}; @@ -1246,8 +1290,8 @@ sub activate_volumes { my $storagehash = {}; foreach my $volid (@$vollist) { - my ($storeid, undef) = parse_volume_id($volid); - $storagehash->{$storeid} = 1; + my ($storeid, undef) = parse_volume_id($volid); + $storagehash->{$storeid} = 1; } my $cache = {}; @@ -1255,10 +1299,10 @@ sub activate_volumes { activate_storage_list($cfg, [keys %$storagehash], $cache); foreach my $volid (@$vollist) { - my ($storeid, $volname) = parse_volume_id($volid); - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - $plugin->activate_volume($storeid, $scfg, $volname, $snapname, $cache); + my ($storeid, $volname) = parse_volume_id($volid); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + $plugin->activate_volume($storeid, $scfg, $volname, $snapname, $cache); } } @@ -1271,22 +1315,20 @@ sub deactivate_volumes { my @errlist = (); foreach my $volid (@$vollist) { - my ($storeid, $volname) = parse_volume_id($volid); + my ($storeid, $volname) = parse_volume_id($volid); - my $scfg = storage_config($cfg, $storeid); - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + my $scfg = storage_config($cfg, $storeid); + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - eval { - $plugin->deactivate_volume($storeid, $scfg, $volname, $snapname, $cache); - }; - if (my $err = $@) { - warn $err; - push @errlist, $volid; - } + eval { $plugin->deactivate_volume($storeid, $scfg, $volname, $snapname, $cache); }; + if (my $err = $@) { + warn $err; + push @errlist, $volid; + } } die "volume deactivation failed: " . 
join(' ', @errlist) - if scalar(@errlist); + if scalar(@errlist); } sub storage_info { @@ -1300,65 +1342,66 @@ sub storage_info { my $slist = []; foreach my $storeid (keys %$ids) { - my $storage_enabled = defined(storage_check_enabled($cfg, $storeid, undef, 1)); + my $storage_enabled = defined(storage_check_enabled($cfg, $storeid, undef, 1)); - if (defined($content)) { - my $want_ctype = 0; - foreach my $ctype (@ctypes) { - if ($ids->{$storeid}->{content}->{$ctype}) { - $want_ctype = 1; - last; - } - } - next if !$want_ctype || !$storage_enabled; - } + if (defined($content)) { + my $want_ctype = 0; + foreach my $ctype (@ctypes) { + if ($ids->{$storeid}->{content}->{$ctype}) { + $want_ctype = 1; + last; + } + } + next if !$want_ctype || !$storage_enabled; + } - my $type = $ids->{$storeid}->{type}; + my $type = $ids->{$storeid}->{type}; - $info->{$storeid} = { - type => $type, - total => 0, - avail => 0, - used => 0, - shared => $ids->{$storeid}->{shared} ? 1 : 0, - content => PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}), - active => 0, - enabled => $storage_enabled ? 1 : 0, - }; + $info->{$storeid} = { + type => $type, + total => 0, + avail => 0, + used => 0, + shared => $ids->{$storeid}->{shared} ? 1 : 0, + content => + PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}), + active => 0, + enabled => $storage_enabled ? 1 : 0, + }; - push @$slist, $storeid; + push @$slist, $storeid; } my $cache = {}; foreach my $storeid (keys %$ids) { - my $scfg = $ids->{$storeid}; + my $scfg = $ids->{$storeid}; - next if !$info->{$storeid}; - next if !$info->{$storeid}->{enabled}; + next if !$info->{$storeid}; + next if !$info->{$storeid}->{enabled}; - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - if ($includeformat) { - my $pd = $plugin->plugindata(); - $info->{$storeid}->{format} = $pd->{format} - if $pd->{format}; - $info->{$storeid}->{select_existing} = $pd->{select_existing} - if $pd->{select_existing}; - } + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + if ($includeformat) { + my $pd = $plugin->plugindata(); + $info->{$storeid}->{format} = $pd->{format} + if $pd->{format}; + $info->{$storeid}->{select_existing} = $pd->{select_existing} + if $pd->{select_existing}; + } - eval { activate_storage($cfg, $storeid, $cache); }; - if (my $err = $@) { - warn $err; - next; - } + eval { activate_storage($cfg, $storeid, $cache); }; + if (my $err = $@) { + warn $err; + next; + } - my ($total, $avail, $used, $active) = eval { $plugin->status($storeid, $scfg, $cache); }; - warn $@ if $@; - next if !$active; - $info->{$storeid}->{total} = int($total); - $info->{$storeid}->{avail} = int($avail); - $info->{$storeid}->{used} = int($used); - $info->{$storeid}->{active} = $active; + my ($total, $avail, $used, $active) = eval { $plugin->status($storeid, $scfg, $cache); }; + warn $@ if $@; + next if !$active; + $info->{$storeid}->{total} = int($total); + $info->{$storeid}->{avail} = int($avail); + $info->{$storeid}->{used} = int($used); + $info->{$storeid}->{active} = $active; } return $info; @@ -1369,12 +1412,12 @@ sub resolv_server { my ($packed_ip, $family); eval { - my @res = PVE::Tools::getaddrinfo_all($server); - $family = $res[0]->{family}; - $packed_ip = (PVE::Tools::unpack_sockaddr_in46($res[0]->{addr}))[2]; + my @res = PVE::Tools::getaddrinfo_all($server); + $family = $res[0]->{family}; + $packed_ip = (PVE::Tools::unpack_sockaddr_in46($res[0]->{addr}))[2]; }; if (defined $packed_ip) { - return Socket::inet_ntop($family, $packed_ip); + 
return Socket::inet_ntop($family, $packed_ip); } return undef; } @@ -1383,21 +1426,24 @@ sub scan_nfs { my ($server_in) = @_; my $server; - if (!($server = resolv_server ($server_in))) { - die "unable to resolve address for server '${server_in}'\n"; + if (!($server = resolv_server($server_in))) { + die "unable to resolve address for server '${server_in}'\n"; } - my $cmd = ['/sbin/showmount', '--no-headers', '--exports', $server]; + my $cmd = ['/sbin/showmount', '--no-headers', '--exports', $server]; my $res = {}; - run_command($cmd, outfunc => sub { - my $line = shift; + run_command( + $cmd, + outfunc => sub { + my $line = shift; - # note: howto handle white spaces in export path?? - if ($line =~ m!^(/\S+)\s+(.+)$!) { - $res->{$1} = $2; - } - }); + # note: howto handle white spaces in export path?? + if ($line =~ m!^(/\S+)\s+(.+)$!) { + $res->{$1} = $2; + } + }, + ); return $res; } @@ -1418,20 +1464,21 @@ sub scan_cifs { my $res = {}; my $err = ''; - run_command($cmd, - noerr => 1, - errfunc => sub { - $err .= "$_[0]\n" - }, - outfunc => sub { - my $line = shift; - if ($line =~ m/(\S+)\s*Disk\s*(\S*)/) { - $res->{$1} = $2; - } elsif ($line =~ m/(NT_STATUS_(\S+))/) { - my $status = $1; - $err .= "unexpected status: $1\n" if uc($1) ne 'SUCCESS'; - } - }, + run_command( + $cmd, + noerr => 1, + errfunc => sub { + $err .= "$_[0]\n"; + }, + outfunc => sub { + my $line = shift; + if ($line =~ m/(\S+)\s*Disk\s*(\S*)/) { + $res->{$1} = $2; + } elsif ($line =~ m/(NT_STATUS_(\S+))/) { + my $status = $1; + $err .= "unexpected status: $1\n" if uc($1) ne 'SUCCESS'; + } + }, ); # only die if we got no share, else it's just some followup check error # (like workgroup querying) @@ -1442,22 +1489,25 @@ sub scan_cifs { sub scan_zfs { - my $cmd = ['zfs', 'list', '-t', 'filesystem', '-Hp', '-o', 'name,avail,used']; + my $cmd = ['zfs', 'list', '-t', 'filesystem', '-Hp', '-o', 'name,avail,used']; my $res = []; - run_command($cmd, outfunc => sub { - my $line = shift; + run_command( + $cmd, + outfunc => sub { + my $line = shift; - if ($line =~m/^(\S+)\s+(\S+)\s+(\S+)$/) { - my ($pool, $size_str, $used_str) = ($1, $2, $3); - my $size = $size_str + 0; - my $used = $used_str + 0; - # ignore subvolumes generated by our ZFSPoolPlugin - return if $pool =~ m!/subvol-\d+-[^/]+$!; - return if $pool =~ m!/basevol-\d+-[^/]+$!; - push @$res, { pool => $pool, size => $size, free => $size-$used }; - } - }); + if ($line =~ m/^(\S+)\s+(\S+)\s+(\S+)$/) { + my ($pool, $size_str, $used_str) = ($1, $2, $3); + my $size = $size_str + 0; + my $used = $used_str + 0; + # ignore subvolumes generated by our ZFSPoolPlugin + return if $pool =~ m!/subvol-\d+-[^/]+$!; + return if $pool =~ m!/basevol-\d+-[^/]+$!; + push @$res, { pool => $pool, size => $size, free => $size - $used }; + } + }, + ); return $res; } @@ -1467,33 +1517,32 @@ sub resolv_portal { my ($server, $port) = PVE::Tools::parse_host_and_port($portal); if ($server) { - if (my $ip = resolv_server($server)) { - $server = $ip; - $server = "[$server]" if $server =~ /^$IPV6RE$/; - return $port ? "$server:$port" : $server; - } + if (my $ip = resolv_server($server)) { + $server = $ip; + $server = "[$server]" if $server =~ /^$IPV6RE$/; + return $port ? 
"$server:$port" : $server; + } } return undef if $noerr; raise_param_exc({ portal => "unable to resolve portal address '$portal'" }); } - sub scan_iscsi { my ($portal_in) = @_; my $portal; if (!($portal = resolv_portal($portal_in))) { - die "unable to parse/resolve portal address '${portal_in}'\n"; + die "unable to parse/resolve portal address '${portal_in}'\n"; } - return PVE::Storage::ISCSIPlugin::iscsi_discovery(undef, [ $portal ]); + return PVE::Storage::ISCSIPlugin::iscsi_discovery(undef, [$portal]); } sub storage_default_format { my ($cfg, $storeid) = @_; - my $scfg = storage_config ($cfg, $storeid); + my $scfg = storage_config($cfg, $storeid); return PVE::Storage::Plugin::default_format($scfg); } @@ -1501,11 +1550,11 @@ sub storage_default_format { sub vgroup_is_used { my ($cfg, $vgname) = @_; - foreach my $storeid (keys %{$cfg->{ids}}) { - my $scfg = storage_config($cfg, $storeid); - if ($scfg->{type} eq 'lvm' && $scfg->{vgname} eq $vgname) { - return 1; - } + foreach my $storeid (keys %{ $cfg->{ids} }) { + my $scfg = storage_config($cfg, $storeid); + if ($scfg->{type} eq 'lvm' && $scfg->{vgname} eq $vgname) { + return 1; + } } return undef; @@ -1514,11 +1563,11 @@ sub vgroup_is_used { sub target_is_used { my ($cfg, $target) = @_; - foreach my $storeid (keys %{$cfg->{ids}}) { - my $scfg = storage_config($cfg, $storeid); - if ($scfg->{type} eq 'iscsi' && $scfg->{target} eq $target) { - return 1; - } + foreach my $storeid (keys %{ $cfg->{ids} }) { + my $scfg = storage_config($cfg, $storeid); + if ($scfg->{type} eq 'iscsi' && $scfg->{target} eq $target) { + return 1; + } } return undef; @@ -1527,11 +1576,11 @@ sub target_is_used { sub volume_is_used { my ($cfg, $volid) = @_; - foreach my $storeid (keys %{$cfg->{ids}}) { - my $scfg = storage_config($cfg, $storeid); - if ($scfg->{base} && $scfg->{base} eq $volid) { - return 1; - } + foreach my $storeid (keys %{ $cfg->{ids} }) { + my $scfg = storage_config($cfg, $storeid); + if ($scfg->{base} && $scfg->{base} eq $volid) { + return 1; + } } return undef; @@ -1540,11 +1589,11 @@ sub volume_is_used { sub storage_is_used { my ($cfg, $storeid) = @_; - foreach my $sid (keys %{$cfg->{ids}}) { - my $scfg = storage_config($cfg, $sid); - next if !$scfg->{base}; - my ($st) = parse_volume_id($scfg->{base}); - return 1 if $st && $st eq $storeid; + foreach my $sid (keys %{ $cfg->{ids} }) { + my $scfg = storage_config($cfg, $sid); + next if !$scfg->{base}; + my ($st) = parse_volume_id($scfg->{base}); + return 1 if $st && $st eq $storeid; } return undef; @@ -1556,15 +1605,15 @@ sub foreach_volid { return if !$list; foreach my $sid (keys %$list) { - foreach my $info (@{$list->{$sid}}) { - my $volid = $info->{volid}; - my ($sid1, $volname) = parse_volume_id($volid, 1); - if ($sid1 && $sid1 eq $sid) { - &$func ($volid, $sid, $info); - } else { - warn "detected strange volid '$volid' in volume list for '$sid'\n"; - } - } + foreach my $info (@{ $list->{$sid} }) { + my $volid = $info->{volid}; + my ($sid1, $volname) = parse_volume_id($volid, 1); + if ($sid1 && $sid1 eq $sid) { + &$func($volid, $sid, $info); + } else { + warn "detected strange volid '$volid' in volume list for '$sid'\n"; + } + } } } @@ -1572,40 +1621,40 @@ sub decompressor_info { my ($format, $comp) = @_; if ($format eq 'tgz' && !defined($comp)) { - ($format, $comp) = ('tar', 'gz'); + ($format, $comp) = ('tar', 'gz'); } my $decompressor = { - tar => { - gz => ['tar', '-z'], - lzo => ['tar', '--lzop'], - zst => ['tar', '--zstd'], - bz2 => ['tar', '--bzip2'], - }, - vma => { - gz => ['zcat'], - 
lzo => ['lzop', '-d', '-c'], - zst => ['zstd', '-q', '-d', '-c'], - bz2 => ['bzcat', '-q'], - }, - iso => { - gz => ['zcat'], - lzo => ['lzop', '-d', '-c'], - zst => ['zstd', '-q', '-d', '-c'], - bz2 => ['bzcat', '-q'], - }, + tar => { + gz => ['tar', '-z'], + lzo => ['tar', '--lzop'], + zst => ['tar', '--zstd'], + bz2 => ['tar', '--bzip2'], + }, + vma => { + gz => ['zcat'], + lzo => ['lzop', '-d', '-c'], + zst => ['zstd', '-q', '-d', '-c'], + bz2 => ['bzcat', '-q'], + }, + iso => { + gz => ['zcat'], + lzo => ['lzop', '-d', '-c'], + zst => ['zstd', '-q', '-d', '-c'], + bz2 => ['bzcat', '-q'], + }, }; die "ERROR: archive format not defined\n" - if !defined($decompressor->{$format}); + if !defined($decompressor->{$format}); my $decomp; $decomp = $decompressor->{$format}->{$comp} if $comp; my $info = { - format => $format, - compression => $comp, - decompressor => $decomp, + format => $format, + compression => $comp, + decompressor => $decomp, }; return $info; @@ -1623,24 +1672,26 @@ sub archive_info { my $volid = basename($archive); if ($volid =~ /^(vzdump-(lxc|openvz|qemu)-.+$BACKUP_EXT_RE_2)$/) { - my $filename = "$1"; # untaint - my ($type, $extension, $comp) = ($2, $3, $4); - (my $format = $extension) =~ s/\..*//; - $info = decompressor_info($format, $comp); - $info->{filename} = $filename; - $info->{type} = $type; + my $filename = "$1"; # untaint + my ($type, $extension, $comp) = ($2, $3, $4); + (my $format = $extension) =~ s/\..*//; + $info = decompressor_info($format, $comp); + $info->{filename} = $filename; + $info->{type} = $type; - if ($volid =~ /^(vzdump-${type}-([1-9][0-9]{2,8})-(\d{4})_(\d{2})_(\d{2})-(\d{2})_(\d{2})_(\d{2}))\.${extension}$/) { - $info->{logfilename} = "$1".PVE::Storage::Plugin::LOG_EXT; - $info->{notesfilename} = "$filename".PVE::Storage::Plugin::NOTES_EXT; - $info->{vmid} = int($2); - $info->{ctime} = timelocal($8, $7, $6, $5, $4 - 1, $3); - $info->{is_std_name} = 1; - } else { - $info->{is_std_name} = 0; - } + if ($volid =~ + /^(vzdump-${type}-([1-9][0-9]{2,8})-(\d{4})_(\d{2})_(\d{2})-(\d{2})_(\d{2})_(\d{2}))\.${extension}$/ + ) { + $info->{logfilename} = "$1" . PVE::Storage::Plugin::LOG_EXT; + $info->{notesfilename} = "$filename" . PVE::Storage::Plugin::NOTES_EXT; + $info->{vmid} = int($2); + $info->{ctime} = timelocal($8, $7, $6, $5, $4 - 1, $3); + $info->{is_std_name} = 1; + } else { + $info->{is_std_name} = 0; + } } else { - die "ERROR: couldn't determine archive info from '$archive'\n"; + die "ERROR: couldn't determine archive info from '$archive'\n"; } return $info; @@ -1650,7 +1701,7 @@ sub archive_remove { my ($archive_path) = @_; die "cannot remove protected archive '$archive_path'\n" - if -e protection_file_path($archive_path); + if -e protection_file_path($archive_path); unlink $archive_path or $! == ENOENT or die "removing archive $archive_path failed: $!\n"; @@ -1664,29 +1715,29 @@ sub archive_auxiliaries_remove { my $archive_info = eval { archive_info($archive_path) } // {}; for my $type (qw(log notes)) { - my $filename = $archive_info->{"${type}filename"} or next; - my $path = "$dirname/$filename"; + my $filename = $archive_info->{"${type}filename"} or next; + my $path = "$dirname/$filename"; - if (-e $path) { - unlink $path or $! == ENOENT or log_warn("Removing $type file failed: $!"); - } + if (-e $path) { + unlink $path or $! == ENOENT or log_warn("Removing $type file failed: $!"); + } } } sub extract_vzdump_config_tar { my ($archive, $conf_re) = @_; - die "ERROR: file '$archive' does not exist\n" if ! 
-f $archive; + die "ERROR: file '$archive' does not exist\n" if !-f $archive; - my $pid = open(my $fh, '-|', 'tar', 'tf', $archive) || - die "unable to open file '$archive'\n"; + my $pid = open(my $fh, '-|', 'tar', 'tf', $archive) + || die "unable to open file '$archive'\n"; my $file; while (defined($file = <$fh>)) { - if ($file =~ $conf_re) { - $file = $1; # untaint - last; - } + if ($file =~ $conf_re) { + $file = $1; # untaint + last; + } } kill 15, $pid; @@ -1698,8 +1749,8 @@ sub extract_vzdump_config_tar { my $raw = ''; my $out = sub { - my $output = shift; - $raw .= "$output\n"; + my $output = shift; + $raw .= "$output\n"; }; run_command(['tar', '-xpOf', $archive, $file, '--occurrence'], outfunc => $out); @@ -1718,32 +1769,36 @@ sub extract_vzdump_config_vma { my $decompressor = $info->{decompressor}; if ($comp) { - my $cmd = [ [@$decompressor, $archive], ["vma", "config", "-"] ]; + my $cmd = [[@$decompressor, $archive], ["vma", "config", "-"]]; - # lzop/zcat exits with 1 when the pipe is closed early by vma, detect this and ignore the exit code later - my $broken_pipe; - my $errstring; - my $err = sub { - my $output = shift; - if ($output =~ m/lzop: Broken pipe: / || $output =~ m/gzip: stdout: Broken pipe/ || $output =~ m/zstd: error 70 : Write error.*Broken pipe/) { - $broken_pipe = 1; - } elsif (!defined ($errstring) && $output !~ m/^\s*$/) { - $errstring = "Failed to extract config from VMA archive: $output\n"; - } - }; + # lzop/zcat exits with 1 when the pipe is closed early by vma, detect this and ignore the exit code later + my $broken_pipe; + my $errstring; + my $err = sub { + my $output = shift; + if ( + $output =~ m/lzop: Broken pipe: / + || $output =~ m/gzip: stdout: Broken pipe/ + || $output =~ m/zstd: error 70 : Write error.*Broken pipe/ + ) { + $broken_pipe = 1; + } elsif (!defined($errstring) && $output !~ m/^\s*$/) { + $errstring = "Failed to extract config from VMA archive: $output\n"; + } + }; - my $rc = eval { run_command($cmd, outfunc => $out, errfunc => $err, noerr => 1) }; - my $rerr = $@; + my $rc = eval { run_command($cmd, outfunc => $out, errfunc => $err, noerr => 1) }; + my $rerr = $@; - $broken_pipe ||= $rc == 141; # broken pipe from vma POV + $broken_pipe ||= $rc == 141; # broken pipe from vma POV - if (!$errstring && !$broken_pipe && $rc != 0) { - die "$rerr\n" if $rerr; - die "config extraction failed with exit code $rc\n"; - } - die "$errstring\n" if $errstring; + if (!$errstring && !$broken_pipe && $rc != 0) { + die "$rerr\n" if $rerr; + die "config extraction failed with exit code $rc\n"; + } + die "$errstring\n" if $errstring; } else { - run_command(["vma", "config", $archive], outfunc => $out); + run_command(["vma", "config", $archive], outfunc => $out); } return wantarray ? ($raw, undef) : $raw; @@ -1754,22 +1809,22 @@ sub extract_vzdump_config { my ($storeid, $volname) = parse_volume_id($volid); if (defined($storeid)) { - my $scfg = storage_config($cfg, $storeid); - if ($scfg->{type} eq 'pbs') { - storage_check_enabled($cfg, $storeid); - return PVE::Storage::PBSPlugin->extract_vzdump_config($scfg, $volname, $storeid); - } + my $scfg = storage_config($cfg, $storeid); + if ($scfg->{type} eq 'pbs') { + storage_check_enabled($cfg, $storeid); + return PVE::Storage::PBSPlugin->extract_vzdump_config($scfg, $volname, $storeid); + } - if (storage_has_feature($cfg, $storeid, 'backup-provider')) { - my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - my $log_function = sub { - my ($log_level, $message) = @_; - my $prefix = $log_level eq 'err' ? 
'ERROR' : uc($log_level); - print "$prefix: $message\n"; - }; - my $backup_provider = $plugin->new_backup_provider($scfg, $storeid, $log_function); - return $backup_provider->archive_get_guest_config($volname, $storeid); - } + if (storage_has_feature($cfg, $storeid, 'backup-provider')) { + my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); + my $log_function = sub { + my ($log_level, $message) = @_; + my $prefix = $log_level eq 'err' ? 'ERROR' : uc($log_level); + print "$prefix: $message\n"; + }; + my $backup_provider = $plugin->new_backup_provider($scfg, $storeid, $log_function); + return $backup_provider->archive_get_guest_config($volname, $storeid); + } } my $archive = abs_filesystem_path($cfg, $volid); @@ -1779,15 +1834,15 @@ sub extract_vzdump_config { my $type = $info->{type}; if ($type eq 'lxc' || $type eq 'openvz') { - return extract_vzdump_config_tar($archive, qr!^(\./etc/vzdump/(pct|vps)\.conf)$!); + return extract_vzdump_config_tar($archive, qr!^(\./etc/vzdump/(pct|vps)\.conf)$!); } elsif ($type eq 'qemu') { - if ($format eq 'tar') { - return extract_vzdump_config_tar($archive, qr!\(\./qemu-server\.conf\)!); - } else { - return extract_vzdump_config_vma($archive, $comp); - } + if ($format eq 'tar') { + return extract_vzdump_config_tar($archive, qr!\(\./qemu-server\.conf\)!); + } else { + return extract_vzdump_config_vma($archive, $comp); + } } else { - die "cannot determine backup guest type for backup archive '$volid'\n"; + die "cannot determine backup guest type for backup archive '$volid'\n"; } } @@ -1798,9 +1853,9 @@ sub prune_backups { die "storage '$storeid' does not support backups\n" if !$scfg->{content}->{backup}; if (!defined($keep)) { - die "no prune-backups options configured for storage '$storeid'\n" - if !defined($scfg->{'prune-backups'}); - $keep = PVE::JSONSchema::parse_property_string('prune-backups', $scfg->{'prune-backups'}); + die "no prune-backups options configured for storage '$storeid'\n" + if !defined($scfg->{'prune-backups'}); + $keep = PVE::JSONSchema::parse_property_string('prune-backups', $scfg->{'prune-backups'}); } activate_storage($cfg, $storeid); @@ -1818,24 +1873,24 @@ my $prune_mark = sub { my $newly_included = {}; foreach my $prune_entry (@{$prune_entries}) { - my $mark = $prune_entry->{mark}; - my $id = $id_func->($prune_entry->{ctime}); - $already_included->{$id} = 1 if defined($mark) && $mark eq 'keep'; + my $mark = $prune_entry->{mark}; + my $id = $id_func->($prune_entry->{ctime}); + $already_included->{$id} = 1 if defined($mark) && $mark eq 'keep'; } foreach my $prune_entry (@{$prune_entries}) { - my $mark = $prune_entry->{mark}; - my $id = $id_func->($prune_entry->{ctime}); + my $mark = $prune_entry->{mark}; + my $id = $id_func->($prune_entry->{ctime}); - next if defined($mark) || $already_included->{$id}; + next if defined($mark) || $already_included->{$id}; - if (!$newly_included->{$id}) { - last if scalar(keys %{$newly_included}) >= $keep_count; - $newly_included->{$id} = 1; - $prune_entry->{mark} = 'keep'; - } else { - $prune_entry->{mark} = 'remove'; - } + if (!$newly_included->{$id}) { + last if scalar(keys %{$newly_included}) >= $keep_count; + $newly_included->{$id} = 1; + $prune_entry->{mark} = 'keep'; + } else { + $prune_entry->{mark} = 'remove'; + } } }; @@ -1845,50 +1900,74 @@ sub prune_mark_backup_group { my @positive_opts = grep { $_ ne 'keep-all' && $keep->{$_} > 0 } keys $keep->%*; if ($keep->{'keep-all'} || scalar(@positive_opts) == 0) { - foreach my $prune_entry (@{$backup_group}) { - # preserve additional 
information like 'protected' - next if $prune_entry->{mark} && $prune_entry->{mark} ne 'remove'; - $prune_entry->{mark} = 'keep'; - } - return; + foreach my $prune_entry (@{$backup_group}) { + # preserve additional information like 'protected' + next if $prune_entry->{mark} && $prune_entry->{mark} ne 'remove'; + $prune_entry->{mark} = 'keep'; + } + return; } - my $prune_list = [ sort { $b->{ctime} <=> $a->{ctime} } @{$backup_group} ]; + my $prune_list = [sort { $b->{ctime} <=> $a->{ctime} } @{$backup_group}]; - $prune_mark->($prune_list, $keep->{'keep-last'}, sub { - my ($ctime) = @_; - return $ctime; - }); - $prune_mark->($prune_list, $keep->{'keep-hourly'}, sub { - my ($ctime) = @_; - my (undef, undef, $hour, $day, $month, $year) = localtime($ctime); - return "$hour/$day/$month/$year"; - }); - $prune_mark->($prune_list, $keep->{'keep-daily'}, sub { - my ($ctime) = @_; - my (undef, undef, undef, $day, $month, $year) = localtime($ctime); - return "$day/$month/$year"; - }); - $prune_mark->($prune_list, $keep->{'keep-weekly'}, sub { - my ($ctime) = @_; - my ($sec, $min, $hour, $day, $month, $year) = localtime($ctime); - my $iso_week = int(strftime("%V", $sec, $min, $hour, $day, $month, $year)); - my $iso_week_year = int(strftime("%G", $sec, $min, $hour, $day, $month, $year)); - return "$iso_week/$iso_week_year"; - }); - $prune_mark->($prune_list, $keep->{'keep-monthly'}, sub { - my ($ctime) = @_; - my (undef, undef, undef, undef, $month, $year) = localtime($ctime); - return "$month/$year"; - }); - $prune_mark->($prune_list, $keep->{'keep-yearly'}, sub { - my ($ctime) = @_; - my $year = (localtime($ctime))[5]; - return "$year"; - }); + $prune_mark->( + $prune_list, + $keep->{'keep-last'}, + sub { + my ($ctime) = @_; + return $ctime; + }, + ); + $prune_mark->( + $prune_list, + $keep->{'keep-hourly'}, + sub { + my ($ctime) = @_; + my (undef, undef, $hour, $day, $month, $year) = localtime($ctime); + return "$hour/$day/$month/$year"; + }, + ); + $prune_mark->( + $prune_list, + $keep->{'keep-daily'}, + sub { + my ($ctime) = @_; + my (undef, undef, undef, $day, $month, $year) = localtime($ctime); + return "$day/$month/$year"; + }, + ); + $prune_mark->( + $prune_list, + $keep->{'keep-weekly'}, + sub { + my ($ctime) = @_; + my ($sec, $min, $hour, $day, $month, $year) = localtime($ctime); + my $iso_week = int(strftime("%V", $sec, $min, $hour, $day, $month, $year)); + my $iso_week_year = int(strftime("%G", $sec, $min, $hour, $day, $month, $year)); + return "$iso_week/$iso_week_year"; + }, + ); + $prune_mark->( + $prune_list, + $keep->{'keep-monthly'}, + sub { + my ($ctime) = @_; + my (undef, undef, undef, undef, $month, $year) = localtime($ctime); + return "$month/$year"; + }, + ); + $prune_mark->( + $prune_list, + $keep->{'keep-yearly'}, + sub { + my ($ctime) = @_; + my $year = (localtime($ctime))[5]; + return "$year"; + }, + ); foreach my $prune_entry (@{$prune_list}) { - $prune_entry->{mark} //= 'remove'; + $prune_entry->{mark} //= 'remove'; } } @@ -1899,8 +1978,9 @@ sub volume_export : prototype($$$$$$$) { die "cannot export volume '$volid'\n" if !$storeid; my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_export($scfg, $storeid, $fh, $volname, $format, - $snapshot, $base_snapshot, $with_snapshots); + return $plugin->volume_export( + $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, + ); } sub volume_import : prototype($$$$$$$$) { @@ -1911,15 +1991,15 @@ sub volume_import : 
prototype($$$$$$$$) { my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); return $plugin->volume_import( - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, - $allow_rename, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, ) // $volid; } @@ -1930,9 +2010,9 @@ sub volume_export_formats : prototype($$$$$) { return if !$storeid; my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); - return $plugin->volume_export_formats($scfg, $storeid, $volname, - $snapshot, $base_snapshot, - $with_snapshots); + return $plugin->volume_export_formats( + $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, + ); } sub volume_import_formats : prototype($$$$$) { @@ -1943,19 +2023,16 @@ sub volume_import_formats : prototype($$$$$) { my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); return $plugin->volume_import_formats( - $scfg, - $storeid, - $volname, - $snapshot, - $base_snapshot, - $with_snapshots, + $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, ); } sub volume_transfer_formats { my ($cfg, $src_volid, $dst_volid, $snapshot, $base_snapshot, $with_snapshots) = @_; - my @export_formats = volume_export_formats($cfg, $src_volid, $snapshot, $base_snapshot, $with_snapshots); - my @import_formats = volume_import_formats($cfg, $dst_volid, $snapshot, $base_snapshot, $with_snapshots); + my @export_formats = + volume_export_formats($cfg, $src_volid, $snapshot, $base_snapshot, $with_snapshots); + my @import_formats = + volume_import_formats($cfg, $dst_volid, $snapshot, $base_snapshot, $with_snapshots); my %import_hash = map { $_ => 1 } @import_formats; my @common = grep { $import_hash{$_} } @export_formats; return @common; @@ -1965,9 +2042,9 @@ sub volume_imported_message { my ($volid, $want_pattern) = @_; if ($want_pattern) { - return qr/successfully imported '([^']*)'$/; + return qr/successfully imported '([^']*)'$/; } else { - return "successfully imported '$volid'\n"; + return "successfully imported '$volid'\n"; } } @@ -1987,12 +2064,13 @@ sub volume_import_start { my $volid = "$storeid:$volname"; # find common import/export format, like volume_transfer_formats - my @import_formats = PVE::Storage::volume_import_formats($cfg, $volid, $opts->{snapshot}, undef, $with_snapshots); + my @import_formats = PVE::Storage::volume_import_formats($cfg, $volid, $opts->{snapshot}, undef, + $with_snapshots); my @export_formats = PVE::Tools::split_list($opts->{export_formats}); my %import_hash = map { $_ => 1 } @import_formats; my @common = grep { $import_hash{$_} } @export_formats; die "no matching import/export format found for storage '$storeid'\n" - if !@common; + if !@common; $format = $common[0]; my $input = IO::File->new(); @@ -2003,11 +2081,11 @@ sub volume_import_start { unlink $unix; my $cpid = open3($input, $info, $info, @$import) - or die "failed to spawn disk-import child - $!\n"; + or die "failed to spawn disk-import child - $!\n"; my $ready; eval { - PVE::Tools::run_with_timeout(5, sub { $ready = <$info>; }); + PVE::Tools::run_with_timeout(5, sub { $ready = <$info>; }); }; die "failed to read readyness from disk import child: $@\n" if $@; @@ -2015,19 +2093,19 @@ sub volume_import_start { print "$ready\n"; return { - fh => $info, - pid => $cpid, - socket => $unix, - format => $format, + fh => $info, + pid => $cpid, + 
socket => $unix, + format => $format, }; } sub volume_export_start { my ($cfg, $volid, $format, $log, $opts) = @_; - my $known_format = [ grep { $_ eq $format } $KNOWN_EXPORT_FORMATS->@* ]; + my $known_format = [grep { $_ eq $format } $KNOWN_EXPORT_FORMATS->@*]; if (!$known_format->@*) { - die "Cannot export '$volid' using unknown export format '$format'\n"; + die "Cannot export '$volid' using unknown export format '$format'\n"; } $format = $known_format->[0]; @@ -2053,7 +2131,7 @@ sub complete_storage { my $cfg = PVE::Storage::config(); - return $cmdname eq 'add' ? [] : [ PVE::Storage::storage_ids($cfg) ]; + return $cmdname eq 'add' ? [] : [PVE::Storage::storage_ids($cfg)]; } sub complete_storage_enabled { @@ -2062,9 +2140,9 @@ sub complete_storage_enabled { my $res = []; my $cfg = PVE::Storage::config(); - foreach my $sid (keys %{$cfg->{ids}}) { - next if !storage_check_enabled($cfg, $sid, undef, 1); - push @$res, $sid; + foreach my $sid (keys %{ $cfg->{ids} }) { + next if !storage_check_enabled($cfg, $sid, undef, 1); + push @$res, $sid; } return $res; } @@ -2083,27 +2161,27 @@ sub complete_volume { my $storage_list = complete_storage_enabled(); if ($cvalue =~ m/^([^:]+):/) { - $storage_list = [ $1 ]; + $storage_list = [$1]; } else { - if (scalar(@$storage_list) > 1) { - # only list storage IDs to avoid large listings - my $res = []; - foreach my $storeid (@$storage_list) { - # Hack: simply return 2 artificial values, so that - # completions does not finish - push @$res, "$storeid:volname", "$storeid:..."; - } - return $res; - } + if (scalar(@$storage_list) > 1) { + # only list storage IDs to avoid large listings + my $res = []; + foreach my $storeid (@$storage_list) { + # Hack: simply return 2 artificial values, so that + # completions does not finish + push @$res, "$storeid:volname", "$storeid:..."; + } + return $res; + } } my $res = []; foreach my $storeid (@$storage_list) { - my $vollist = PVE::Storage::volume_list($cfg, $storeid); + my $vollist = PVE::Storage::volume_list($cfg, $storeid); - foreach my $item (@$vollist) { - push @$res, $item->{volid}; - } + foreach my $item (@$vollist) { + push @$res, $item->{volid}; + } } return $res; @@ -2124,9 +2202,16 @@ sub rename_volume { $target_vmid = ($plugin->parse_volname($source_volname))[3] if !$target_vmid; - return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - return $plugin->rename_volume($scfg, $storeid, $source_volname, $target_vmid, $target_volname); - }); + return $plugin->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + return $plugin->rename_volume( + $scfg, $storeid, $source_volname, $target_vmid, $target_volname, + ); + }, + ); } # Various io-heavy operations require io/bandwidth limits which can be @@ -2142,64 +2227,68 @@ sub get_bandwidth_limit { # us. my $use_global_limits = 0; my $apply_limit = sub { - my ($bwlimit) = @_; - if (defined($bwlimit)) { - my $limits = PVE::JSONSchema::parse_property_string('bwlimit', $bwlimit); - my $limit = $limits->{$operation} // $limits->{default}; - if (defined($limit)) { - if (!$override || $limit < $override) { - $override = $limit; - } - return; - } - } - # If there was no applicable limit, try to apply the global ones. 
- $use_global_limits = 1; + my ($bwlimit) = @_; + if (defined($bwlimit)) { + my $limits = PVE::JSONSchema::parse_property_string('bwlimit', $bwlimit); + my $limit = $limits->{$operation} // $limits->{default}; + if (defined($limit)) { + if (!$override || $limit < $override) { + $override = $limit; + } + return; + } + } + # If there was no applicable limit, try to apply the global ones. + $use_global_limits = 1; }; my ($rpcenv, $authuser); if (defined($override)) { - $rpcenv = PVE::RPCEnvironment->get(); - $authuser = $rpcenv->get_user(); + $rpcenv = PVE::RPCEnvironment->get(); + $authuser = $rpcenv->get_user(); } # Apply per-storage limits - if there are storages involved. if (defined($storage_list) && grep { defined($_) } $storage_list->@*) { - my $config = config(); + my $config = config(); - # The Datastore.Allocate permission allows us to modify the per-storage - # limits, therefore it also allows us to override them. - # Since we have most likely multiple storages to check, do a quick check on - # the general '/storage' path to see if we can skip the checks entirely: - return $override if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1); + # The Datastore.Allocate permission allows us to modify the per-storage + # limits, therefore it also allows us to override them. + # Since we have most likely multiple storages to check, do a quick check on + # the general '/storage' path to see if we can skip the checks entirely: + return $override + if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1); - my %done; - foreach my $storage (@$storage_list) { - next if !defined($storage); - # Avoid duplicate checks: - next if $done{$storage}; - $done{$storage} = 1; + my %done; + foreach my $storage (@$storage_list) { + next if !defined($storage); + # Avoid duplicate checks: + next if $done{$storage}; + $done{$storage} = 1; - # Otherwise we may still have individual /storage/$ID permissions: - if (!$rpcenv || !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1)) { - # And if not: apply the limits. - my $storecfg = storage_config($config, $storage); - $apply_limit->($storecfg->{bwlimit}); - } - } + # Otherwise we may still have individual /storage/$ID permissions: + if ( + !$rpcenv + || !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1) + ) { + # And if not: apply the limits. + my $storecfg = storage_config($config, $storage); + $apply_limit->($storecfg->{bwlimit}); + } + } - # Storage limits take precedence over the datacenter defaults, so if - # a limit was applied: - return $override if !$use_global_limits; + # Storage limits take precedence over the datacenter defaults, so if + # a limit was applied: + return $override if !$use_global_limits; } # Sys.Modify on '/' means we can change datacenter.cfg which contains the # global default limits. if (!$rpcenv || !$rpcenv->check($authuser, '/', ['Sys.Modify'], 1)) { - # So if we cannot modify global limits, apply them to our currently - # requested override. - my $dc = cfs_read_file('datacenter.cfg'); - $apply_limit->($dc->{bwlimit}); + # So if we cannot modify global limits, apply them to our currently + # requested override. 
+ my $dc = cfs_read_file('datacenter.cfg'); + $apply_limit->($dc->{bwlimit}); } return $override; @@ -2211,7 +2300,7 @@ sub assert_sid_unused { my $cfg = config(); if (my $scfg = storage_config($cfg, $sid, 1)) { - die "storage ID '$sid' already defined\n"; + die "storage ID '$sid' already defined\n"; } return undef; @@ -2239,7 +2328,7 @@ sub get_import_metadata { my $scfg = storage_config($cfg, $storeid); my $plugin = PVE::Storage::Plugin->lookup($scfg->{type}); if (!$plugin->can('get_import_metadata')) { - die "storage does not support the importer API\n"; + die "storage does not support the importer API\n"; } return $plugin->get_import_metadata($scfg, $volname, $storeid); diff --git a/src/PVE/Storage/BTRFSPlugin.pm b/src/PVE/Storage/BTRFSPlugin.pm index 5d472fa..8c79ea4 100644 --- a/src/PVE/Storage/BTRFSPlugin.pm +++ b/src/PVE/Storage/BTRFSPlugin.pm @@ -31,55 +31,55 @@ sub type { sub plugindata { return { - content => [ - { - images => 1, - rootdir => 1, - vztmpl => 1, - iso => 1, - backup => 1, - snippets => 1, - none => 1, - import => 1, - }, - { images => 1, rootdir => 1 }, - ], - format => [ { raw => 1, subvol => 1 }, 'raw', ], - 'sensitive-properties' => {}, + content => [ + { + images => 1, + rootdir => 1, + vztmpl => 1, + iso => 1, + backup => 1, + snippets => 1, + none => 1, + import => 1, + }, + { images => 1, rootdir => 1 }, + ], + format => [{ raw => 1, subvol => 1 }, 'raw'], + 'sensitive-properties' => {}, }; } sub properties { return { - nocow => { - description => "Set the NOCOW flag on files." - . " Disables data checksumming and causes data errors to be unrecoverable from" - . " while allowing direct I/O. Only use this if data does not need to be any more" - . " safe than on a single ext4 formatted disk with no underlying raid system.", - type => 'boolean', - default => 0, - }, + nocow => { + description => "Set the NOCOW flag on files." + . " Disables data checksumming and causes data errors to be unrecoverable from" + . " while allowing direct I/O. Only use this if data does not need to be any more" + . " safe than on a single ext4 formatted disk with no underlying raid system.", + type => 'boolean', + default => 0, + }, }; } sub options { return { - path => { fixed => 1 }, - nodes => { optional => 1 }, - shared => { optional => 1 }, - disable => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - is_mountpoint => { optional => 1 }, - nocow => { optional => 1 }, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - preallocation => { optional => 1 }, - # TODO: The new variant of mkdir with `populate` vs `create`... + path => { fixed => 1 }, + nodes => { optional => 1 }, + shared => { optional => 1 }, + disable => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + is_mountpoint => { optional => 1 }, + nocow => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + preallocation => { optional => 1 }, + # TODO: The new variant of mkdir with `populate` vs `create`... }; } @@ -95,7 +95,8 @@ sub options { # Reuse `DirPlugin`'s `check_config`. This simply checks for invalid paths. 
sub check_config { my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_; - return PVE::Storage::DirPlugin::check_config($self, $sectionId, $config, $create, $skipSchemaCheck); + return PVE::Storage::DirPlugin::check_config($self, $sectionId, $config, $create, + $skipSchemaCheck); } my sub getfsmagic($) { @@ -106,7 +107,7 @@ my sub getfsmagic($) { # Just round up and extract what we need: my $buf = pack('x160'); if (0 != syscall(&PVE::Syscall::SYS_statfs, $path, $buf)) { - die "statfs on '$path' failed - $!\n"; + die "statfs on '$path' failed - $!\n"; } return unpack('L!', $buf); @@ -115,7 +116,7 @@ my sub getfsmagic($) { my sub assert_btrfs($) { my ($path) = @_; die "'$path' is not a btrfs file system\n" - if getfsmagic($path) != BTRFS_MAGIC; + if getfsmagic($path) != BTRFS_MAGIC; } sub activate_storage { @@ -126,8 +127,8 @@ sub activate_storage { my $mp = PVE::Storage::DirPlugin::parse_is_mountpoint($scfg); if (defined($mp) && !PVE::Storage::DirPlugin::path_is_mounted($mp, $cache->{mountdata})) { - die "unable to activate storage '$storeid' - directory is expected to be a mount point but" - ." is not mounted: '$mp'\n"; + die "unable to activate storage '$storeid' - directory is expected to be a mount point but" + . " is not mounted: '$mp'\n"; } assert_btrfs($path); # only assert this stuff now, ensures $path is there and better UX @@ -142,18 +143,14 @@ sub status { sub get_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute) = @_; - return PVE::Storage::DirPlugin::get_volume_attribute($class, $scfg, $storeid, $volname, $attribute); + return PVE::Storage::DirPlugin::get_volume_attribute($class, $scfg, $storeid, $volname, + $attribute); } sub update_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_; return PVE::Storage::DirPlugin::update_volume_attribute( - $class, - $scfg, - $storeid, - $volname, - $attribute, - $value, + $class, $scfg, $storeid, $volname, $attribute, $value, ); } @@ -171,7 +168,7 @@ sub raw_name_to_dir($) { # For the subvolume directory Strip the `.` suffix: if ($raw =~ /^(.*)\.raw$/) { - return $1; + return $1; } __error "internal error: bad disk name: $raw"; @@ -181,7 +178,7 @@ sub raw_file_to_subvol($) { my ($file) = @_; if ($file =~ m|^(.*)/disk\.raw$|) { - return "$1"; + return "$1"; } __error "internal error: bad raw path: $file"; @@ -190,26 +187,25 @@ sub raw_file_to_subvol($) { sub filesystem_path { my ($class, $scfg, $volname, $snapname) = @_; - my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname); my $path = $class->get_subdir($scfg, $vtype); $path .= "/$vmid" if $vtype eq 'images'; if ($vtype eq 'images' && defined($format) && $format eq 'raw') { - my $dir = raw_name_to_dir($name); - if ($snapname) { - $dir .= "\@$snapname"; - } - $path .= "/$dir/disk.raw"; + my $dir = raw_name_to_dir($name); + if ($snapname) { + $dir .= "\@$snapname"; + } + $path .= "/$dir/disk.raw"; } elsif ($vtype eq 'images' && defined($format) && $format eq 'subvol') { - $path .= "/$name"; - if ($snapname) { - $path .= "\@$snapname"; - } + $path .= "/$name"; + if ($snapname) { + $path .= "\@$snapname"; + } } else { - $path .= "/$name"; + $path .= "/$name"; } return wantarray ? 
($path, $vmid, $vtype) : $path; @@ -221,12 +217,12 @@ sub btrfs_cmd { my $msg = ''; my $func; if (defined($outfunc)) { - $func = sub { - my $part = &$outfunc(@_); - $msg .= $part if defined($part); - }; + $func = sub { + my $part = &$outfunc(@_); + $msg .= $part if defined($part); + }; } else { - $func = sub { $msg .= "$_[0]\n" }; + $func = sub { $msg .= "$_[0]\n" }; } run_command(['btrfs', '-q', @$cmd], errmsg => "command 'btrfs @$cmd' failed", outfunc => $func); @@ -257,7 +253,7 @@ sub create_base { my ($class, $storeid, $scfg, $volname) = @_; my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = - $class->parse_volname($volname); + $class->parse_volname($volname); my $newname = $name; $newname =~ s/^vm-/base-/; @@ -265,7 +261,7 @@ sub create_base { # If we're not working with a 'raw' file, which is the only thing that's "different" for btrfs, # or a subvolume, we forward to the DirPlugin if ($format ne 'raw' && $format ne 'subvol') { - return PVE::Storage::Plugin::create_base(@_); + return PVE::Storage::Plugin::create_base(@_); } my $path = $class->filesystem_path($scfg, $volname); @@ -275,12 +271,12 @@ sub create_base { my $subvol = $path; my $newsubvol = $newpath; if ($format eq 'raw') { - $subvol = raw_file_to_subvol($subvol); - $newsubvol = raw_file_to_subvol($newsubvol); + $subvol = raw_file_to_subvol($subvol); + $newsubvol = raw_file_to_subvol($newsubvol); } rename($subvol, $newsubvol) - || die "rename '$subvol' to '$newsubvol' failed - $!\n"; + || die "rename '$subvol' to '$newsubvol' failed - $!\n"; eval { $class->btrfs_cmd(['property', 'set', $newsubvol, 'ro', 'true']) }; warn $@ if $@; @@ -291,12 +287,12 @@ sub clone_image { my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_; my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + $class->parse_volname($volname); # If we're not working with a 'raw' file, which is the only thing that's "different" for btrfs, # or a subvolume, we forward to the DirPlugin if ($format ne 'raw' && $format ne 'subvol') { - return PVE::Storage::DirPlugin::clone_image(@_); + return PVE::Storage::DirPlugin::clone_image(@_); } my $imagedir = $class->get_subdir($scfg, 'images'); @@ -314,8 +310,8 @@ sub clone_image { my $subvol = $path; my $newsubvol = $newpath; if ($format eq 'raw') { - $subvol = raw_file_to_subvol($subvol); - $newsubvol = raw_file_to_subvol($newsubvol); + $subvol = raw_file_to_subvol($subvol); + $newsubvol = raw_file_to_subvol($newsubvol); } $class->btrfs_cmd(['subvolume', 'snapshot', '--', $subvol, $newsubvol]); @@ -327,7 +323,7 @@ sub alloc_image { my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_; if ($fmt ne 'raw' && $fmt ne 'subvol') { - return $class->SUPER::alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size); + return $class->SUPER::alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size); } # From Plugin.pm: @@ -341,7 +337,7 @@ sub alloc_image { my (undef, $tmpfmt) = PVE::Storage::Plugin::parse_name_dir($name); die "illegal name '$name' - wrong extension for format ('$tmpfmt != '$fmt')\n" - if $tmpfmt ne $fmt; + if $tmpfmt ne $fmt; # End copy from Plugin.pm @@ -353,46 +349,46 @@ sub alloc_image { my $path; if ($fmt eq 'raw') { - $path = "$subvol/disk.raw"; + $path = "$subvol/disk.raw"; } if ($fmt eq 'subvol' && !!$size) { - # NOTE: `btrfs send/recv` actually drops quota information so supporting subvolumes with - # quotas doesn't play nice with send/recv. 
- die "btrfs quotas are currently not supported, use an unsized subvolume or a raw file\n"; + # NOTE: `btrfs send/recv` actually drops quota information so supporting subvolumes with + # quotas doesn't play nice with send/recv. + die "btrfs quotas are currently not supported, use an unsized subvolume or a raw file\n"; } $class->btrfs_cmd(['subvolume', 'create', '--', $subvol]); eval { - if ($fmt eq 'subvol') { - # Nothing to do for now... + if ($fmt eq 'subvol') { + # Nothing to do for now... - # This is how we *would* do it: - # # Use the subvol's default 0/$id qgroup - # eval { - # # This call should happen at storage creation instead and therefore governed by a - # # configuration option! - # # $class->btrfs_cmd(['quota', 'enable', $subvol]); - # my $id = $class->btrfs_get_subvol_id($subvol); - # $class->btrfs_cmd(['qgroup', 'limit', "${size}k", "0/$id", $subvol]); - # }; - } elsif ($fmt eq 'raw') { - sysopen my $fh, $path, O_WRONLY | O_CREAT | O_EXCL - or die "failed to create raw file '$path' - $!\n"; - chattr($fh, ~FS_NOCOW_FL, FS_NOCOW_FL) if $scfg->{nocow}; - truncate($fh, $size * 1024) - or die "failed to set file size for '$path' - $!\n"; - close($fh); - } else { - die "internal format error (format = $fmt)\n"; - } + # This is how we *would* do it: + # # Use the subvol's default 0/$id qgroup + # eval { + # # This call should happen at storage creation instead and therefore governed by a + # # configuration option! + # # $class->btrfs_cmd(['quota', 'enable', $subvol]); + # my $id = $class->btrfs_get_subvol_id($subvol); + # $class->btrfs_cmd(['qgroup', 'limit', "${size}k", "0/$id", $subvol]); + # }; + } elsif ($fmt eq 'raw') { + sysopen my $fh, $path, O_WRONLY | O_CREAT | O_EXCL + or die "failed to create raw file '$path' - $!\n"; + chattr($fh, ~FS_NOCOW_FL, FS_NOCOW_FL) if $scfg->{nocow}; + truncate($fh, $size * 1024) + or die "failed to set file size for '$path' - $!\n"; + close($fh); + } else { + die "internal format error (format = $fmt)\n"; + } }; if (my $err = $@) { - eval { $class->btrfs_cmd(['subvolume', 'delete', '--', $subvol]); }; - warn $@ if $@; - die $err; + eval { $class->btrfs_cmd(['subvolume', 'delete', '--', $subvol]); }; + warn $@ if $@; + die $err; } return "$vmid/$name"; @@ -402,7 +398,7 @@ sub alloc_image { my sub path_is_subvolume : prototype($) { my ($path) = @_; my @stat = stat($path) - or die "stat failed on '$path' - $!\n"; + or die "stat failed on '$path' - $!\n"; my ($ino, $mode) = @stat[1, 2]; return S_ISDIR($mode) && $ino == BTRFS_FIRST_FREE_OBJECTID; } @@ -415,36 +411,42 @@ my sub foreach_snapshot_of_subvol : prototype($$) { my $basename = basename($subvol); my $dir = dirname($subvol); - dir_glob_foreach($dir, $BTRFS_SNAPSHOT_REGEX, sub { - my ($volume, $name, $snap_name) = ($1, $2, $3); - return if !path_is_subvolume("$dir/$volume"); - return if $name ne $basename; - $code->($snap_name); - }); + dir_glob_foreach( + $dir, + $BTRFS_SNAPSHOT_REGEX, + sub { + my ($volume, $name, $snap_name) = ($1, $2, $3); + return if !path_is_subvolume("$dir/$volume"); + return if $name ne $basename; + $code->($snap_name); + }, + ); } sub free_image { my ($class, $storeid, $scfg, $volname, $isBase, $_format) = @_; - my ($vtype, undef, $vmid, undef, undef, undef, $format) = - $class->parse_volname($volname); + my ($vtype, undef, $vmid, undef, undef, undef, $format) = $class->parse_volname($volname); if (!defined($format) || $vtype ne 'images' || ($format ne 'subvol' && $format ne 'raw')) { - return $class->SUPER::free_image($storeid, $scfg, $volname, $isBase, $_format); 
+ return $class->SUPER::free_image($storeid, $scfg, $volname, $isBase, $_format); } my $path = $class->filesystem_path($scfg, $volname); my $subvol = $path; if ($format eq 'raw') { - $subvol = raw_file_to_subvol($path); + $subvol = raw_file_to_subvol($path); } my @snapshot_vols; - foreach_snapshot_of_subvol($subvol, sub { - my ($snap_name) = @_; - push @snapshot_vols, "$subvol\@$snap_name"; - }); + foreach_snapshot_of_subvol( + $subvol, + sub { + my ($snap_name) = @_; + push @snapshot_vols, "$subvol\@$snap_name"; + }, + ); $class->btrfs_cmd(['subvolume', 'delete', '--', @snapshot_vols, $subvol]); # try to cleanup directory to not clutter storage with empty $vmid dirs if @@ -485,10 +487,10 @@ sub volume_size_info { my $format = ($class->parse_volname($volname))[6]; if (defined($format) && $format eq 'subvol') { - my $ctime = (stat($path))[10]; - my ($used, $size) = (0, 0); - #my ($used, $size) = btrfs_subvol_quota($class, $path); # uses wantarray - return wantarray ? ($size, 'subvol', $used, undef, $ctime) : $size; + my $ctime = (stat($path))[10]; + my ($used, $size) = (0, 0); + #my ($used, $size) = btrfs_subvol_quota($class, $path); # uses wantarray + return wantarray ? ($size, 'subvol', $used, undef, $ctime) : $size; } return PVE::Storage::Plugin::file_size_info($path, $timeout, $format); @@ -499,13 +501,13 @@ sub volume_resize { my $format = ($class->parse_volname($volname))[6]; if ($format eq 'subvol') { - # NOTE: `btrfs send/recv` actually drops quota information so supporting subvolumes with - # quotas doesn't play nice with send/recv. - die "cannot resize subvolume - btrfs quotas are currently not supported\n"; - # my $path = $class->filesystem_path($scfg, $volname); - # my $id = '0/' . $class->btrfs_get_subvol_id($path); - # $class->btrfs_cmd(['qgroup', 'limit', '--', "${size}k", "0/$id", $path]); - # return undef; + # NOTE: `btrfs send/recv` actually drops quota information so supporting subvolumes with + # quotas doesn't play nice with send/recv. + die "cannot resize subvolume - btrfs quotas are currently not supported\n"; + # my $path = $class->filesystem_path($scfg, $volname); + # my $id = '0/' . $class->btrfs_get_subvol_id($path); + # $class->btrfs_cmd(['qgroup', 'limit', '--', "${size}k", "0/$id", $path]); + # return undef; } return PVE::Storage::Plugin::volume_resize(@_); @@ -514,17 +516,17 @@ sub volume_resize { sub volume_snapshot { my ($class, $scfg, $storeid, $volname, $snap) = @_; - my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6]; + my ($name, $vmid, $format) = ($class->parse_volname($volname))[1, 2, 6]; if ($format ne 'subvol' && $format ne 'raw') { - return PVE::Storage::Plugin::volume_snapshot(@_); + return PVE::Storage::Plugin::volume_snapshot(@_); } my $path = $class->filesystem_path($scfg, $volname); my $snap_path = $class->filesystem_path($scfg, $volname, $snap); if ($format eq 'raw') { - $path = raw_file_to_subvol($path); - $snap_path = raw_file_to_subvol($snap_path); + $path = raw_file_to_subvol($path); + $snap_path = raw_file_to_subvol($snap_path); } my $snapshot_dir = $class->get_subdir($scfg, 'images') . 
"/$vmid"; @@ -537,24 +539,24 @@ sub volume_snapshot { sub volume_rollback_is_possible { my ($class, $scfg, $storeid, $volname, $snap, $blockers) = @_; - return 1; + return 1; } sub volume_snapshot_rollback { my ($class, $scfg, $storeid, $volname, $snap) = @_; - my ($name, $format) = ($class->parse_volname($volname))[1,6]; + my ($name, $format) = ($class->parse_volname($volname))[1, 6]; if ($format ne 'subvol' && $format ne 'raw') { - return PVE::Storage::Plugin::volume_snapshot_rollback(@_); + return PVE::Storage::Plugin::volume_snapshot_rollback(@_); } my $path = $class->filesystem_path($scfg, $volname); my $snap_path = $class->filesystem_path($scfg, $volname, $snap); if ($format eq 'raw') { - $path = raw_file_to_subvol($path); - $snap_path = raw_file_to_subvol($snap_path); + $path = raw_file_to_subvol($path); + $snap_path = raw_file_to_subvol($snap_path); } # Simple version would be: @@ -562,7 +564,7 @@ sub volume_snapshot_rollback { # create new # on error rename temp back # But for atomicity in case the rename after create-failure *also* fails, we create the new - # subvol first, then use RENAME_EXCHANGE, + # subvol first, then use RENAME_EXCHANGE, my $tmp_path = "$path.tmp.$$"; $class->btrfs_cmd(['subvolume', 'snapshot', '--', $snap_path, $tmp_path]); # The paths are absolute, so pass -1 as file descriptors. @@ -572,7 +574,7 @@ sub volume_snapshot_rollback { warn "failed to remove '$tmp_path' subvolume: $@" if $@; if (!$ok) { - die "failed to rotate '$tmp_path' into place at '$path' - $!\n"; + die "failed to rotate '$tmp_path' into place at '$path' - $!\n"; } return undef; @@ -581,16 +583,16 @@ sub volume_snapshot_rollback { sub volume_snapshot_delete { my ($class, $scfg, $storeid, $volname, $snap, $running) = @_; - my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6]; + my ($name, $vmid, $format) = ($class->parse_volname($volname))[1, 2, 6]; if ($format ne 'subvol' && $format ne 'raw') { - return PVE::Storage::Plugin::volume_snapshot_delete(@_); + return PVE::Storage::Plugin::volume_snapshot_delete(@_); } my $path = $class->filesystem_path($scfg, $volname, $snap); if ($format eq 'raw') { - $path = raw_file_to_subvol($path); + $path = raw_file_to_subvol($path); } $class->btrfs_cmd(['subvolume', 'delete', '--', $path]); @@ -602,39 +604,40 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - snapshot => { - current => { qcow2 => 1, raw => 1, subvol => 1 }, - snap => { qcow2 => 1, raw => 1, subvol => 1 } - }, - clone => { - base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, - current => { raw => 1 }, - snap => { raw => 1 }, - }, - template => { - current => { qcow2 => 1, raw => 1, vmdk => 1, subvol => 1 }, - }, - copy => { - base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, - current => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, - snap => { qcow2 => 1, raw => 1, subvol => 1 }, - }, - sparseinit => { - base => { qcow2 => 1, raw => 1, vmdk => 1 }, - current => { qcow2 => 1, raw => 1, vmdk => 1 }, - }, - rename => { - current => { qcow2 => 1, raw => 1, vmdk => 1 }, - }, + snapshot => { + current => { qcow2 => 1, raw => 1, subvol => 1 }, + snap => { qcow2 => 1, raw => 1, subvol => 1 }, + }, + clone => { + base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, + current => { raw => 1 }, + snap => { raw => 1 }, + }, + template => { + current => { qcow2 => 1, raw => 1, vmdk => 1, subvol => 1 }, + }, + copy => { + base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, + current => { 
qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 }, + snap => { qcow2 => 1, raw => 1, subvol => 1 }, + }, + sparseinit => { + base => { qcow2 => 1, raw => 1, vmdk => 1 }, + current => { qcow2 => 1, raw => 1, vmdk => 1 }, + }, + rename => { + current => { qcow2 => 1, raw => 1, vmdk => 1 }, + }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + $class->parse_volname($volname); my $key = undef; if ($snapname) { $key = 'snap'; } else { - $key = $isBase ? 'base' : 'current'; + $key = $isBase ? 'base' : 'current'; } return 1 if defined($features->{$feature}->{$key}->{$format}); @@ -650,51 +653,54 @@ sub list_images { # Copied from Plugin.pm, with file_size_info calls adapted: foreach my $fn (<$imagedir/[0-9][0-9]*/*>) { - # different to in Plugin.pm the regex below also excludes '@' as valid file name - next if $fn !~ m@^(/.+/(\d+)/([^/\@.]+(?:\.(qcow2|vmdk|subvol))?))$@; - $fn = $1; # untaint + # different to in Plugin.pm the regex below also excludes '@' as valid file name + next if $fn !~ m@^(/.+/(\d+)/([^/\@.]+(?:\.(qcow2|vmdk|subvol))?))$@; + $fn = $1; # untaint - my $owner = $2; - my $name = $3; - my $ext = $4; + my $owner = $2; + my $name = $3; + my $ext = $4; - next if !$vollist && defined($vmid) && ($owner ne $vmid); + next if !$vollist && defined($vmid) && ($owner ne $vmid); - my $volid = "$storeid:$owner/$name"; - my ($size, $format, $used, $parent, $ctime); + my $volid = "$storeid:$owner/$name"; + my ($size, $format, $used, $parent, $ctime); - if (!$ext) { # raw - $volid .= '.raw'; - $format = 'raw'; - ($size, undef, $used, $parent, $ctime) = - PVE::Storage::Plugin::file_size_info("$fn/disk.raw", undef, $format); - } elsif ($ext eq 'subvol') { - ($used, $size) = (0, 0); - #($used, $size) = btrfs_subvol_quota($class, $fn); - $format = 'subvol'; - } else { - $format = $ext; - ($size, undef, $used, $parent, $ctime) = eval { - PVE::Storage::Plugin::file_size_info($fn, undef, $format); - }; - if (my $err = $@) { - die $err if $err !~ m/Image is not in \S+ format$/; - warn "image '$fn' is not in expected format '$format', querying as raw\n"; - ($size, undef, $used, $parent, $ctime) = - PVE::Storage::Plugin::file_size_info($fn, undef, 'raw'); - $format = 'invalid'; - } - } - next if !defined($size); + if (!$ext) { # raw + $volid .= '.raw'; + $format = 'raw'; + ($size, undef, $used, $parent, $ctime) = + PVE::Storage::Plugin::file_size_info("$fn/disk.raw", undef, $format); + } elsif ($ext eq 'subvol') { + ($used, $size) = (0, 0); + #($used, $size) = btrfs_subvol_quota($class, $fn); + $format = 'subvol'; + } else { + $format = $ext; + ($size, undef, $used, $parent, $ctime) = + eval { PVE::Storage::Plugin::file_size_info($fn, undef, $format); }; + if (my $err = $@) { + die $err if $err !~ m/Image is not in \S+ format$/; + warn "image '$fn' is not in expected format '$format', querying as raw\n"; + ($size, undef, $used, $parent, $ctime) = + PVE::Storage::Plugin::file_size_info($fn, undef, 'raw'); + $format = 'invalid'; + } + } + next if !defined($size); - if ($vollist) { - next if ! 
grep { $_ eq $volid } @$vollist; - } + if ($vollist) { + next if !grep { $_ eq $volid } @$vollist; + } - my $info = { - volid => $volid, format => $format, - size => $size, vmid => $owner, used => $used, parent => $parent, - }; + my $info = { + volid => $volid, + format => $format, + size => $size, + vmid => $owner, + used => $used, + parent => $parent, + }; $info->{ctime} = $ctime if $ctime; @@ -730,118 +736,116 @@ sub volume_import_formats { # Same as export-formats, beware the parameter order: return volume_export_formats( - $class, - $scfg, - $storeid, - $volname, - $snapshot, - $base_snapshot, - $with_snapshots, + $class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, ); } sub volume_export { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, ) = @_; if ($format ne 'btrfs') { - return PVE::Storage::Plugin::volume_export(@_); + return PVE::Storage::Plugin::volume_export(@_); } die "format 'btrfs' only works on snapshots\n" - if !defined $snapshot; + if !defined $snapshot; die "'btrfs' format in incremental mode requires snapshots to be listed explicitly\n" - if defined($base_snapshot) && $with_snapshots && ref($with_snapshots) ne 'ARRAY'; + if defined($base_snapshot) && $with_snapshots && ref($with_snapshots) ne 'ARRAY'; my $volume_format = ($class->parse_volname($volname))[6]; die "btrfs-sending volumes of type $volume_format ('$volname') is not supported\n" - if $volume_format ne 'raw' && $volume_format ne 'subvol'; + if $volume_format ne 'raw' && $volume_format ne 'subvol'; my $path = $class->path($scfg, $volname, $storeid); if ($volume_format eq 'raw') { - $path = raw_file_to_subvol($path); + $path = raw_file_to_subvol($path); } my $cmd = ['btrfs', '-q', 'send', '-e']; if ($base_snapshot) { - my $base = $class->path($scfg, $volname, $storeid, $base_snapshot); - if ($volume_format eq 'raw') { - $base = raw_file_to_subvol($base); - } - push @$cmd, '-p', $base; + my $base = $class->path($scfg, $volname, $storeid, $base_snapshot); + if ($volume_format eq 'raw') { + $base = raw_file_to_subvol($base); + } + push @$cmd, '-p', $base; } push @$cmd, '--'; if (ref($with_snapshots) eq 'ARRAY') { - push @$cmd, (map { "$path\@$_" } ($with_snapshots // [])->@*); - push @$cmd, $path if !defined($base_snapshot); + push @$cmd, (map { "$path\@$_" } ($with_snapshots // [])->@*); + push @$cmd, $path if !defined($base_snapshot); } else { - foreach_snapshot_of_subvol($path, sub { - my ($snap_name) = @_; - # NOTE: if there is a $snapshot specified via the arguments, it is added last below. - push @$cmd, "$path\@$snap_name" if !(defined($snapshot) && $snap_name eq $snapshot); - }); + foreach_snapshot_of_subvol( + $path, + sub { + my ($snap_name) = @_; + # NOTE: if there is a $snapshot specified via the arguments, it is added last below. + push @$cmd, "$path\@$snap_name" + if !(defined($snapshot) && $snap_name eq $snapshot); + }, + ); } $path .= "\@$snapshot" if defined($snapshot); push @$cmd, $path; - run_command($cmd, output => '>&'.fileno($fh)); + run_command($cmd, output => '>&' . 
fileno($fh)); return; } sub volume_import { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, - $allow_rename, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, ) = @_; if ($format ne 'btrfs') { - return PVE::Storage::Plugin::volume_import(@_); + return PVE::Storage::Plugin::volume_import(@_); } die "format 'btrfs' only works on snapshots\n" - if !defined $snapshot; + if !defined $snapshot; my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $volume_format) = - $class->parse_volname($volname); + $class->parse_volname($volname); die "btrfs-receiving volumes of type $volume_format ('$volname') is not supported\n" - if $volume_format ne 'raw' && $volume_format ne 'subvol'; + if $volume_format ne 'raw' && $volume_format ne 'subvol'; if (defined($base_snapshot)) { - my $path = $class->path($scfg, $volname, $storeid, $base_snapshot); - $path = raw_file_to_subvol($path) if $volume_format eq 'raw'; - die "base snapshot '$base_snapshot' not found - no such directory '$path'\n" - if !path_is_subvolume($path); + my $path = $class->path($scfg, $volname, $storeid, $base_snapshot); + $path = raw_file_to_subvol($path) if $volume_format eq 'raw'; + die "base snapshot '$base_snapshot' not found - no such directory '$path'\n" + if !path_is_subvolume($path); } my $destination = $class->filesystem_path($scfg, $volname); if ($volume_format eq 'raw') { - $destination = raw_file_to_subvol($destination); + $destination = raw_file_to_subvol($destination); } if (!defined($base_snapshot) && -e $destination) { - die "volume $volname already exists\n" if !$allow_rename; - $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format, 1); + die "volume $volname already exists\n" if !$allow_rename; + $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format, 1); } my $imagedir = $class->get_subdir($scfg, $vtype); @@ -850,105 +854,110 @@ sub volume_import { my $tmppath = "$imagedir/recv.$vmid.tmp"; mkdir($imagedir); # FIXME: if $scfg->{mkdir}; if (!mkdir($tmppath)) { - die "temp receive directory already exists at '$tmppath', incomplete concurrent import?\n" - if $! == EEXIST; - die "failed to create temporary receive directory at '$tmppath' - $!\n"; + die "temp receive directory already exists at '$tmppath', incomplete concurrent import?\n" + if $! == EEXIST; + die "failed to create temporary receive directory at '$tmppath' - $!\n"; } my $dh = IO::Dir->new($tmppath) - or die "failed to open temporary receive directory '$tmppath' - $!\n"; + or die "failed to open temporary receive directory '$tmppath' - $!\n"; eval { - run_command(['btrfs', '-q', 'receive', '-e', '--', $tmppath], input => '<&'.fileno($fh)); + run_command( + ['btrfs', '-q', 'receive', '-e', '--', $tmppath], + input => '<&' . fileno($fh), + ); - # Analyze the received subvolumes; - my ($diskname, $found_snapshot, @snapshots); - $dh->rewind; - while (defined(my $entry = $dh->read)) { - next if $entry eq '.' || $entry eq '..'; - next if $entry !~ /^$BTRFS_SNAPSHOT_REGEX$/; - my ($cur_diskname, $cur_snapshot) = ($1, $2); + # Analyze the received subvolumes; + my ($diskname, $found_snapshot, @snapshots); + $dh->rewind; + while (defined(my $entry = $dh->read)) { + next if $entry eq '.' 
|| $entry eq '..'; + next if $entry !~ /^$BTRFS_SNAPSHOT_REGEX$/; + my ($cur_diskname, $cur_snapshot) = ($1, $2); - die "send stream included a non-snapshot subvolume\n" - if !defined($cur_snapshot); + die "send stream included a non-snapshot subvolume\n" + if !defined($cur_snapshot); - if (!defined($diskname)) { - $diskname = $cur_diskname; - } else { - die "multiple disks contained in stream ('$diskname' vs '$cur_diskname')\n" - if $diskname ne $cur_diskname; - } + if (!defined($diskname)) { + $diskname = $cur_diskname; + } else { + die "multiple disks contained in stream ('$diskname' vs '$cur_diskname')\n" + if $diskname ne $cur_diskname; + } - if ($cur_snapshot eq $snapshot) { - $found_snapshot = 1; - } else { - push @snapshots, $cur_snapshot; - } - } + if ($cur_snapshot eq $snapshot) { + $found_snapshot = 1; + } else { + push @snapshots, $cur_snapshot; + } + } - die "send stream did not contain the expected current snapshot '$snapshot'\n" - if !$found_snapshot; + die "send stream did not contain the expected current snapshot '$snapshot'\n" + if !$found_snapshot; - # Rotate the disk into place, first the current state: - # Note that read-only subvolumes cannot be moved into different directories, but for the - # "current" state we also want a writable copy, so start with that: - $class->btrfs_cmd(['property', 'set', '-f', "$tmppath/$diskname\@$snapshot", 'ro', 'false']); - PVE::Tools::renameat2( - -1, - "$tmppath/$diskname\@$snapshot", - -1, - $destination, - &PVE::Tools::RENAME_NOREPLACE, - ) or die "failed to move received snapshot '$tmppath/$diskname\@$snapshot'" - . " into place at '$destination' - $!\n"; + # Rotate the disk into place, first the current state: + # Note that read-only subvolumes cannot be moved into different directories, but for the + # "current" state we also want a writable copy, so start with that: + $class->btrfs_cmd( + ['property', 'set', '-f', "$tmppath/$diskname\@$snapshot", 'ro', 'false']); + PVE::Tools::renameat2( + -1, + "$tmppath/$diskname\@$snapshot", + -1, + $destination, + &PVE::Tools::RENAME_NOREPLACE, + ) + or die "failed to move received snapshot '$tmppath/$diskname\@$snapshot'" + . " into place at '$destination' - $!\n"; - # Now recreate the actual snapshot: - $class->btrfs_cmd([ - 'subvolume', - 'snapshot', - '-r', - '--', - $destination, - "$destination\@$snapshot", - ]); + # Now recreate the actual snapshot: + $class->btrfs_cmd([ + 'subvolume', 'snapshot', '-r', '--', $destination, "$destination\@$snapshot", + ]); - # Now go through the remaining snapshots (if any) - foreach my $snap (@snapshots) { - $class->btrfs_cmd(['property', 'set', '-f', "$tmppath/$diskname\@$snap", 'ro', 'false']); - PVE::Tools::renameat2( - -1, - "$tmppath/$diskname\@$snap", - -1, - "$destination\@$snap", - &PVE::Tools::RENAME_NOREPLACE, - ) or die "failed to move received snapshot '$tmppath/$diskname\@$snap'" - . " into place at '$destination\@$snap' - $!\n"; - eval { $class->btrfs_cmd(['property', 'set', "$destination\@$snap", 'ro', 'true']) }; - warn "failed to make $destination\@$snap read-only - $!\n" if $@; - } + # Now go through the remaining snapshots (if any) + foreach my $snap (@snapshots) { + $class->btrfs_cmd( + ['property', 'set', '-f', "$tmppath/$diskname\@$snap", 'ro', 'false']); + PVE::Tools::renameat2( + -1, + "$tmppath/$diskname\@$snap", + -1, + "$destination\@$snap", + &PVE::Tools::RENAME_NOREPLACE, + ) + or die "failed to move received snapshot '$tmppath/$diskname\@$snap'" + . 
" into place at '$destination\@$snap' - $!\n"; + eval { + $class->btrfs_cmd(['property', 'set', "$destination\@$snap", 'ro', 'true']); + }; + warn "failed to make $destination\@$snap read-only - $!\n" if $@; + } }; my $err = $@; eval { - # Cleanup all the received snapshots we did not move into place, so we can remove the temp - # directory. - if ($dh) { - $dh->rewind; - while (defined(my $entry = $dh->read)) { - next if $entry eq '.' || $entry eq '..'; - eval { $class->btrfs_cmd(['subvolume', 'delete', '--', "$tmppath/$entry"]) }; - warn $@ if $@; - } - $dh->close; undef $dh; - } - if (!rmdir($tmppath)) { - warn "failed to remove temporary directory '$tmppath' - $!\n" - } + # Cleanup all the received snapshots we did not move into place, so we can remove the temp + # directory. + if ($dh) { + $dh->rewind; + while (defined(my $entry = $dh->read)) { + next if $entry eq '.' || $entry eq '..'; + eval { $class->btrfs_cmd(['subvolume', 'delete', '--', "$tmppath/$entry"]) }; + warn $@ if $@; + } + $dh->close; + undef $dh; + } + if (!rmdir($tmppath)) { + warn "failed to remove temporary directory '$tmppath' - $!\n"; + } }; warn $@ if $@; if ($err) { - # clean up if the directory ended up being empty after an error - rmdir($tmppath); - die $err; + # clean up if the directory ended up being empty after an error + rmdir($tmppath); + die $err; } return "$storeid:$volname"; @@ -961,11 +970,13 @@ sub rename_volume { my $format = ($class->parse_volname($source_volname))[6]; if ($format ne 'raw' && $format ne 'subvol') { - return $class->SUPER::rename_volume($scfg, $storeid, $source_volname, $target_vmid, $target_volname); + return $class->SUPER::rename_volume( + $scfg, $storeid, $source_volname, $target_vmid, $target_volname, + ); } $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format, 1) - if !$target_volname; + if !$target_volname; $target_volname = "$target_vmid/$target_volname"; my $basedir = $class->get_subdir($scfg, 'images'); @@ -978,8 +989,8 @@ sub rename_volume { my $new_path = "${basedir}/${target_dir}"; die "target volume '${target_volname}' already exists\n" if -e $new_path; - rename $old_path, $new_path || - die "rename '$old_path' to '$new_path' failed - $!\n"; + rename $old_path, $new_path + || die "rename '$old_path' to '$new_path' failed - $!\n"; return "${storeid}:$target_volname"; } diff --git a/src/PVE/Storage/CIFSPlugin.pm b/src/PVE/Storage/CIFSPlugin.pm index f47861e..c1441e9 100644 --- a/src/PVE/Storage/CIFSPlugin.pm +++ b/src/PVE/Storage/CIFSPlugin.pm @@ -16,7 +16,7 @@ use base qw(PVE::Storage::Plugin); sub cifs_is_mounted : prototype($$) { my ($scfg, $mountdata) = @_; - my ($mountpoint, $server, $share) = $scfg->@{'path', 'server', 'share'}; + my ($mountpoint, $server, $share) = $scfg->@{ 'path', 'server', 'share' }; my $subdir = $scfg->{subdir} // ''; $server = "[$server]" if Net::IP::ip_is_ipv6($server); @@ -24,9 +24,9 @@ sub cifs_is_mounted : prototype($$) { $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata; return $mountpoint if grep { - $_->[2] =~ /^cifs/ && - $_->[0] =~ m|^\Q$source\E/?$| && - $_->[1] eq $mountpoint + $_->[2] =~ /^cifs/ + && $_->[0] =~ m|^\Q$source\E/?$| + && $_->[1] eq $mountpoint } @$mountdata; return undef; } @@ -40,7 +40,7 @@ sub cifs_delete_credentials { my ($storeid) = @_; if (my $cred_file = get_cred_file($storeid)) { - unlink($cred_file) or warn "removing cifs credientials '$cred_file' failed: $!\n"; + unlink($cred_file) or warn "removing cifs credientials '$cred_file' failed: $!\n"; } } @@ -61,7 +61,7 
@@ sub get_cred_file { my $cred_file = cifs_cred_file_name($storeid); if (-e $cred_file) { - return $cred_file; + return $cred_file; } return undef; } @@ -69,7 +69,7 @@ sub get_cred_file { sub cifs_mount : prototype($$$$$) { my ($scfg, $storeid, $smbver, $user, $domain) = @_; - my ($mountpoint, $server, $share, $options) = $scfg->@{'path', 'server', 'share', 'options'}; + my ($mountpoint, $server, $share, $options) = $scfg->@{ 'path', 'server', 'share', 'options' }; my $subdir = $scfg->{subdir} // ''; $server = "[$server]" if Net::IP::ip_is_ipv6($server); @@ -78,10 +78,10 @@ sub cifs_mount : prototype($$$$$) { my $cmd = ['/bin/mount', '-t', 'cifs', $source, $mountpoint, '-o', 'soft', '-o']; if (my $cred_file = get_cred_file($storeid)) { - push @$cmd, "username=$user", '-o', "credentials=$cred_file"; - push @$cmd, '-o', "domain=$domain" if defined($domain); + push @$cmd, "username=$user", '-o', "credentials=$cred_file"; + push @$cmd, '-o', "domain=$domain" if defined($domain); } else { - push @$cmd, 'guest,username=guest'; + push @$cmd, 'guest,username=guest'; } push @$cmd, '-o', defined($smbver) ? "vers=$smbver" : "vers=default"; @@ -98,69 +98,79 @@ sub type { sub plugindata { return { - content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, - backup => 1, snippets => 1, import => 1}, { images => 1 }], - format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ], - 'sensitive-properties' => { password => 1 }, + content => [ + { + images => 1, + rootdir => 1, + vztmpl => 1, + iso => 1, + backup => 1, + snippets => 1, + import => 1, + }, + { images => 1 }, + ], + format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'], + 'sensitive-properties' => { password => 1 }, }; } sub properties { return { - share => { - description => "CIFS share.", - type => 'string', - }, - password => { - description => "Password for accessing the share/datastore.", - type => 'string', - maxLength => 256, - }, - domain => { - description => "CIFS domain.", - type => 'string', - optional => 1, - maxLength => 256, - }, - smbversion => { - description => "SMB protocol version. 'default' if not set, negotiates the highest SMB2+" - ." version supported by both the client and server.", - type => 'string', - default => 'default', - enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'], - optional => 1, - }, + share => { + description => "CIFS share.", + type => 'string', + }, + password => { + description => "Password for accessing the share/datastore.", + type => 'string', + maxLength => 256, + }, + domain => { + description => "CIFS domain.", + type => 'string', + optional => 1, + maxLength => 256, + }, + smbversion => { + description => + "SMB protocol version. 'default' if not set, negotiates the highest SMB2+" + . 
" version supported by both the client and server.", + type => 'string', + default => 'default', + enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'], + optional => 1, + }, }; } sub options { return { - path => { fixed => 1 }, - 'content-dirs' => { optional => 1 }, - server => { fixed => 1 }, - share => { fixed => 1 }, - subdir => { optional => 1 }, - nodes => { optional => 1 }, - disable => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - username => { optional => 1 }, - password => { optional => 1}, - domain => { optional => 1}, - smbversion => { optional => 1}, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - bwlimit => { optional => 1 }, - preallocation => { optional => 1 }, - options => { optional => 1 }, + path => { fixed => 1 }, + 'content-dirs' => { optional => 1 }, + server => { fixed => 1 }, + share => { fixed => 1 }, + subdir => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + username => { optional => 1 }, + password => { optional => 1 }, + domain => { optional => 1 }, + smbversion => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + bwlimit => { optional => 1 }, + preallocation => { optional => 1 }, + options => { optional => 1 }, }; } - sub check_config { my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_; @@ -175,12 +185,12 @@ sub on_add_hook { my ($class, $storeid, $scfg, %sensitive) = @_; if (defined($sensitive{password})) { - cifs_set_credentials($sensitive{password}, $storeid); - if (!exists($scfg->{username})) { - warn "storage $storeid: ignoring password parameter, no user set\n"; - } + cifs_set_credentials($sensitive{password}, $storeid); + if (!exists($scfg->{username})) { + warn "storage $storeid: ignoring password parameter, no user set\n"; + } } else { - cifs_delete_credentials($storeid); + cifs_delete_credentials($storeid); } return; @@ -192,12 +202,12 @@ sub on_update_hook { return if !exists($sensitive{password}); if (defined($sensitive{password})) { - cifs_set_credentials($sensitive{password}, $storeid); - if (!exists($scfg->{username})) { - warn "storage $storeid: ignoring password parameter, no user set\n"; - } + cifs_set_credentials($sensitive{password}, $storeid); + if (!exists($scfg->{username})) { + warn "storage $storeid: ignoring password parameter, no user set\n"; + } } else { - cifs_delete_credentials($storeid); + cifs_delete_credentials($storeid); } return; @@ -215,10 +225,10 @@ sub status { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; return undef - if !cifs_is_mounted($scfg, $cache->{mountdata}); + if !cifs_is_mounted($scfg, $cache->{mountdata}); return $class->SUPER::status($storeid, $scfg, $cache); } @@ -227,19 +237,18 @@ sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; if (!cifs_is_mounted($scfg, $cache->{mountdata})) { - 
$class->config_aware_base_mkdir($scfg, $path); + $class->config_aware_base_mkdir($scfg, $path); - die "unable to activate storage '$storeid' - " . - "directory '$path' does not exist\n" if ! -d $path; + die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n" + if !-d $path; - cifs_mount($scfg, $storeid, $scfg->{smbversion}, - $scfg->{username}, $scfg->{domain}); + cifs_mount($scfg, $storeid, $scfg->{smbversion}, $scfg->{username}, $scfg->{domain}); } $class->SUPER::activate_storage($storeid, $scfg, $cache); @@ -249,45 +258,48 @@ sub deactivate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; if (cifs_is_mounted($scfg, $cache->{mountdata})) { - my $cmd = ['/bin/umount', $path]; - run_command($cmd, errmsg => 'umount error'); + my $cmd = ['/bin/umount', $path]; + run_command($cmd, errmsg => 'umount error'); } } sub check_connection { my ($class, $storeid, $scfg) = @_; - my $servicename = '//'.$scfg->{server}.'/'.$scfg->{share}; + my $servicename = '//' . $scfg->{server} . '/' . $scfg->{share}; my $cmd = ['/usr/bin/smbclient', $servicename, '-d', '0']; if (defined($scfg->{smbversion}) && $scfg->{smbversion} ne 'default') { - # max-protocol version, so basically only relevant for smb2 vs smb3 - push @$cmd, '-m', "smb" . int($scfg->{smbversion}); + # max-protocol version, so basically only relevant for smb2 vs smb3 + push @$cmd, '-m', "smb" . int($scfg->{smbversion}); } if (my $cred_file = get_cred_file($storeid)) { - push @$cmd, '-U', $scfg->{username}, '-A', $cred_file; - push @$cmd, '-W', $scfg->{domain} if $scfg->{domain}; + push @$cmd, '-U', $scfg->{username}, '-A', $cred_file; + push @$cmd, '-W', $scfg->{domain} if $scfg->{domain}; } else { - push @$cmd, '-U', 'Guest','-N'; + push @$cmd, '-U', 'Guest', '-N'; } push @$cmd, '-c', 'echo 1 0'; my $out_str; my $out = sub { $out_str .= shift }; - eval { run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub {}) }; + eval { + run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub { }); + }; if (my $err = $@) { - die "$out_str\n" if defined($out_str) && - ($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/); - return 0; + die "$out_str\n" + if defined($out_str) + && ($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/); + return 0; } return 1; diff --git a/src/PVE/Storage/CephFSPlugin.pm b/src/PVE/Storage/CephFSPlugin.pm index 73edecb..67c79aa 100644 --- a/src/PVE/Storage/CephFSPlugin.pm +++ b/src/PVE/Storage/CephFSPlugin.pm @@ -27,13 +27,13 @@ sub cephfs_is_mounted { $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata; return $mountpoint if grep { - $_->[2] =~ m#^ceph|fuse\.ceph-fuse# && - $_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$# && - $_->[1] eq $mountpoint + $_->[2] =~ m#^ceph|fuse\.ceph-fuse# + && $_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$# + && $_->[1] eq $mountpoint } @$mountdata; warn "A filesystem is already mounted on $mountpoint\n" - if grep { $_->[1] eq $mountpoint } @$mountdata; + if grep { $_->[1] eq $mountpoint } @$mountdata; return undef; } @@ -42,12 +42,12 @@ sub cephfs_is_mounted { sub systemd_netmount { my ($where, $type, $what, $opts) = @_; -# don't do default deps, systemd v241 generator produces ordering deps on both -# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev -# option. 
Over three corners this gets us an ordering cycle on shutdown, which -# may make shutdown hang if the random cycle breaking hits the "wrong" unit to -# delete. - my $unit = <<"EOF"; + # don't do default deps, systemd v241 generator produces ordering deps on both + # local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev + # option. Over three corners this gets us an ordering cycle on shutdown, which + # may make shutdown hang if the random cycle breaking hits the "wrong" unit to + # delete. + my $unit = <<"EOF"; [Unit] Description=${where} DefaultDependencies=no @@ -71,7 +71,7 @@ EOF file_set_contents($unit_path, $unit); run_command(['systemctl', 'daemon-reload'], errmsg => "daemon-reload error") - if $daemon_needs_reload; + if $daemon_needs_reload; run_command(['systemctl', 'start', $unit_fn], errmsg => "mount error"); } @@ -91,16 +91,16 @@ sub cephfs_mount { my @opts = (); if ($scfg->{fuse}) { - $type = 'fuse.ceph'; - push @opts, "ceph.id=$cmd_option->{userid}"; - push @opts, "ceph.keyfile=$secretfile" if defined($secretfile); - push @opts, "ceph.conf=$configfile" if defined($configfile); - push @opts, "ceph.client_fs=$fs_name" if defined($fs_name); + $type = 'fuse.ceph'; + push @opts, "ceph.id=$cmd_option->{userid}"; + push @opts, "ceph.keyfile=$secretfile" if defined($secretfile); + push @opts, "ceph.conf=$configfile" if defined($configfile); + push @opts, "ceph.client_fs=$fs_name" if defined($fs_name); } else { - push @opts, "name=$cmd_option->{userid}"; - push @opts, "secretfile=$secretfile" if defined($secretfile); - push @opts, "conf=$configfile" if defined($configfile); - push @opts, "fs=$fs_name" if defined($fs_name); + push @opts, "name=$cmd_option->{userid}"; + push @opts, "secretfile=$secretfile" if defined($secretfile); + push @opts, "conf=$configfile" if defined($configfile); + push @opts, "fs=$fs_name" if defined($fs_name); } push @opts, $scfg->{options} if $scfg->{options}; @@ -116,47 +116,48 @@ sub type { sub plugindata { return { - content => [ { vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, - { backup => 1 }], - 'sensitive-properties' => { keyring => 1 }, + content => + [{ vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, { backup => 1 }], + 'sensitive-properties' => { keyring => 1 }, }; } sub properties { return { - fuse => { - description => "Mount CephFS through FUSE.", - type => 'boolean', - }, - 'fs-name' => { - description => "The Ceph filesystem name.", - type => 'string', format => 'pve-configid', - }, + fuse => { + description => "Mount CephFS through FUSE.", + type => 'boolean', + }, + 'fs-name' => { + description => "The Ceph filesystem name.", + type => 'string', + format => 'pve-configid', + }, }; } sub options { return { - path => { fixed => 1 }, - 'content-dirs' => { optional => 1 }, - monhost => { optional => 1}, - nodes => { optional => 1 }, - subdir => { optional => 1 }, - disable => { optional => 1 }, - options => { optional => 1 }, - username => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - fuse => { optional => 1 }, - bwlimit => { optional => 1 }, - maxfiles => { optional => 1 }, - keyring => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - 'fs-name' => { optional => 1 }, + path => { fixed => 1 }, + 'content-dirs' => { optional => 1 }, + monhost => { optional => 1 }, + nodes => { 
optional => 1 }, + subdir => { optional => 1 }, + disable => { optional => 1 }, + options => { optional => 1 }, + username => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + fuse => { optional => 1 }, + bwlimit => { optional => 1 }, + maxfiles => { optional => 1 }, + keyring => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + 'fs-name' => { optional => 1 }, }; } @@ -182,11 +183,11 @@ sub on_update_hook { my ($class, $storeid, $scfg, %param) = @_; if (exists($param{keyring})) { - if (defined($param{keyring})) { - PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring}); - } else { - PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid); - } + if (defined($param{keyring})) { + PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring}); + } else { + PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid); + } } return; @@ -215,14 +216,14 @@ sub activate_storage { # NOTE: mkpath may hang if storage is mounted but not reachable if (!cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) { - my $path = $scfg->{path}; + my $path = $scfg->{path}; - $class->config_aware_base_mkdir($scfg, $path); + $class->config_aware_base_mkdir($scfg, $path); - die "unable to activate storage '$storeid' - " . - "directory '$path' does not exist\n" if ! -d $path; + die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n" + if !-d $path; - cephfs_mount($scfg, $storeid); + cephfs_mount($scfg, $storeid); } $class->SUPER::activate_storage($storeid, $scfg, $cache); @@ -236,7 +237,7 @@ sub deactivate_storage { my $path = $scfg->{path}; if (cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) { - run_command(['/bin/umount', $path], errmsg => 'umount error'); + run_command(['/bin/umount', $path], errmsg => 'umount error'); } } diff --git a/src/PVE/Storage/Common.pm b/src/PVE/Storage/Common.pm index bd9c951..89a70f4 100644 --- a/src/PVE/Storage/Common.pm +++ b/src/PVE/Storage/Common.pm @@ -50,11 +50,14 @@ Possible formats a guest image can have. # Those formats should either be allowed here or support for them should be phased out (at least in # the storage layer). Can still be added again in the future, should any plugin provider request it. 
-PVE::JSONSchema::register_standard_option('pve-storage-image-format', { - type => 'string', - enum => ['raw', 'qcow2', 'subvol', 'vmdk'], - description => "Format of the image.", -}); +PVE::JSONSchema::register_standard_option( + 'pve-storage-image-format', + { + type => 'string', + enum => ['raw', 'qcow2', 'subvol', 'vmdk'], + description => "Format of the image.", + }, +); =pod @@ -80,7 +83,7 @@ sub align_size_up : prototype($$) { my $padding = ($granularity - $size % $granularity) % $granularity; my $aligned_size = $size + $padding; print "size $size is not aligned to granularity $granularity, rounding up to $aligned_size\n" - if $aligned_size != $size; + if $aligned_size != $size; return $aligned_size; } @@ -103,7 +106,7 @@ sub deallocate : prototype($$$) { $length = int($length); if (syscall(PVE::Syscall::fallocate, fileno($file_handle), $mode, $offset, $length) != 0) { - die "fallocate: punch hole failed (offset: $offset, length: $length) - $!\n"; + die "fallocate: punch hole failed (offset: $offset, length: $length) - $!\n"; } } diff --git a/src/PVE/Storage/DirPlugin.pm b/src/PVE/Storage/DirPlugin.pm index 734309f..10e4f70 100644 --- a/src/PVE/Storage/DirPlugin.pm +++ b/src/PVE/Storage/DirPlugin.pm @@ -24,66 +24,78 @@ sub type { sub plugindata { return { - content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, none => 1, import => 1 }, - { images => 1, rootdir => 1 }], - format => [ { raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 } , 'raw' ], - 'sensitive-properties' => {}, + content => [ + { + images => 1, + rootdir => 1, + vztmpl => 1, + iso => 1, + backup => 1, + snippets => 1, + none => 1, + import => 1, + }, + { images => 1, rootdir => 1 }, + ], + format => [{ raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 }, 'raw'], + 'sensitive-properties' => {}, }; } sub properties { return { - path => { - description => "File system path.", - type => 'string', format => 'pve-storage-path', - }, - mkdir => { - description => "Create the directory if it doesn't exist and populate it with default sub-dirs." - ." NOTE: Deprecated, use the 'create-base-path' and 'create-subdirs' options instead.", - type => 'boolean', - default => 'yes', - }, - 'create-base-path' => { - description => "Create the base directory if it doesn't exist.", - type => 'boolean', - default => 'yes', - }, - 'create-subdirs' => { - description => "Populate the directory with the default structure.", - type => 'boolean', - default => 'yes', - }, - is_mountpoint => { - description => - "Assume the given path is an externally managed mountpoint " . - "and consider the storage offline if it is not mounted. ". - "Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.", - type => 'string', - default => 'no', - }, - bwlimit => get_standard_option('bwlimit'), + path => { + description => "File system path.", + type => 'string', + format => 'pve-storage-path', + }, + mkdir => { + description => + "Create the directory if it doesn't exist and populate it with default sub-dirs." + . 
" NOTE: Deprecated, use the 'create-base-path' and 'create-subdirs' options instead.", + type => 'boolean', + default => 'yes', + }, + 'create-base-path' => { + description => "Create the base directory if it doesn't exist.", + type => 'boolean', + default => 'yes', + }, + 'create-subdirs' => { + description => "Populate the directory with the default structure.", + type => 'boolean', + default => 'yes', + }, + is_mountpoint => { + description => "Assume the given path is an externally managed mountpoint " + . "and consider the storage offline if it is not mounted. " + . "Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.", + type => 'string', + default => 'no', + }, + bwlimit => get_standard_option('bwlimit'), }; } sub options { return { - path => { fixed => 1 }, - 'content-dirs' => { optional => 1 }, - nodes => { optional => 1 }, - shared => { optional => 1 }, - disable => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - is_mountpoint => { optional => 1 }, - bwlimit => { optional => 1 }, - preallocation => { optional => 1 }, - }; + path => { fixed => 1 }, + 'content-dirs' => { optional => 1 }, + nodes => { optional => 1 }, + shared => { optional => 1 }, + disable => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + is_mountpoint => { optional => 1 }, + bwlimit => { optional => 1 }, + preallocation => { optional => 1 }, + }; } # Storage implementation @@ -106,7 +118,7 @@ sub parse_is_mountpoint { my $is_mp = $scfg->{is_mountpoint}; return undef if !defined $is_mp; if (defined(my $bool = PVE::JSONSchema::parse_boolean($is_mp))) { - return $bool ? $scfg->{path} : undef; + return $bool ? $scfg->{path} : undef; } return $is_mp; # contains a path } @@ -122,8 +134,8 @@ my $get_volume_notes_impl = sub { $path .= $class->SUPER::NOTES_EXT; if (-f $path) { - my $data = PVE::Tools::file_get_contents($path); - return eval { decode('UTF-8', $data, 1) } // $data; + my $data = PVE::Tools::file_get_contents($path); + return eval { decode('UTF-8', $data, 1) } // $data; } return ''; @@ -147,10 +159,10 @@ my $update_volume_notes_impl = sub { $path .= $class->SUPER::NOTES_EXT; if (defined($notes) && $notes ne '') { - my $encoded = encode('UTF-8', $notes); - PVE::Tools::file_set_contents($path, $encoded); + my $encoded = encode('UTF-8', $notes); + PVE::Tools::file_set_contents($path, $encoded); } else { - unlink $path or $! == ENOENT or die "could not delete notes - $!\n"; + unlink $path or $! 
== ENOENT or die "could not delete notes - $!\n"; } return; }; @@ -166,15 +178,15 @@ sub get_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute) = @_; if ($attribute eq 'notes') { - return $get_volume_notes_impl->($class, $scfg, $storeid, $volname); + return $get_volume_notes_impl->($class, $scfg, $storeid, $volname); } my ($vtype) = $class->parse_volname($volname); return if $vtype ne 'backup'; if ($attribute eq 'protected') { - my $path = $class->filesystem_path($scfg, $volname); - return -e PVE::Storage::protection_file_path($path) ? 1 : 0; + my $path = $class->filesystem_path($scfg, $volname); + return -e PVE::Storage::protection_file_path($path) ? 1 : 0; } return; @@ -184,28 +196,29 @@ sub update_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_; if ($attribute eq 'notes') { - return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $value); + return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $value); } my ($vtype) = $class->parse_volname($volname); die "only backups support attribute '$attribute'\n" if $vtype ne 'backup'; if ($attribute eq 'protected') { - my $path = $class->filesystem_path($scfg, $volname); - my $protection_path = PVE::Storage::protection_file_path($path); + my $path = $class->filesystem_path($scfg, $volname); + my $protection_path = PVE::Storage::protection_file_path($path); - return if !((-e $protection_path) xor $value); # protection status already correct + return if !((-e $protection_path) xor $value); # protection status already correct - if ($value) { - my $fh = IO::File->new($protection_path, O_CREAT, 0644) - or die "unable to create protection file '$protection_path' - $!\n"; - close($fh); - } else { - unlink $protection_path or $! == ENOENT - or die "could not delete protection file '$protection_path' - $!\n"; - } + if ($value) { + my $fh = IO::File->new($protection_path, O_CREAT, 0644) + or die "unable to create protection file '$protection_path' - $!\n"; + close($fh); + } else { + unlink $protection_path + or $! == ENOENT + or die "could not delete protection file '$protection_path' - $!\n"; + } - return; + return; } die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n"; @@ -215,16 +228,15 @@ sub status { my ($class, $storeid, $scfg, $cache) = @_; if (defined(my $mp = parse_is_mountpoint($scfg))) { - $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() + if !$cache->{mountdata}; - return undef if !path_is_mounted($mp, $cache->{mountdata}); + return undef if !path_is_mounted($mp, $cache->{mountdata}); } return $class->SUPER::status($storeid, $scfg, $cache); } - sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; @@ -232,8 +244,8 @@ sub activate_storage { my $mp = parse_is_mountpoint($scfg); if (defined($mp) && !path_is_mounted($mp, $cache->{mountdata})) { - die "unable to activate storage '$storeid' - " . - "directory is expected to be a mount point but is not mounted: '$mp'\n"; + die "unable to activate storage '$storeid' - " + . 
"directory is expected to be a mount point but is not mounted: '$mp'\n"; } $class->config_aware_base_mkdir($scfg, $path); @@ -242,10 +254,11 @@ sub activate_storage { sub check_config { my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_; - my $opts = PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck); + my $opts = + PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck); return $opts if !$create; if ($opts->{path} !~ m|^/[-/a-zA-Z0-9_.@]+$|) { - die "illegal path for directory storage: $opts->{path}\n"; + die "illegal path for directory storage: $opts->{path}\n"; } # remove trailing slashes from path $opts->{path} = File::Spec->canonpath($opts->{path}); @@ -264,40 +277,40 @@ sub get_import_metadata { my $isOva = 0; if ($fmt =~ m/^ova/) { - $isOva = 1; - push @$warnings, { type => 'ova-needs-extracting' }; + $isOva = 1; + push @$warnings, { type => 'ova-needs-extracting' }; } my $path = $class->path($scfg, $volname, $storeid, undef); my $res = PVE::GuestImport::OVF::parse_ovf($path, $isOva); my $disks = {}; for my $disk ($res->{disks}->@*) { - my $id = $disk->{disk_address}; - my $size = $disk->{virtual_size}; - my $path = $disk->{relative_path}; - my $volid; - if ($isOva) { - $volid = "$storeid:$volname/$path"; - } else { - $volid = "$storeid:import/$path", - } - $disks->{$id} = { - volid => $volid, - defined($size) ? (size => $size) : (), - }; + my $id = $disk->{disk_address}; + my $size = $disk->{virtual_size}; + my $path = $disk->{relative_path}; + my $volid; + if ($isOva) { + $volid = "$storeid:$volname/$path"; + } else { + $volid = "$storeid:import/$path",; + } + $disks->{$id} = { + volid => $volid, + defined($size) ? (size => $size) : (), + }; } if (defined($res->{qm}->{bios}) && $res->{qm}->{bios} eq 'ovmf') { - $disks->{efidisk0} = 1; - push @$warnings, { type => 'efi-state-lost', key => 'bios', value => 'ovmf' }; + $disks->{efidisk0} = 1; + push @$warnings, { type => 'efi-state-lost', key => 'bios', value => 'ovmf' }; } return { - type => 'vm', - source => $volname, - 'create-args' => $res->{qm}, - 'disks' => $disks, - warnings => $warnings, - net => $res->{net}, + type => 'vm', + source => $volname, + 'create-args' => $res->{qm}, + 'disks' => $disks, + warnings => $warnings, + net => $res->{net}, }; } diff --git a/src/PVE/Storage/ESXiPlugin.pm b/src/PVE/Storage/ESXiPlugin.pm index bedeac8..ab5242d 100644 --- a/src/PVE/Storage/ESXiPlugin.pm +++ b/src/PVE/Storage/ESXiPlugin.pm @@ -29,35 +29,36 @@ sub type { sub plugindata { return { - content => [ { import => 1 }, { import => 1 }], - format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ], - 'sensitive-properties' => { password => 1 }, + content => [{ import => 1 }, { import => 1 }], + format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'], + 'sensitive-properties' => { password => 1 }, }; } sub properties { return { - 'skip-cert-verification' => { - description => 'Disable TLS certificate verification, only enable on fully trusted networks!', - type => 'boolean', - default => 'false', - }, + 'skip-cert-verification' => { + description => + 'Disable TLS certificate verification, only enable on fully trusted networks!', + type => 'boolean', + default => 'false', + }, }; } sub options { return { - nodes => { optional => 1 }, - shared => { optional => 1 }, - disable => { optional => 1 }, - content => { optional => 1 }, - # FIXME: bwlimit => { optional => 1 }, - server => {}, - username => {}, - password => { optional => 1}, - 'skip-cert-verification' 
=> { optional => 1}, - port => { optional => 1 }, - }; + nodes => { optional => 1 }, + shared => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, + # FIXME: bwlimit => { optional => 1 }, + server => {}, + username => {}, + password => { optional => 1 }, + 'skip-cert-verification' => { optional => 1 }, + port => { optional => 1 }, + }; } sub esxi_cred_file_name { @@ -69,7 +70,7 @@ sub esxi_delete_credentials { my ($storeid) = @_; if (my $cred_file = get_cred_file($storeid)) { - unlink($cred_file) or warn "removing esxi credientials '$cred_file' failed: $!\n"; + unlink($cred_file) or warn "removing esxi credientials '$cred_file' failed: $!\n"; } } @@ -90,7 +91,7 @@ sub get_cred_file { my $cred_file = esxi_cred_file_name($storeid); if (-e $cred_file) { - return $cred_file; + return $cred_file; } return undef; } @@ -112,7 +113,7 @@ sub mount_dir : prototype($) { my sub check_esxi_import_package : prototype() { die "pve-esxi-import-tools package not installed, cannot proceed\n" - if !-e $ESXI_LIST_VMS; + if !-e $ESXI_LIST_VMS; } my sub is_old : prototype($) { @@ -130,9 +131,9 @@ sub get_manifest : prototype($$$;$) { $force_query ||= is_old($manifest_file); if (!$force_query && -e $manifest_file) { - return PVE::Storage::ESXiPlugin::Manifest->new( - file_get_contents($manifest_file), - ); + return PVE::Storage::ESXiPlugin::Manifest->new( + file_get_contents($manifest_file), + ); } check_esxi_import_package(); @@ -140,7 +141,7 @@ sub get_manifest : prototype($$$;$) { my @extra_params; push @extra_params, '--skip-cert-verification' if $scfg->{'skip-cert-verification'}; if (my $port = $scfg->{port}) { - push @extra_params, '--port', $port; + push @extra_params, '--port', $port; } my $host = $scfg->{server}; my $user = $scfg->{username}; @@ -148,15 +149,15 @@ sub get_manifest : prototype($$$;$) { my $json = ''; my $errmsg = ''; eval { - run_command( - [$ESXI_LIST_VMS, @extra_params, $host, $user, $pwfile], - outfunc => sub { $json .= $_[0] . "\n" }, - errfunc => sub { $errmsg .= $_[0] . "\n" }, - ); + run_command( + [$ESXI_LIST_VMS, @extra_params, $host, $user, $pwfile], + outfunc => sub { $json .= $_[0] . "\n" }, + errfunc => sub { $errmsg .= $_[0] . 
"\n" }, + ); }; if ($@) { - # propagate listvms error output if any, otherwise use the error from run_command - die $errmsg || $@; + # propagate listvms error output if any, otherwise use the error from run_command + die $errmsg || $@; } my $result = PVE::Storage::ESXiPlugin::Manifest->new($json); @@ -189,7 +190,7 @@ sub esxi_mount : prototype($$$;$) { my $manifest_file = "$rundir/manifest.json"; my $mount_dir = mount_dir($storeid); if (!mkdir($mount_dir)) { - die "mkdir failed on $mount_dir $!\n" if !$!{EEXIST}; + die "mkdir failed on $mount_dir $!\n" if !$!{EEXIST}; } my $scope_name_base = scope_name_base($storeid); @@ -200,7 +201,7 @@ sub esxi_mount : prototype($$$;$) { my $hostport = $host; $hostport = "[$hostport]" if Net::IP::ip_is_ipv6($host); if (my $port = $scfg->{port}) { - $hostport .= ":$port"; + $hostport .= ":$port"; } pipe(my $rd, my $wr) or die "failed to create pipe: $!\n"; @@ -208,49 +209,49 @@ sub esxi_mount : prototype($$$;$) { my $pid = fork(); die "fork failed: $!\n" if !defined($pid); if (!$pid) { - eval { - undef $rd; - POSIX::setsid(); - PVE::Systemd::enter_systemd_scope( - $scope_name_base, - "Proxmox VE FUSE mount for ESXi storage $storeid (server $host)", - ); + eval { + undef $rd; + POSIX::setsid(); + PVE::Systemd::enter_systemd_scope( + $scope_name_base, + "Proxmox VE FUSE mount for ESXi storage $storeid (server $host)", + ); - my @extra_params; - push @extra_params, '--skip-cert-verification' if $scfg->{'skip-cert-verification'}; + my @extra_params; + push @extra_params, '--skip-cert-verification' if $scfg->{'skip-cert-verification'}; - my $flags = fcntl($wr, F_GETFD, 0) - // die "failed to get file descriptor flags: $!\n"; - fcntl($wr, F_SETFD, $flags & ~FD_CLOEXEC) - // die "failed to remove CLOEXEC flag from fd: $!\n"; - exec {$ESXI_FUSE_TOOL} - $ESXI_FUSE_TOOL, - @extra_params, - '--change-user', 'nobody', - '--change-group', 'nogroup', - '-o', 'allow_other', - '--ready-fd', fileno($wr), - '--user', $user, - '--password-file', $pwfile, - $hostport, - $manifest_file, - $mount_dir; - die "exec failed: $!\n"; - }; - if (my $err = $@) { - print {$wr} "ERROR: $err"; - } - POSIX::_exit(1); - }; + my $flags = fcntl($wr, F_GETFD, 0) + // die "failed to get file descriptor flags: $!\n"; + fcntl($wr, F_SETFD, $flags & ~FD_CLOEXEC) + // die "failed to remove CLOEXEC flag from fd: $!\n"; + exec {$ESXI_FUSE_TOOL} + $ESXI_FUSE_TOOL, + @extra_params, + '--change-user', 'nobody', + '--change-group', 'nogroup', + '-o', 'allow_other', + '--ready-fd', fileno($wr), + '--user', $user, + '--password-file', $pwfile, + $hostport, + $manifest_file, + $mount_dir; + die "exec failed: $!\n"; + }; + if (my $err = $@) { + print {$wr} "ERROR: $err"; + } + POSIX::_exit(1); + } undef $wr; my $result = do { local $/ = undef; <$rd> }; if ($result =~ /^ERROR: (.*)$/) { - die "$1\n"; + die "$1\n"; } if (waitpid($pid, POSIX::WNOHANG) == $pid) { - die "failed to spawn fuse mount, process exited with status $?\n"; + die "failed to spawn fuse mount, process exited with status $?\n"; } } @@ -261,7 +262,7 @@ sub esxi_unmount : prototype($$$) { my $scope = "${scope_name_base}.scope"; my $mount_dir = mount_dir($storeid); - my %silence_std_outs = (outfunc => sub {}, errfunc => sub {}); + my %silence_std_outs = (outfunc => sub { }, errfunc => sub { }); eval { run_command(['/bin/systemctl', 'reset-failed', $scope], %silence_std_outs) }; eval { run_command(['/bin/systemctl', 'stop', $scope], %silence_std_outs) }; run_command(['/bin/umount', $mount_dir]); @@ -271,7 +272,7 @@ sub esxi_unmount : 
prototype($$$) { sub split_path : prototype($) { my ($path) = @_; if ($path =~ m!^([^/]+)/([^/]+)/(.+)$!) { - return ($1, $2, $3); + return ($1, $2, $3); } return; } @@ -280,22 +281,18 @@ sub get_import_metadata : prototype($$$$$) { my ($class, $scfg, $volname, $storeid) = @_; if ($volname !~ m!^([^/]+)/.*\.vmx$!) { - die "volume '$volname' does not look like an importable vm config\n"; + die "volume '$volname' does not look like an importable vm config\n"; } my $vmx_path = $class->path($scfg, $volname, $storeid, undef); if (!is_mounted($storeid)) { - die "storage '$storeid' is not activated\n"; + die "storage '$storeid' is not activated\n"; } my $manifest = $class->get_manifest($storeid, $scfg, 0); my $contents = file_get_contents($vmx_path); my $vmx = PVE::Storage::ESXiPlugin::VMX->parse( - $storeid, - $scfg, - $volname, - $contents, - $manifest, + $storeid, $scfg, $volname, $contents, $manifest, ); return $vmx->get_create_args(); } @@ -305,13 +302,14 @@ sub query_vmdk_size : prototype($;$) { my ($filename, $timeout) = @_; my $json = eval { - my $json = ''; - run_command(['/usr/bin/qemu-img', 'info', '--output=json', $filename], - timeout => $timeout, - outfunc => sub { $json .= $_[0]; }, - errfunc => sub { warn "$_[0]\n"; } - ); - from_json($json) + my $json = ''; + run_command( + ['/usr/bin/qemu-img', 'info', '--output=json', $filename], + timeout => $timeout, + outfunc => sub { $json .= $_[0]; }, + errfunc => sub { warn "$_[0]\n"; }, + ); + from_json($json); }; warn $@ if $@; @@ -339,18 +337,18 @@ sub on_update_hook { my $connection_detail_changed = 1; if (exists($sensitive{password})) { - $connection_detail_changed = 1; - if (defined($sensitive{password})) { - esxi_set_credentials($sensitive{password}, $storeid); - } else { - esxi_delete_credentials($storeid); - } + $connection_detail_changed = 1; + if (defined($sensitive{password})) { + esxi_set_credentials($sensitive{password}, $storeid); + } else { + esxi_delete_credentials($storeid); + } } if ($connection_detail_changed) { - # best-effort deactivate storage so that it can get re-mounted with updated params - eval { $class->deactivate_storage($storeid, $scfg) }; - warn $@ if $@; + # best-effort deactivate storage so that it can get re-mounted with updated params + eval { $class->deactivate_storage($storeid, $scfg) }; + warn $@ if $@; } return; @@ -417,7 +415,7 @@ sub parse_volname { # may be a 'vmx' (config), the paths are arbitrary... 
die "failed to parse volname '$volname'\n" - if $volname !~ m!^([^/]+)/([^/]+)/(.+)$!; + if $volname !~ m!^([^/]+)/([^/]+)/(.+)$!; return ('import', $volname, 0, undef, undef, undef, 'vmx') if $volname =~ /\.vmx$/; @@ -441,20 +439,21 @@ sub list_volumes { my $res = []; for my $dc_name (keys $data->%*) { - my $dc = $data->{$dc_name}; - my $vms = $dc->{vms}; - for my $vm_name (keys $vms->%*) { - my $vm = $vms->{$vm_name}; - my $ds_name = $vm->{config}->{datastore}; - my $path = $vm->{config}->{path}; - push @$res, { - content => 'import', - format => 'vmx', - name => $vm_name, - volid => "$storeid:$dc_name/$ds_name/$path", - size => 0, - }; - } + my $dc = $data->{$dc_name}; + my $vms = $dc->{vms}; + for my $vm_name (keys $vms->%*) { + my $vm = $vms->{$vm_name}; + my $ds_name = $vm->{config}->{datastore}; + my $path = $vm->{config}->{path}; + push @$res, + { + content => 'import', + format => 'vmx', + name => $vm_name, + volid => "$storeid:$dc_name/$ds_name/$path", + size => 0, + }; + } } return $res; @@ -507,7 +506,8 @@ sub volume_export_formats { } sub volume_export { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_; + my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) + = @_; # FIXME: maybe we can support raw+size via `qemu-img dd`? @@ -521,7 +521,18 @@ sub volume_import_formats { } sub volume_import { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_; + my ( + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ) = @_; die "importing not supported for $class\n"; } @@ -536,7 +547,7 @@ sub volume_size_info { my ($class, $scfg, $storeid, $volname, $timeout) = @_; if ($volname =~ /\.vmx$/) { - return wantarray ? (0, 'vmx') : 0; + return wantarray ? (0, 'vmx') : 0; } my $filename = $class->path($scfg, $volname, $storeid, undef); @@ -554,6 +565,7 @@ sub volume_snapshot_delete { die "deleting snapshots is not supported for $class\n"; } + sub volume_snapshot_info { my ($class, $scfg, $storeid, $volname) = @_; @@ -600,8 +612,8 @@ sub datacenter_for_vm { my ($self, $vm) = @_; for my $dc_name (sort keys %$self) { - my $dc = $self->{$dc_name}; - return $dc_name if exists($dc->{vms}->{$vm}); + my $dc = $self->{$dc_name}; + return $dc_name if exists($dc->{vms}->{$vm}); } return; @@ -612,11 +624,11 @@ sub datastore_for_vm { my @dc_names = defined($datacenter) ? ($datacenter) : keys %$self; for my $dc_name (@dc_names) { - my $dc = $self->{$dc_name} - or die "no such datacenter '$datacenter'\n"; - if (defined(my $vm = $dc->{vms}->{$vm})) { - return $vm->{config}->{datastore}; - } + my $dc = $self->{$dc_name} + or die "no such datacenter '$datacenter'\n"; + if (defined(my $vm = $dc->{vms}->{$vm})) { + return $vm->{config}->{datastore}; + } } return; @@ -626,21 +638,21 @@ sub resolve_path { my ($self, $path) = @_; if ($path !~ m|^/|) { - return wantarray ? (undef, undef, $path) : $path; + return wantarray ? (undef, undef, $path) : $path; } for my $dc_name (sort keys %$self) { - my $dc = $self->{$dc_name}; + my $dc = $self->{$dc_name}; - my $datastores = $dc->{datastores}; + my $datastores = $dc->{datastores}; - for my $ds_name (keys %$datastores) { - my $ds_path = $datastores->{$ds_name}; - if (substr($path, 0, length($ds_path)) eq $ds_path) { - my $relpath = substr($path, length($ds_path)); - return wantarray ? 
($dc_name, $ds_name, $relpath) : $relpath; - } - } + for my $ds_name (keys %$datastores) { + my $ds_path = $datastores->{$ds_name}; + if (substr($path, 0, length($ds_path)) eq $ds_path) { + my $relpath = substr($path, length($ds_path)); + return wantarray ? ($dc_name, $ds_name, $relpath) : $relpath; + } + } } return; @@ -651,20 +663,20 @@ sub config_path_for_vm { my @dc_names = defined($datacenter) ? ($datacenter) : keys %$self; for my $dc_name (@dc_names) { - my $dc = $self->{$dc_name} - or die "no such datacenter '$datacenter'\n"; + my $dc = $self->{$dc_name} + or die "no such datacenter '$datacenter'\n"; - my $vm = $dc->{vms}->{$vm} - or next; + my $vm = $dc->{vms}->{$vm} + or next; - my $cfg = $vm->{config}; - if (my (undef, $ds_name, $path) = $self->resolve_path($cfg->{path})) { - $ds_name //= $cfg->{datastore}; - return ($dc_name, $ds_name, $path); - } + my $cfg = $vm->{config}; + if (my (undef, $ds_name, $path) = $self->resolve_path($cfg->{path})) { + $ds_name //= $cfg->{datastore}; + return ($dc_name, $ds_name, $path); + } - die "failed to resolve path for vm '$vm' " - ."($dc_name, $cfg->{datastore}, $cfg->{path})\n"; + die "failed to resolve path for vm '$vm' " + . "($dc_name, $cfg->{datastore}, $cfg->{path})\n"; } die "no such vm '$vm'\n"; @@ -677,14 +689,14 @@ sub resolve_path_relative_to { my ($self, $vmx_path, $path) = @_; if ($path =~ m|^/|) { - if (my ($disk_dc, $disk_ds, $disk_path) = $self->resolve_path($path)) { - return "$disk_dc/$disk_ds/$disk_path"; - } - die "failed to resolve path '$path'\n"; + if (my ($disk_dc, $disk_ds, $disk_path) = $self->resolve_path($path)) { + return "$disk_dc/$disk_ds/$disk_path"; + } + die "failed to resolve path '$path'\n"; } my ($rel_dc, $rel_ds, $rel_path) = PVE::Storage::ESXiPlugin::split_path($vmx_path) - or die "bad path '$vmx_path'\n"; + or die "bad path '$vmx_path'\n"; $rel_path =~ s|/[^/]+$||; return "$rel_dc/$rel_ds/$rel_path/$path"; @@ -698,14 +710,14 @@ sub vm_for_vmx_path { my ($dc_name, $ds_name, $path) = PVE::Storage::ESXiPlugin::split_path($vmx_path); if (my $dc = $self->{$dc_name}) { - my $vms = $dc->{vms}; - for my $vm_name (keys %$vms) { - my $vm = $vms->{$vm_name}; - my $cfg_info = $vm->{config}; - if ($cfg_info->{datastore} eq $ds_name && $cfg_info->{path} eq $path) { - return $vm; - } - } + my $vms = $dc->{vms}; + for my $vm_name (keys %$vms) { + my $vm = $vms->{$vm_name}; + my $cfg_info = $vm->{config}; + if ($cfg_info->{datastore} eq $ds_name && $cfg_info->{path} eq $path) { + return $vm; + } + } } return; } @@ -720,7 +732,7 @@ use feature 'fc'; my sub unquote : prototype($) { my ($value) = @_; $value =~ s/^\"(.*)\"$/$1/s - or $value =~ s/^\'(.*)\'$/$1/s; + or $value =~ s/^\'(.*)\'$/$1/s; return $value; } @@ -730,14 +742,14 @@ sub parse : prototype($$$$$$) { my $conf = {}; for my $line (split(/\n/, $vmxdata)) { - $line =~ s/^\s+//; - $line =~ s/\s+$//; - next if $line !~ /^(\S+)\s*=\s*(.+)$/; - my ($key, $value) = ($1, $2); + $line =~ s/^\s+//; + $line =~ s/\s+$//; + next if $line !~ /^(\S+)\s*=\s*(.+)$/; + my ($key, $value) = ($1, $2); - $value = unquote($value); + $value = unquote($value); - $conf->{$key} = $value; + $conf->{$key} = $value; } $conf->{'pve.storeid'} = $storeid; @@ -757,7 +769,7 @@ sub manifest { $_[0]->{'pve.manifest'} } sub is_disk_entry : prototype($) { my ($id) = @_; if ($id =~ /^(scsi|ide|sata|nvme)(\d+:\d+)(:?\.file[nN]ame)?$/) { - return ($1, $2); + return ($1, $2); } return; } @@ -765,7 +777,7 @@ sub is_disk_entry : prototype($) { sub is_cdrom { my ($self, $bus, $slot) = @_; if (my $type = 
$self->{"${bus}${slot}.deviceType"}) { - return $type =~ /cdrom/; + return $type =~ /cdrom/; } return; } @@ -774,16 +786,16 @@ sub for_each_disk { my ($self, $code) = @_; for my $key (sort keys %$self) { - my ($bus, $slot) = is_disk_entry($key) - or next; - my $kind = $self->is_cdrom($bus, $slot) ? 'cdrom' : 'disk'; + my ($bus, $slot) = is_disk_entry($key) + or next; + my $kind = $self->is_cdrom($bus, $slot) ? 'cdrom' : 'disk'; - my $file = $self->{$key}; + my $file = $self->{$key}; - my ($maj, $min) = split(/:/, $slot, 2); - my $vdev = $self->{"${bus}${maj}.virtualDev"}; # may of course be undef... + my ($maj, $min) = split(/:/, $slot, 2); + my $vdev = $self->{"${bus}${maj}.virtualDev"}; # may of course be undef... - $code->($bus, $slot, $file, $vdev, $kind); + $code->($bus, $slot, $file, $vdev, $kind); } return; @@ -794,25 +806,25 @@ sub for_each_netdev { my $found_devs = {}; for my $key (keys %$self) { - next if $key !~ /^ethernet(\d+)\.(.+)$/; - my ($slot, $opt) = ($1, $2); + next if $key !~ /^ethernet(\d+)\.(.+)$/; + my ($slot, $opt) = ($1, $2); - my $dev = ($found_devs->{$slot} //= {}); - $dev->{$opt} = $self->{$key}; + my $dev = ($found_devs->{$slot} //= {}); + $dev->{$opt} = $self->{$key}; } for my $id (sort keys %$found_devs) { - my $dev = $found_devs->{$id}; + my $dev = $found_devs->{$id}; - next if ($dev->{present} // '') ne 'TRUE'; + next if ($dev->{present} // '') ne 'TRUE'; - my $ty = $dev->{addressType}; - my $mac = $dev->{address}; - if ($ty && fc($ty) =~ /^(static|generated|vpx)$/) { - $mac = $dev->{generatedAddress} // $mac; - } + my $ty = $dev->{addressType}; + my $mac = $dev->{address}; + if ($ty && fc($ty) =~ /^(static|generated|vpx)$/) { + $mac = $dev->{generatedAddress} // $mac; + } - $code->($id, $dev, $mac); + $code->($id, $dev, $mac); } return; @@ -823,18 +835,18 @@ sub for_each_serial { my $found_serials = {}; for my $key (sort keys %$self) { - next if $key !~ /^serial(\d+)\.(.+)$/; - my ($slot, $opt) = ($1, $2); - my $serial = ($found_serials->{$1} //= {}); - $serial->{$opt} = $self->{$key}; + next if $key !~ /^serial(\d+)\.(.+)$/; + my ($slot, $opt) = ($1, $2); + my $serial = ($found_serials->{$1} //= {}); + $serial->{$opt} = $self->{$key}; } for my $id (sort { $a <=> $b } keys %$found_serials) { - my $serial = $found_serials->{$id}; + my $serial = $found_serials->{$id}; - next if ($serial->{present} // '') ne 'TRUE'; + next if ($serial->{present} // '') ne 'TRUE'; - $code->($id, $serial); + $code->($id, $serial); } return; @@ -875,79 +887,79 @@ sub is_windows { } my %guest_types_windows = ( - 'dos' => 'other', - 'longhorn' => 'w2k8', - 'winNetBusiness' => 'w2k3', - 'windows9' => 'win10', - 'windows9-64' => 'win10', - 'windows9srv' => 'win10', - 'windows9srv-64' => 'win10', - 'windows11-64' => 'win11', - 'windows12-64' => 'win11', # FIXME / win12? - 'win2000AdvServ' => 'w2k', - 'win2000Pro' => 'w2k', - 'win2000Serv' => 'w2k', - 'win31' => 'other', - 'windows7' => 'win7', - 'windows7-64' => 'win7', - 'windows8' => 'win8', - 'windows8-64' => 'win8', - 'win95' => 'other', - 'win98' => 'other', - 'winNT' => 'wxp', # ? 
- 'winNetEnterprise' => 'w2k3', - 'winNetEnterprise-64' => 'w2k3', - 'winNetDatacenter' => 'w2k3', - 'winNetDatacenter-64' => 'w2k3', - 'winNetStandard' => 'w2k3', - 'winNetStandard-64' => 'w2k3', - 'winNetWeb' => 'w2k3', - 'winLonghorn' => 'w2k8', - 'winLonghorn-64' => 'w2k8', - 'windows7Server-64' => 'w2k8', - 'windows8Server-64' => 'win8', - 'windows9Server-64' => 'win10', - 'windows2019srv-64' => 'win10', + 'dos' => 'other', + 'longhorn' => 'w2k8', + 'winNetBusiness' => 'w2k3', + 'windows9' => 'win10', + 'windows9-64' => 'win10', + 'windows9srv' => 'win10', + 'windows9srv-64' => 'win10', + 'windows11-64' => 'win11', + 'windows12-64' => 'win11', # FIXME / win12? + 'win2000AdvServ' => 'w2k', + 'win2000Pro' => 'w2k', + 'win2000Serv' => 'w2k', + 'win31' => 'other', + 'windows7' => 'win7', + 'windows7-64' => 'win7', + 'windows8' => 'win8', + 'windows8-64' => 'win8', + 'win95' => 'other', + 'win98' => 'other', + 'winNT' => 'wxp', # ? + 'winNetEnterprise' => 'w2k3', + 'winNetEnterprise-64' => 'w2k3', + 'winNetDatacenter' => 'w2k3', + 'winNetDatacenter-64' => 'w2k3', + 'winNetStandard' => 'w2k3', + 'winNetStandard-64' => 'w2k3', + 'winNetWeb' => 'w2k3', + 'winLonghorn' => 'w2k8', + 'winLonghorn-64' => 'w2k8', + 'windows7Server-64' => 'w2k8', + 'windows8Server-64' => 'win8', + 'windows9Server-64' => 'win10', + 'windows2019srv-64' => 'win10', 'windows2019srvNext-64' => 'win11', 'windows2022srvNext-64' => 'win11', # FIXME / win12? - 'winVista' => 'wvista', - 'winVista-64' => 'wvista', - 'winXPPro' => 'wxp', - 'winXPPro-64' => 'wxp', + 'winVista' => 'wvista', + 'winVista-64' => 'wvista', + 'winXPPro' => 'wxp', + 'winXPPro-64' => 'wxp', ); my %guest_types_other = ( - 'freeBSD11' => 'other', + 'freeBSD11' => 'other', 'freeBSD11-64' => 'other', - 'freeBSD12' => 'other', + 'freeBSD12' => 'other', 'freeBSD12-64' => 'other', - 'freeBSD13' => 'other', + 'freeBSD13' => 'other', 'freeBSD13-64' => 'other', - 'freeBSD14' => 'other', + 'freeBSD14' => 'other', 'freeBSD14-64' => 'other', - 'freeBSD' => 'other', - 'freeBSD-64' => 'other', - 'os2' => 'other', - 'netware5' => 'other', - 'netware6' => 'other', - 'solaris10' => 'solaris', + 'freeBSD' => 'other', + 'freeBSD-64' => 'other', + 'os2' => 'other', + 'netware5' => 'other', + 'netware6' => 'other', + 'solaris10' => 'solaris', 'solaris10-64' => 'solaris', 'solaris11-64' => 'solaris', - 'other' => 'other', - 'other-64' => 'other', - 'openserver5' => 'other', - 'openserver6' => 'other', - 'unixware7' => 'other', - 'eComStation' => 'other', + 'other' => 'other', + 'other-64' => 'other', + 'openserver5' => 'other', + 'openserver6' => 'other', + 'unixware7' => 'other', + 'eComStation' => 'other', 'eComStation2' => 'other', - 'solaris8' => 'solaris', - 'solaris9' => 'solaris', - 'vmkernel' => 'other', - 'vmkernel5' => 'other', - 'vmkernel6' => 'other', - 'vmkernel65' => 'other', - 'vmkernel7' => 'other', - 'vmkernel8' => 'other', + 'solaris8' => 'solaris', + 'solaris9' => 'solaris', + 'vmkernel' => 'other', + 'vmkernel5' => 'other', + 'vmkernel6' => 'other', + 'vmkernel65' => 'other', + 'vmkernel7' => 'other', + 'vmkernel8' => 'other', ); # Best effort translation from vmware guest os type to pve. 
@@ -955,13 +967,13 @@ my %guest_types_other = ( sub guest_type { my ($self) = @_; if (defined(my $guest = $self->{guestOS})) { - if (defined(my $known_windows = $guest_types_windows{$guest})) { - return ($known_windows, 1); - } elsif (defined(my $known_other = $guest_types_other{$guest})) { - return ($known_other, 0); - } - # This covers all the 'Mac OS' types AFAICT - return ('other', 0) if $guest =~ /^darwin/; + if (defined(my $known_windows = $guest_types_windows{$guest})) { + return ($known_windows, 1); + } elsif (defined(my $known_other = $guest_types_other{$guest})) { + return ($known_other, 0); + } + # This covers all the 'Mac OS' types AFAICT + return ('other', 0) if $guest =~ /^darwin/; } # otherwise we'll just go with l26 defaults because why not... @@ -978,15 +990,16 @@ sub smbios1_uuid { # vmware stores space separated bytes and has 1 dash in the middle... $uuid =~ s/[^0-9a-fA-f]//g; - if ($uuid =~ /^ + if ( + $uuid =~ /^ ([0-9a-fA-F]{8}) ([0-9a-fA-F]{4}) ([0-9a-fA-F]{4}) ([0-9a-fA-F]{4}) ([0-9a-fA-F]{12}) - $/x) - { - return "$1-$2-$3-$4-$5"; + $/x + ) { + return "$1-$2-$3-$4-$5"; } return; } @@ -1006,8 +1019,8 @@ sub get_create_args { # NOTE: all types must be added to the return schema of the import-metadata API endpoint my $warn = sub { - my ($type, %properties) = @_; - push @$warnings, { type => $type, %properties }; + my ($type, %properties) = @_; + push @$warnings, { type => $type, %properties }; }; my ($cores, $sockets) = $self->cpu_info(); @@ -1016,11 +1029,11 @@ sub get_create_args { my $firmware = $self->firmware; if ($firmware eq 'efi') { - $create_args->{bios} = 'ovmf'; - $create_disks->{efidisk0} = 1; - $warn->('efi-state-lost', key => "bios", value => "ovmf"); + $create_args->{bios} = 'ovmf'; + $create_disks->{efidisk0} = 1; + $warn->('efi-state-lost', key => "bios", value => "ovmf"); } else { - $create_args->{bios} = 'seabios'; + $create_args->{bios} = 'seabios'; } my $memory = $self->memory; @@ -1029,30 +1042,30 @@ sub get_create_args { my $default_scsihw; my $scsihw; my $set_scsihw = sub { - if (defined($scsihw) && $scsihw ne $_[0]) { - warn "multiple different SCSI hardware types are not supported\n"; - return; - } - $scsihw = $_[0]; + if (defined($scsihw) && $scsihw ne $_[0]) { + warn "multiple different SCSI hardware types are not supported\n"; + return; + } + $scsihw = $_[0]; }; my ($ostype, $is_windows) = $self->guest_type(); $create_args->{ostype} //= $ostype if defined($ostype); if ($ostype eq 'l26') { - $default_scsihw = 'virtio-scsi-single'; + $default_scsihw = 'virtio-scsi-single'; } $self->for_each_netdev(sub { - my ($id, $dev, $mac) = @_; - $mac //= ''; - my $model = $dev->{virtualDev} // 'vmxnet3'; + my ($id, $dev, $mac) = @_; + $mac //= ''; + my $model = $dev->{virtualDev} // 'vmxnet3'; - my $param = { model => $model }; - $param->{macaddr} = $mac if length($mac); - $create_net->{"net$id"} = $param; + my $param = { model => $model }; + $param->{macaddr} = $mac if length($mac); + $create_net->{"net$id"} = $param; }); - my %counts = ( scsi => 0, sata => 0, ide => 0 ); + my %counts = (scsi => 0, sata => 0, ide => 0); my $boot_order = ''; @@ -1061,111 +1074,111 @@ sub get_create_args { # disks. 
my @nvmes; my $add_disk = sub { - my ($bus, $slot, $file, $devtype, $kind, $do_nvmes) = @_; + my ($bus, $slot, $file, $devtype, $kind, $do_nvmes) = @_; - my $vmbus = $bus; - if ($do_nvmes) { - $bus = 'scsi'; - } elsif ($bus eq 'nvme') { - push @nvmes, [$slot, $file, $devtype, $kind]; - return; - } + my $vmbus = $bus; + if ($do_nvmes) { + $bus = 'scsi'; + } elsif ($bus eq 'nvme') { + push @nvmes, [$slot, $file, $devtype, $kind]; + return; + } - my $path = eval { $manifest->resolve_path_relative_to($self->vmx_path, $file) }; - return if !defined($path); + my $path = eval { $manifest->resolve_path_relative_to($self->vmx_path, $file) }; + return if !defined($path); - if ($devtype) { - if ($devtype =~ /^lsi/i) { - $set_scsihw->('lsi'); - } elsif ($devtype eq 'pvscsi') { - $set_scsihw->('pvscsi'); # same name in pve - } - } + if ($devtype) { + if ($devtype =~ /^lsi/i) { + $set_scsihw->('lsi'); + } elsif ($devtype eq 'pvscsi') { + $set_scsihw->('pvscsi'); # same name in pve + } + } - my $disk_capacity; - if (defined(my $diskinfo = $vminfo->{disks})) { - my ($dc, $ds, $rel_path) = PVE::Storage::ESXiPlugin::split_path($path); - for my $disk ($diskinfo->@*) { - if ($disk->{datastore} eq $ds && $disk->{path} eq $rel_path) { - $disk_capacity = $disk->{capacity}; - last; - } - } - } + my $disk_capacity; + if (defined(my $diskinfo = $vminfo->{disks})) { + my ($dc, $ds, $rel_path) = PVE::Storage::ESXiPlugin::split_path($path); + for my $disk ($diskinfo->@*) { + if ($disk->{datastore} eq $ds && $disk->{path} eq $rel_path) { + $disk_capacity = $disk->{capacity}; + last; + } + } + } - my $count = $counts{$bus}++; - if ($kind eq 'cdrom') { - # We currently do not pass cdroms through via the esxi storage. - # Users should adapt import these from the storages directly/manually. - $create_args->{"${bus}${count}"} = "none,media=cdrom"; - # CD-ROM image will not get imported - $warn->('cdrom-image-ignored', key => "${bus}${count}", value => "$storeid:$path"); - } else { - $create_disks->{"${bus}${count}"} = { - volid => "$storeid:$path", - defined($disk_capacity) ? (size => $disk_capacity) : (), - }; - } + my $count = $counts{$bus}++; + if ($kind eq 'cdrom') { + # We currently do not pass cdroms through via the esxi storage. + # Users should adapt import these from the storages directly/manually. + $create_args->{"${bus}${count}"} = "none,media=cdrom"; + # CD-ROM image will not get imported + $warn->('cdrom-image-ignored', key => "${bus}${count}", value => "$storeid:$path"); + } else { + $create_disks->{"${bus}${count}"} = { + volid => "$storeid:$path", + defined($disk_capacity) ? (size => $disk_capacity) : (), + }; + } - $boot_order .= ';' if length($boot_order); - $boot_order .= $bus.$count; + $boot_order .= ';' if length($boot_order); + $boot_order .= $bus . 
$count; }; $self->for_each_disk($add_disk); if (@nvmes) { - for my $nvme (@nvmes) { - my ($slot, $file, $devtype, $kind) = @$nvme; - $warn->('nvme-unsupported', key => "nvme${slot}", value => "$file"); - $add_disk->('scsi', $slot, $file, $devtype, $kind, 1); - } + for my $nvme (@nvmes) { + my ($slot, $file, $devtype, $kind) = @$nvme; + $warn->('nvme-unsupported', key => "nvme${slot}", value => "$file"); + $add_disk->('scsi', $slot, $file, $devtype, $kind, 1); + } } $scsihw //= $default_scsihw; if ($firmware eq 'efi') { - if (!defined($scsihw) || $scsihw =~ /^lsi/) { - if ($is_windows) { - $scsihw = 'pvscsi'; - } else { - $scsihw = 'virtio-scsi-single'; - } - # OVMF is built without LSI drivers, scsi hardware was set to $scsihw - $warn->('ovmf-with-lsi-unsupported', key => 'scsihw', value => "$scsihw"); - } + if (!defined($scsihw) || $scsihw =~ /^lsi/) { + if ($is_windows) { + $scsihw = 'pvscsi'; + } else { + $scsihw = 'virtio-scsi-single'; + } + # OVMF is built without LSI drivers, scsi hardware was set to $scsihw + $warn->('ovmf-with-lsi-unsupported', key => 'scsihw', value => "$scsihw"); + } } $create_args->{scsihw} = $scsihw if defined($scsihw); $create_args->{boot} = "order=$boot_order"; if (defined(my $smbios1_uuid = $self->smbios1_uuid())) { - $create_args->{smbios1} = "uuid=$smbios1_uuid"; + $create_args->{smbios1} = "uuid=$smbios1_uuid"; } if (defined(my $name = $self->{displayName})) { - # name in pve is a 'dns-name', so... clean it - $name =~ s/\s/-/g; - $name =~ s/[^a-zA-Z0-9\-.]//g; - $name =~ s/^[.-]+//; - $name =~ s/[.-]+$//; - $create_args->{name} = $name if length($name); + # name in pve is a 'dns-name', so... clean it + $name =~ s/\s/-/g; + $name =~ s/[^a-zA-Z0-9\-.]//g; + $name =~ s/^[.-]+//; + $name =~ s/[.-]+$//; + $create_args->{name} = $name if length($name); } my $serid = 0; $self->for_each_serial(sub { - my ($id, $serial) = @_; - # currently we only support 'socket' type serials anyway - $warn->('serial-port-socket-only', key => "serial$serid"); - $create_args->{"serial$serid"} = 'socket'; - ++$serid; + my ($id, $serial) = @_; + # currently we only support 'socket' type serials anyway + $warn->('serial-port-socket-only', key => "serial$serid"); + $create_args->{"serial$serid"} = 'socket'; + ++$serid; }); - $warn->('guest-is-running') if defined($vminfo) && ($vminfo->{power}//'') ne 'poweredOff'; + $warn->('guest-is-running') if defined($vminfo) && ($vminfo->{power} // '') ne 'poweredOff'; return { - type => 'vm', - source => 'esxi', - 'create-args' => $create_args, - disks => $create_disks, - net => $create_net, - warnings => $warnings, + type => 'vm', + source => 'esxi', + 'create-args' => $create_args, + disks => $create_disks, + net => $create_net, + warnings => $warnings, }; } diff --git a/src/PVE/Storage/GlusterfsPlugin.pm b/src/PVE/Storage/GlusterfsPlugin.pm index 18493cb..e60b0d1 100644 --- a/src/PVE/Storage/GlusterfsPlugin.pm +++ b/src/PVE/Storage/GlusterfsPlugin.pm @@ -22,43 +22,48 @@ my $get_active_server = sub { my $defaultserver = $scfg->{server} ? 
$scfg->{server} : 'localhost'; if ($return_default_if_offline && !defined($scfg->{server2})) { - # avoid delays (there is no backup server anyways) - return $defaultserver; + # avoid delays (there is no backup server anyways) + return $defaultserver; } - my $serverlist = [ $defaultserver ]; + my $serverlist = [$defaultserver]; push @$serverlist, $scfg->{server2} if $scfg->{server2}; my $ctime = time(); foreach my $server (@$serverlist) { - my $stat = $server_test_results->{$server}; - return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2); + my $stat = $server_test_results->{$server}; + return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2); } foreach my $server (@$serverlist) { - my $status = 0; + my $status = 0; - if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') { - # ping the gluster daemon default port (24007) as heuristic - $status = PVE::Network::tcp_ping($server, 24007, 2); + if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') { + # ping the gluster daemon default port (24007) as heuristic + $status = PVE::Network::tcp_ping($server, 24007, 2); - } else { + } else { - my $parser = sub { - my $line = shift; + my $parser = sub { + my $line = shift; - if ($line =~ m/Status: Started$/) { - $status = 1; - } - }; + if ($line =~ m/Status: Started$/) { + $status = 1; + } + }; - my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}]; + my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}]; - run_command($cmd, errmsg => "glusterfs error", errfunc => sub {}, outfunc => $parser); - } + run_command( + $cmd, + errmsg => "glusterfs error", + errfunc => sub { }, + outfunc => $parser, + ); + } - $server_test_results->{$server} = { time => time(), active => $status }; - return $server if $status; + $server_test_results->{$server} = { time => time(), active => $status }; + return $server if $status; } return $defaultserver if $return_default_if_offline; @@ -72,9 +77,9 @@ sub glusterfs_is_mounted { $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata; return $mountpoint if grep { - $_->[2] eq 'fuse.glusterfs' && - $_->[0] =~ /^\S+:\Q$volume\E$/ && - $_->[1] eq $mountpoint + $_->[2] eq 'fuse.glusterfs' + && $_->[0] =~ /^\S+:\Q$volume\E$/ + && $_->[1] eq $mountpoint } @$mountdata; return undef; } @@ -97,55 +102,57 @@ sub type { sub plugindata { return { - content => [ { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1}, - { images => 1 }], - format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ], - 'sensitive-properties' => {}, + content => [ + { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, + { images => 1 }, + ], + format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'], + 'sensitive-properties' => {}, }; } sub properties { return { - volume => { - description => "Glusterfs Volume.", - type => 'string', - }, - server2 => { - description => "Backup volfile server IP or DNS name.", - type => 'string', format => 'pve-storage-server', - requires => 'server', - }, - transport => { - description => "Gluster transport: tcp or rdma", - type => 'string', - enum => ['tcp', 'rdma', 'unix'], - }, + volume => { + description => "Glusterfs Volume.", + type => 'string', + }, + server2 => { + description => "Backup volfile server IP or DNS name.", + type => 'string', + format => 'pve-storage-server', + requires => 'server', + }, + transport => { + description => "Gluster transport: tcp or rdma", + type => 
'string', + enum => ['tcp', 'rdma', 'unix'], + }, }; } sub options { return { - path => { fixed => 1 }, - server => { optional => 1 }, - server2 => { optional => 1 }, - volume => { fixed => 1 }, - transport => { optional => 1 }, - nodes => { optional => 1 }, - disable => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - bwlimit => { optional => 1 }, - preallocation => { optional => 1 }, + path => { fixed => 1 }, + server => { optional => 1 }, + server2 => { optional => 1 }, + volume => { fixed => 1 }, + transport => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + bwlimit => { optional => 1 }, + preallocation => { optional => 1 }, }; } - sub check_config { my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_; @@ -169,31 +176,30 @@ sub parse_name_dir { sub path { my ($class, $scfg, $volname, $storeid, $snapname) = @_; - my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname); # Note: qcow2/qed has internal snapshot, so path is always # the same (with or without snapshot => same file). - die "can't snapshot this image format\n" - if defined($snapname) && $format !~ m/^(qcow2|qed)$/; + die "can't snapshot this image format\n" + if defined($snapname) && $format !~ m/^(qcow2|qed)$/; my $path = undef; if ($vtype eq 'images') { - my $server = &$get_active_server($scfg, 1); - my $glustervolume = $scfg->{volume}; - my $transport = $scfg->{transport}; - my $protocol = "gluster"; + my $server = &$get_active_server($scfg, 1); + my $glustervolume = $scfg->{volume}; + my $transport = $scfg->{transport}; + my $protocol = "gluster"; - if ($transport) { - $protocol = "gluster+$transport"; - } + if ($transport) { + $protocol = "gluster+$transport"; + } - $path = "$protocol://$server/$glustervolume/images/$vmid/$name"; + $path = "$protocol://$server/$glustervolume/images/$vmid/$name"; } else { - my $dir = $class->get_subdir($scfg, $vtype); - $path = "$dir/$name"; + my $dir = $class->get_subdir($scfg, $vtype); + $path = "$dir/$name"; } return wantarray ? 
($path, $vmid, $vtype) : $path; @@ -205,7 +211,7 @@ sub clone_image { die "storage definition has no path\n" if !$scfg->{path}; my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + $class->parse_volname($volname); die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images'; @@ -232,8 +238,17 @@ sub clone_image { my $glustervolume = $scfg->{volume}; my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name"; - my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename", - '-F', $format, '-f', 'qcow2', $volumepath]; + my $cmd = [ + '/usr/bin/qemu-img', + 'create', + '-b', + "../$basevmid/$basename", + '-F', + $format, + '-f', + 'qcow2', + $volumepath, + ]; run_command($cmd, errmsg => "unable to create image"); @@ -272,9 +287,9 @@ sub alloc_image { eval { run_command($cmd, errmsg => "unable to create image"); }; if ($@) { - unlink $path; - rmdir $imagedir; - die "$@"; + unlink $path; + rmdir $imagedir; + die "$@"; } return "$vmid/$name"; @@ -284,7 +299,7 @@ sub status { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; @@ -299,20 +314,20 @@ sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; my $volume = $scfg->{volume}; if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) { - $class->config_aware_base_mkdir($scfg, $path); + $class->config_aware_base_mkdir($scfg, $path); - die "unable to activate storage '$storeid' - " . - "directory '$path' does not exist\n" if ! -d $path; + die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n" + if !-d $path; - my $server = &$get_active_server($scfg, 1); + my $server = &$get_active_server($scfg, 1); - glusterfs_mount($server, $volume, $path); + glusterfs_mount($server, $volume, $path); } $class->SUPER::activate_storage($storeid, $scfg, $cache); @@ -322,14 +337,14 @@ sub deactivate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; my $volume = $scfg->{volume}; if (glusterfs_is_mounted($volume, $path, $cache->{mountdata})) { - my $cmd = ['/bin/umount', $path]; - run_command($cmd, errmsg => 'umount error'); + my $cmd = ['/bin/umount', $path]; + run_command($cmd, errmsg => 'umount error'); } } diff --git a/src/PVE/Storage/ISCSIDirectPlugin.pm b/src/PVE/Storage/ISCSIDirectPlugin.pm index d54dcd8..9b7f77c 100644 --- a/src/PVE/Storage/ISCSIDirectPlugin.pm +++ b/src/PVE/Storage/ISCSIDirectPlugin.pm @@ -18,30 +18,36 @@ sub iscsi_ls { my ($scfg) = @_; my $portal = $scfg->{portal}; - my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://'.$portal ]; + my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://' . 
$portal]; my $list = {}; my %unittobytes = ( - "k" => 1024, - "M" => 1024*1024, - "G" => 1024*1024*1024, - "T" => 1024*1024*1024*1024 + "k" => 1024, + "M" => 1024 * 1024, + "G" => 1024 * 1024 * 1024, + "T" => 1024 * 1024 * 1024 * 1024, ); eval { - run_command($cmd, errmsg => "iscsi error", errfunc => sub {}, outfunc => sub { - my $line = shift; - $line = trim($line); - if( $line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/ ) { - my $image = "lun".$1; - my $size = $3; - my $unit = $4; + run_command( + $cmd, + errmsg => "iscsi error", + errfunc => sub { }, + outfunc => sub { + my $line = shift; + $line = trim($line); + if ($line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/ + ) { + my $image = "lun" . $1; + my $size = $3; + my $unit = $4; - $list->{$image} = { - name => $image, - size => $size * $unittobytes{$unit}, - format => 'raw', - }; - } - }); + $list->{$image} = { + name => $image, + size => $size * $unittobytes{$unit}, + format => 'raw', + }; + } + }, + ); }; my $err = $@; @@ -58,9 +64,9 @@ sub type { sub plugindata { return { - content => [ {images => 1, none => 1}, { images => 1 }], - select_existing => 1, - 'sensitive-properties' => {}, + content => [{ images => 1, none => 1 }, { images => 1 }], + select_existing => 1, + 'sensitive-properties' => {}, }; } @@ -68,9 +74,9 @@ sub options { return { portal => { fixed => 1 }, target => { fixed => 1 }, - nodes => { optional => 1}, - disable => { optional => 1}, - content => { optional => 1}, + nodes => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, bwlimit => { optional => 1 }, }; } @@ -80,9 +86,8 @@ sub options { sub parse_volname { my ($class, $volname) = @_; - if ($volname =~ m/^lun(\d+)$/) { - return ('images', $1, undef, undef, undef, undef, 'raw'); + return ('images', $1, undef, undef, undef, undef, 'raw'); } die "unable to parse iscsi volume name '$volname'\n"; @@ -93,7 +98,7 @@ sub path { my ($class, $scfg, $volname, $storeid, $snapname) = @_; die "volume snapshot is not possible on iscsi device\n" - if defined($snapname); + if defined($snapname); my ($vtype, $lun, $vmid) = $class->parse_volname($volname); @@ -138,20 +143,20 @@ sub list_images { my $dat = iscsi_ls($scfg); foreach my $volname (keys %$dat) { - my $volid = "$storeid:$volname"; + my $volid = "$storeid:$volname"; - if ($vollist) { - my $found = grep { $_ eq $volid } @$vollist; - next if !$found; - } else { - # we have no owner for iscsi devices - next if defined($vmid); - } + if ($vollist) { + my $found = grep { $_ eq $volid } @$vollist; + next if !$found; + } else { + # we have no owner for iscsi devices + next if defined($vmid); + } - my $info = $dat->{$volname}; - $info->{volid} = $volid; + my $info = $dat->{$volname}; + $info->{volid} = $volid; - push @$res, $info; + push @$res, $info; } return $res; @@ -164,7 +169,7 @@ sub status { my $free = 0; my $used = 0; my $active = 1; - return ($total,$free,$used,$active); + return ($total, $free, $used, $active); return undef; } @@ -228,17 +233,16 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - copy => { current => 1}, + copy => { current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; - if($snapname){ - $key = 'snap'; - }else{ - $key = $isBase ? 
'base' : 'current'; + if ($snapname) { + $key = 'snap'; + } else { + $key = $isBase ? 'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -256,15 +260,15 @@ sub volume_export_formats { sub volume_export { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, ) = @_; die "volume export format $format not available for $class\n" if $format ne 'raw+size'; @@ -276,8 +280,8 @@ sub volume_export { my $json = ''; run_command( - ['/usr/bin/qemu-img', 'info', '-f', 'raw', '--output=json', $file], - outfunc => sub { $json .= shift }, + ['/usr/bin/qemu-img', 'info', '-f', 'raw', '--output=json', $file], + outfunc => sub { $json .= shift }, ); die "failed to query size information for '$file' with qemu-img\n" if !$json; my $info = eval { decode_json($json) }; @@ -289,8 +293,8 @@ sub volume_export { PVE::Storage::Plugin::write_common_header($fh, $size); run_command( - ['qemu-img', 'dd', 'bs=64k', "if=$file", '-f', 'raw', '-O', 'raw'], - output => '>&'.fileno($fh), + ['qemu-img', 'dd', 'bs=64k', "if=$file", '-f', 'raw', '-O', 'raw'], + output => '>&' . fileno($fh), ); return; } diff --git a/src/PVE/Storage/ISCSIPlugin.pm b/src/PVE/Storage/ISCSIPlugin.pm index 39cb4a7..7691ec6 100644 --- a/src/PVE/Storage/ISCSIPlugin.pm +++ b/src/PVE/Storage/ISCSIPlugin.pm @@ -9,7 +9,8 @@ use IO::File; use PVE::JSONSchema qw(get_standard_option); use PVE::Storage::Plugin; -use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE); +use PVE::Tools + qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE); use base qw(PVE::Storage::Plugin); @@ -25,8 +26,8 @@ my sub assert_iscsi_support { $found_iscsi_adm_exe = -x $ISCSIADM; if (!$found_iscsi_adm_exe) { - die "error: no iscsi support - please install open-iscsi\n" if !$noerr; - warn "warning: no iscsi support - please install open-iscsi\n"; + die "error: no iscsi support - please install open-iscsi\n" if !$noerr; + warn "warning: no iscsi support - please install open-iscsi\n"; } return $found_iscsi_adm_exe; } @@ -41,18 +42,24 @@ sub iscsi_session_list { my $res = {}; eval { - run_command($cmd, errmsg => 'iscsi session scan failed', outfunc => sub { - my $line = shift; - # example: tcp: [1] 192.168.122.252:3260,1 iqn.2003-01.org.linux-iscsi.proxmox-nfs.x8664:sn.00567885ba8f (non-flash) - if ($line =~ m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/) { - my ($session_id, $portal, $target) = ($1, $2, $3); - # there can be several sessions per target (multipath) - push @{$res->{$target}}, { session_id => $session_id, portal => $portal }; - } - }); + run_command( + $cmd, + errmsg => 'iscsi session scan failed', + outfunc => sub { + my $line = shift; + # example: tcp: [1] 192.168.122.252:3260,1 iqn.2003-01.org.linux-iscsi.proxmox-nfs.x8664:sn.00567885ba8f (non-flash) + if ($line =~ + m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/ + ) { + my ($session_id, $portal, $target) = ($1, $2, $3); + # there can be several sessions per target (multipath) + push @{ $res->{$target} }, { session_id => $session_id, portal => $portal }; + } + }, + ); }; if (my $err = $@) { - die $err if $err !~ m/: No active sessions.$/i; + die $err if $err !~ m/: No active sessions.$/i; } return $res; @@ -62,7 +69,7 @@ sub iscsi_test_session { my ($sid) = @_; if ($sid !~ m/^[0-9]+$/) { - 
die "session_id: '$sid' is not a number\n"; + die "session_id: '$sid' is not a number\n"; } my $state = file_read_firstline("/sys/class/iscsi_session/session${sid}/state"); return defined($state) && $state eq 'LOGGED_IN'; @@ -73,13 +80,13 @@ sub iscsi_test_portal { $cache //= {}; if (defined($target)) { - # check session state instead if available - my $sessions = iscsi_session($cache, $target); - for my $session ($sessions->@*) { - next if $session->{portal} ne $portal; - my $state = iscsi_test_session($session->{session_id}); - return $state if $state; - } + # check session state instead if available + my $sessions = iscsi_session($cache, $target); + for my $session ($sessions->@*) { + next if $session->{portal} ne $portal; + my $state = iscsi_test_session($session->{session_id}); + return $state if $state; + } } # check portal via tcp my ($server, $port) = PVE::Tools::parse_host_and_port($portal); @@ -95,25 +102,28 @@ sub iscsi_portals { my $res = []; my $cmd = [$ISCSIADM, '--mode', 'node']; eval { - run_command($cmd, outfunc => sub { - my $line = shift; + run_command( + $cmd, + outfunc => sub { + my $line = shift; - if ($line =~ $ISCSI_TARGET_RE) { - my ($portal, $portal_target) = ($1, $2); - if ($portal_target eq $target) { - push @{$res}, $portal; - } - } - }); + if ($line =~ $ISCSI_TARGET_RE) { + my ($portal, $portal_target) = ($1, $2); + if ($portal_target eq $target) { + push @{$res}, $portal; + } + } + }, + ); }; my $err = $@; warn $err if $err; if ($err || !scalar(@$res)) { - return [ $portal_in ]; + return [$portal_in]; } else { - return $res; + return $res; } } @@ -124,24 +134,27 @@ sub iscsi_discovery { my $res = {}; for my $portal ($portals->@*) { - next if !iscsi_test_portal($target_in, $portal, $cache); # fixme: raise exception here? + next if !iscsi_test_portal($target_in, $portal, $cache); # fixme: raise exception here? 
- my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal]; - eval { - run_command($cmd, outfunc => sub { - my $line = shift; + my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal]; + eval { + run_command( + $cmd, + outfunc => sub { + my $line = shift; - if ($line =~ $ISCSI_TARGET_RE) { - my ($portal, $target) = ($1, $2); - # one target can have more than one portal (multipath) - # and sendtargets should return all of them in single call - push @{$res->{$target}}, $portal; - } - }); - }; + if ($line =~ $ISCSI_TARGET_RE) { + my ($portal, $target) = ($1, $2); + # one target can have more than one portal (multipath) + # and sendtargets should return all of them in single call + push @{ $res->{$target} }, $portal; + } + }, + ); + }; - # In case of multipath we can stop after receiving targets from any available portal - last if scalar(keys %$res) > 0; + # In case of multipath we can stop after receiving targets from any available portal + last if scalar(keys %$res) > 0; } return $res; @@ -157,19 +170,24 @@ sub iscsi_login { # Disable retries to avoid blocking pvestatd for too long, next iteration will retry anyway eval { - my $cmd = [ - $ISCSIADM, - '--mode', 'node', - '--targetname', $target, - '--op', 'update', - '--name', 'node.session.initial_login_retry_max', - '--value', '0', - ]; - run_command($cmd); + my $cmd = [ + $ISCSIADM, + '--mode', + 'node', + '--targetname', + $target, + '--op', + 'update', + '--name', + 'node.session.initial_login_retry_max', + '--value', + '0', + ]; + run_command($cmd); }; warn $@ if $@; - run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--login']); + run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--login']); } sub iscsi_logout { @@ -190,22 +208,24 @@ sub iscsi_session_rescan { my $rstat = stat($rescan_filename); if (!$rstat) { - if (my $fh = IO::File->new($rescan_filename, "a")) { - utime undef, undef, $fh; - close($fh); - } + if (my $fh = IO::File->new($rescan_filename, "a")) { + utime undef, undef, $fh; + close($fh); + } } else { - my $atime = $rstat->atime; - my $tdiff = time() - $atime; - # avoid frequent rescans - return if !($tdiff < 0 || $tdiff > 10); - utime undef, undef, $rescan_filename; + my $atime = $rstat->atime; + my $tdiff = time() - $atime; + # avoid frequent rescans + return if !($tdiff < 0 || $tdiff > 10); + utime undef, undef, $rescan_filename; } foreach my $session (@$session_list) { - my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session->{session_id}, '--rescan']; - eval { run_command($cmd, outfunc => sub {}); }; - warn $@ if $@; + my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session->{session_id}, '--rescan']; + eval { + run_command($cmd, outfunc => sub { }); + }; + warn $@ if $@; } } @@ -216,19 +236,19 @@ sub load_stable_scsi_paths { my $stabledir = "/dev/disk/by-id"; if (my $dh = IO::Dir->new($stabledir)) { - foreach my $tmp (sort $dh->read) { - # exclude filenames with part in name (same disk but partitions) - # use only filenames with scsi(with multipath i have the same device - # with dm-uuid-mpath , dm-name and scsi in name) - if($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) { - my $path = "$stabledir/$tmp"; - my $bdevdest = readlink($path); - if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) { - $stable_paths->{$1}=$tmp; - } - } - } - $dh->close; + foreach my $tmp (sort $dh->read) { + # exclude filenames with part in name (same disk but partitions) + # use only filenames with 
scsi(with multipath i have the same device + # with dm-uuid-mpath , dm-name and scsi in name) + if ($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) { + my $path = "$stabledir/$tmp"; + my $bdevdest = readlink($path); + if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) { + $stable_paths->{$1} = $tmp; + } + } + } + $dh->close; } return $stable_paths; } @@ -241,56 +261,67 @@ sub iscsi_device_list { my $stable_paths = load_stable_scsi_paths(); - dir_glob_foreach($dirname, 'session(\d+)', sub { - my ($ent, $session) = @_; + dir_glob_foreach( + $dirname, + 'session(\d+)', + sub { + my ($ent, $session) = @_; - my $target = file_read_firstline("$dirname/$ent/targetname"); - return if !$target; + my $target = file_read_firstline("$dirname/$ent/targetname"); + return if !$target; - my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*'); - return if !defined($host); + my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*'); + return if !defined($host); - dir_glob_foreach("/sys/bus/scsi/devices", "$host:" . '(\d+):(\d+):(\d+)', sub { - my ($tmp, $channel, $id, $lun) = @_; + dir_glob_foreach( + "/sys/bus/scsi/devices", + "$host:" . '(\d+):(\d+):(\d+)', + sub { + my ($tmp, $channel, $id, $lun) = @_; - my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type"); - return if !defined($type) || $type ne '0'; # list disks only + my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type"); + return if !defined($type) || $type ne '0'; # list disks only - my $bdev; - if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels - (undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)'); - } else { - (undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)'); - } - return if !$bdev; + my $bdev; + if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels + (undef, $bdev) = + dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)'); + } else { + (undef, $bdev) = + dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)'); + } + return if !$bdev; - #check multipath - if (-d "/sys/block/$bdev/holders") { - my $multipathdev = dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*'); - $bdev = $multipathdev if $multipathdev; - } + #check multipath + if (-d "/sys/block/$bdev/holders") { + my $multipathdev = + dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*'); + $bdev = $multipathdev if $multipathdev; + } - my $blockdev = $stable_paths->{$bdev}; - return if !$blockdev; + my $blockdev = $stable_paths->{$bdev}; + return if !$blockdev; - my $size = file_read_firstline("/sys/block/$bdev/size"); - return if !$size; + my $size = file_read_firstline("/sys/block/$bdev/size"); + return if !$size; - my $volid = "$channel.$id.$lun.$blockdev"; + my $volid = "$channel.$id.$lun.$blockdev"; - $res->{$target}->{$volid} = { - 'format' => 'raw', - 'size' => int($size * 512), - 'vmid' => 0, # not assigned to any vm - 'channel' => int($channel), - 'id' => int($id), - 'lun' => int($lun), - }; + $res->{$target}->{$volid} = { + 'format' => 'raw', + 'size' => int($size * 512), + 'vmid' => 0, # not assigned to any vm + 'channel' => int($channel), + 'id' => int($id), + 'lun' => int($lun), + }; - #print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n"; - }); + #print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n"; + }, + ); - }); + }, + ); return $res; } @@ -303,22 +334,23 @@ sub type { sub plugindata { return { - content => [ {images => 1, none => 1}, { images => 1 }], - 
select_existing => 1, - 'sensitive-properties' => {}, + content => [{ images => 1, none => 1 }, { images => 1 }], + select_existing => 1, + 'sensitive-properties' => {}, }; } sub properties { return { - target => { - description => "iSCSI target.", - type => 'string', - }, - portal => { - description => "iSCSI portal (IP or DNS name with optional port).", - type => 'string', format => 'pve-storage-portal-dns', - }, + target => { + description => "iSCSI target.", + type => 'string', + }, + portal => { + description => "iSCSI portal (IP or DNS name with optional port).", + type => 'string', + format => 'pve-storage-portal-dns', + }, }; } @@ -326,10 +358,10 @@ sub options { return { portal => { fixed => 1 }, target => { fixed => 1 }, - nodes => { optional => 1}, - disable => { optional => 1}, - content => { optional => 1}, - bwlimit => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, + bwlimit => { optional => 1 }, }; } @@ -339,7 +371,7 @@ sub parse_volname { my ($class, $volname) = @_; if ($volname =~ m!^\d+\.\d+\.\d+\.([^/\s]+)$!) { - return ('images', $1, undef, undef, undef, undef, 'raw'); + return ('images', $1, undef, undef, undef, undef, 'raw'); } die "unable to parse iscsi volume name '$volname'\n"; @@ -389,7 +421,7 @@ sub list_volumes { my $res = $class->list_images($storeid, $scfg, $vmid); for my $item (@$res) { - $item->{content} = 'images'; # we only have images + $item->{content} = 'images'; # we only have images } return $res; @@ -408,23 +440,23 @@ sub list_images { if (my $dat = $cache->{iscsi_devices}->{$target}) { - foreach my $volname (keys %$dat) { + foreach my $volname (keys %$dat) { - my $volid = "$storeid:$volname"; + my $volid = "$storeid:$volname"; - if ($vollist) { - my $found = grep { $_ eq $volid } @$vollist; - next if !$found; - } else { - # we have no owner for iscsi devices - next if defined($vmid); - } + if ($vollist) { + my $found = grep { $_ eq $volid } @$vollist; + next if !$found; + } else { + # we have no owner for iscsi devices + next if defined($vmid); + } - my $info = $dat->{$volname}; - $info->{volid} = $volid; + my $info = $dat->{$volname}; + $info->{volid} = $volid; - push @$res, $info; - } + push @$res, $info; + } } return $res; @@ -455,23 +487,23 @@ sub activate_storage { my $do_login = !defined($sessions); if (!$do_login) { - # We should check that sessions for all portals are available - my $session_portals = [ map { $_->{portal} } (@$sessions) ]; + # We should check that sessions for all portals are available + my $session_portals = [map { $_->{portal} } (@$sessions)]; - for my $portal (@$portals) { - if (!grep(/^\Q$portal\E$/, @$session_portals)) { - $do_login = 1; - last; - } - } + for my $portal (@$portals) { + if (!grep(/^\Q$portal\E$/, @$session_portals)) { + $do_login = 1; + last; + } + } } if ($do_login) { - eval { iscsi_login($scfg->{target}, $portals, $cache); }; - warn $@ if $@; + eval { iscsi_login($scfg->{target}, $portals, $cache); }; + warn $@ if $@; } else { - # make sure we get all devices - iscsi_session_rescan($sessions); + # make sure we get all devices + iscsi_session_rescan($sessions); } } @@ -481,7 +513,7 @@ sub deactivate_storage { return if !assert_iscsi_support(1); if (defined(iscsi_session($cache, $scfg->{target}))) { - iscsi_logout($scfg->{target}); + iscsi_logout($scfg->{target}); } } @@ -490,17 +522,17 @@ my $check_devices_part_of_target = sub { my $found = 0; for my $path (@$device_paths) { - if ($path =~ 
m!^/devices/platform/host\d+/session(\d+)/target\d+:\d:\d!) { - my $session_id = $1; + if ($path =~ m!^/devices/platform/host\d+/session(\d+)/target\d+:\d:\d!) { + my $session_id = $1; - my $targetname = file_read_firstline( - "/sys/class/iscsi_session/session$session_id/targetname", - ); - if ($targetname && ($targetname eq $target)) { - $found = 1; - last; - } - } + my $targetname = file_read_firstline( + "/sys/class/iscsi_session/session$session_id/targetname", + ); + if ($targetname && ($targetname eq $target)) { + $found = 1; + last; + } + } } return $found; }; @@ -514,15 +546,15 @@ my $udev_query_path = sub { my $device_path; my $cmd = [ - 'udevadm', - 'info', - '--query=path', - $dev, + 'udevadm', 'info', '--query=path', $dev, ]; eval { - run_command($cmd, outfunc => sub { - $device_path = shift; - }); + run_command( + $cmd, + outfunc => sub { + $device_path = shift; + }, + ); }; die "failed to query device path for '$dev': $@\n" if $@; @@ -540,23 +572,27 @@ $resolve_virtual_devices = sub { my $resolved = []; if ($dev =~ m!^/devices/virtual/block/!) { - dir_glob_foreach("/sys/$dev/slaves", '([^.].+)', sub { - my ($slave) = @_; + dir_glob_foreach( + "/sys/$dev/slaves", + '([^.].+)', + sub { + my ($slave) = @_; - # don't check devices multiple times - return if $visited->{$slave}; - $visited->{$slave} = 1; + # don't check devices multiple times + return if $visited->{$slave}; + $visited->{$slave} = 1; - my $path; - eval { $path = $udev_query_path->("/dev/$slave"); }; - return if $@; + my $path; + eval { $path = $udev_query_path->("/dev/$slave"); }; + return if $@; - my $nested_resolved = $resolve_virtual_devices->($path, $visited); + my $nested_resolved = $resolve_virtual_devices->($path, $visited); - push @$resolved, @$nested_resolved; - }); + push @$resolved, @$nested_resolved; + }, + ); } else { - push @$resolved, $dev; + push @$resolved, $dev; } return $resolved; @@ -570,7 +606,7 @@ sub activate_volume { die "failed to get realpath for '$path': $!\n" if !$real_path; # in case $path does not exist or is not a symlink, check if the returned # $real_path is a block device - die "resolved realpath '$real_path' is not a block device\n" if ! -b $real_path; + die "resolved realpath '$real_path' is not a block device\n" if !-b $real_path; my $device_path = $udev_query_path->($real_path); my $resolved_paths = $resolve_virtual_devices->($device_path); @@ -585,8 +621,8 @@ sub check_connection { my $portals = iscsi_portals($scfg->{target}, $scfg->{portal}); for my $portal (@$portals) { - my $result = iscsi_test_portal($scfg->{target}, $portal, $cache); - return $result if $result; + my $result = iscsi_test_portal($scfg->{target}, $portal, $cache); + return $result if $result; } return 0; @@ -601,17 +637,16 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - copy => { current => 1}, + copy => { current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; - if ($snapname){ - $key = 'snap'; + if ($snapname) { + $key = 'snap'; } else { - $key = $isBase ? 'base' : 'current'; + $key = $isBase ? 
'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -629,15 +664,15 @@ sub volume_export_formats { sub volume_export { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, ) = @_; die "volume export format $format not available for $class\n" if $format ne 'raw+size'; @@ -647,13 +682,16 @@ sub volume_export { my $file = $class->filesystem_path($scfg, $volname, $snapshot); my $size; - run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub { - my ($line) = @_; - die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/; - $size = int($1); - }); + run_command( + ['/sbin/blockdev', '--getsize64', $file], + outfunc => sub { + my ($line) = @_; + die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/; + $size = int($1); + }, + ); PVE::Storage::Plugin::write_common_header($fh, $size); - run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh)); + run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . fileno($fh)); return; } diff --git a/src/PVE/Storage/LVMPlugin.pm b/src/PVE/Storage/LVMPlugin.pm index 2ebec88..1a992e8 100644 --- a/src/PVE/Storage/LVMPlugin.pm +++ b/src/PVE/Storage/LVMPlugin.pm @@ -20,7 +20,7 @@ my $ignore_no_medium_warnings = sub { # ignore those, most of the time they're from (virtual) IPMI/iKVM devices # and just spam the log.. if ($line !~ /open failed: No medium found/) { - print STDERR "$line\n"; + print STDERR "$line\n"; } }; @@ -32,35 +32,51 @@ sub lvm_pv_info { my $has_label = 0; my $cmd = ['/usr/bin/file', '-L', '-s', $device]; - run_command($cmd, outfunc => sub { - my $line = shift; - $has_label = 1 if $line =~ m/LVM2/; - }); + run_command( + $cmd, + outfunc => sub { + my $line = shift; + $has_label = 1 if $line =~ m/LVM2/; + }, + ); return undef if !$has_label; - $cmd = ['/sbin/pvs', '--separator', ':', '--noheadings', '--units', 'k', - '--unbuffered', '--nosuffix', '--options', - 'pv_name,pv_size,vg_name,pv_uuid', $device]; + $cmd = [ + '/sbin/pvs', + '--separator', + ':', + '--noheadings', + '--units', + 'k', + '--unbuffered', + '--nosuffix', + '--options', + 'pv_name,pv_size,vg_name,pv_uuid', + $device, + ]; my $pvinfo; - run_command($cmd, outfunc => sub { - my $line = shift; + run_command( + $cmd, + outfunc => sub { + my $line = shift; - $line = trim($line); + $line = trim($line); - my ($pvname, $size, $vgname, $uuid) = split(':', $line); + my ($pvname, $size, $vgname, $uuid) = split(':', $line); - die "found multiple pvs entries for device '$device'\n" - if $pvinfo; + die "found multiple pvs entries for device '$device'\n" + if $pvinfo; - $pvinfo = { - pvname => $pvname, - size => int($size), - vgname => $vgname, - uuid => $uuid, - }; - }); + $pvinfo = { + pvname => $pvname, + size => int($size), + vgname => $vgname, + uuid => $uuid, + }; + }, + ); return $pvinfo; } @@ -69,9 +85,9 @@ sub clear_first_sector { my ($dev) = shift; if (my $fh = IO::File->new($dev, "w")) { - my $buf = 0 x 512; - syswrite $fh, $buf; - $fh->close(); + my $buf = 0 x 512; + syswrite $fh, $buf; + $fh->close(); } } @@ -81,8 +97,8 @@ sub lvm_create_volume_group { my $res = lvm_pv_info($device); if ($res->{vgname}) { - return if $res->{vgname} eq $vgname; # already created - die "device '$device' is already used by volume group '$res->{vgname}'\n"; + return if $res->{vgname} eq $vgname; # already created + 
die "device '$device' is already used by volume group '$res->{vgname}'\n"; } clear_first_sector($device); # else pvcreate fails @@ -96,58 +112,76 @@ sub lvm_create_volume_group { $cmd = ['/sbin/vgcreate', $vgname, $device]; # push @$cmd, '-c', 'y' if $shared; # we do not use this yet - run_command($cmd, errmsg => "vgcreate $vgname $device error", errfunc => $ignore_no_medium_warnings, outfunc => $ignore_no_medium_warnings); + run_command( + $cmd, + errmsg => "vgcreate $vgname $device error", + errfunc => $ignore_no_medium_warnings, + outfunc => $ignore_no_medium_warnings, + ); } sub lvm_destroy_volume_group { my ($vgname) = @_; run_command( - ['vgremove', '-y', $vgname], - errmsg => "unable to remove volume group $vgname", - errfunc => $ignore_no_medium_warnings, - outfunc => $ignore_no_medium_warnings, + ['vgremove', '-y', $vgname], + errmsg => "unable to remove volume group $vgname", + errfunc => $ignore_no_medium_warnings, + outfunc => $ignore_no_medium_warnings, ); } sub lvm_vgs { my ($includepvs) = @_; - my $cmd = ['/sbin/vgs', '--separator', ':', '--noheadings', '--units', 'b', - '--unbuffered', '--nosuffix', '--options']; + my $cmd = [ + '/sbin/vgs', + '--separator', + ':', + '--noheadings', + '--units', + 'b', + '--unbuffered', + '--nosuffix', + '--options', + ]; my $cols = [qw(vg_name vg_size vg_free lv_count)]; if ($includepvs) { - push @$cols, qw(pv_name pv_size pv_free); + push @$cols, qw(pv_name pv_size pv_free); } push @$cmd, join(',', @$cols); my $vgs = {}; eval { - run_command($cmd, outfunc => sub { - my $line = shift; - $line = trim($line); + run_command( + $cmd, + outfunc => sub { + my $line = shift; + $line = trim($line); - my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) = split (':', $line); + my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) = + split(':', $line); - $vgs->{$name} //= { - size => int ($size), - free => int ($free), - lvcount => int($lvcount) - }; + $vgs->{$name} //= { + size => int($size), + free => int($free), + lvcount => int($lvcount), + }; - if (defined($pvname) && defined($pvsize) && defined($pvfree)) { - push @{$vgs->{$name}->{pvs}}, { - name => $pvname, - size => int($pvsize), - free => int($pvfree), - }; - } - }, - errfunc => $ignore_no_medium_warnings, - ); + if (defined($pvname) && defined($pvsize) && defined($pvfree)) { + push @{ $vgs->{$name}->{pvs} }, + { + name => $pvname, + size => int($pvsize), + free => int($pvfree), + }; + } + }, + errfunc => $ignore_no_medium_warnings, + ); }; my $err = $@; @@ -161,49 +195,73 @@ sub lvm_vgs { sub lvm_list_volumes { my ($vgname) = @_; - my $option_list = 'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time'; + my $option_list = + 'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time'; my $cmd = [ - '/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b', - '--unbuffered', '--nosuffix', - '--config', 'report/time_format="%s"', - '--options', $option_list, + '/sbin/lvs', + '--separator', + ':', + '--noheadings', + '--units', + 'b', + '--unbuffered', + '--nosuffix', + '--config', + 'report/time_format="%s"', + '--options', + $option_list, ]; push @$cmd, $vgname if $vgname; my $lvs = {}; - run_command($cmd, outfunc => sub { - my $line = shift; + run_command( + $cmd, + outfunc => sub { + my $line = shift; - $line = trim($line); + $line = trim($line); - my ($vg_name, $lv_name, $lv_size, $lv_attr, $pool_lv, $data_percent, $meta_percent, $snap_percent, $uuid, 
$tags, $meta_size, $ctime) = split(':', $line); - return if !$vg_name; - return if !$lv_name; + my ( + $vg_name, + $lv_name, + $lv_size, + $lv_attr, + $pool_lv, + $data_percent, + $meta_percent, + $snap_percent, + $uuid, + $tags, + $meta_size, + $ctime, + ) = split(':', $line); + return if !$vg_name; + return if !$lv_name; - my $lv_type = substr($lv_attr, 0, 1); + my $lv_type = substr($lv_attr, 0, 1); - my $d = { - lv_size => int($lv_size), - lv_state => substr($lv_attr, 4, 1), - lv_type => $lv_type, - }; - $d->{pool_lv} = $pool_lv if $pool_lv; - $d->{tags} = $tags if $tags; - $d->{ctime} = $ctime; + my $d = { + lv_size => int($lv_size), + lv_state => substr($lv_attr, 4, 1), + lv_type => $lv_type, + }; + $d->{pool_lv} = $pool_lv if $pool_lv; + $d->{tags} = $tags if $tags; + $d->{ctime} = $ctime; - if ($lv_type eq 't') { - $data_percent ||= 0; - $meta_percent ||= 0; - $snap_percent ||= 0; - $d->{metadata_size} = int($meta_size); - $d->{metadata_used} = int(($meta_percent * $meta_size)/100); - $d->{used} = int(($data_percent * $lv_size)/100); - } - $lvs->{$vg_name}->{$lv_name} = $d; - }, - errfunc => $ignore_no_medium_warnings, + if ($lv_type eq 't') { + $data_percent ||= 0; + $meta_percent ||= 0; + $snap_percent ||= 0; + $d->{metadata_size} = int($meta_size); + $d->{metadata_used} = int(($meta_percent * $meta_size) / 100); + $d->{used} = int(($data_percent * $lv_size) / 100); + } + $lvs->{$vg_name}->{$lv_name} = $d; + }, + errfunc => $ignore_no_medium_warnings, ); return $lvs; @@ -217,48 +275,50 @@ sub type { sub plugindata { return { - content => [ {images => 1, rootdir => 1}, { images => 1 }], - 'sensitive-properties' => {}, + content => [{ images => 1, rootdir => 1 }, { images => 1 }], + 'sensitive-properties' => {}, }; } sub properties { return { - vgname => { - description => "Volume group name.", - type => 'string', format => 'pve-storage-vgname', - }, - base => { - description => "Base volume. This volume is automatically activated.", - type => 'string', format => 'pve-volume-id', - }, - saferemove => { - description => "Zero-out data when removing LVs.", - type => 'boolean', - }, - saferemove_throughput => { - description => "Wipe throughput (cstream -t parameter value).", - type => 'string', - }, - tagged_only => { - description => "Only use logical volumes tagged with 'pve-vm-ID'.", - type => 'boolean', - } + vgname => { + description => "Volume group name.", + type => 'string', + format => 'pve-storage-vgname', + }, + base => { + description => "Base volume. 
This volume is automatically activated.", + type => 'string', + format => 'pve-volume-id', + }, + saferemove => { + description => "Zero-out data when removing LVs.", + type => 'boolean', + }, + saferemove_throughput => { + description => "Wipe throughput (cstream -t parameter value).", + type => 'string', + }, + tagged_only => { + description => "Only use logical volumes tagged with 'pve-vm-ID'.", + type => 'boolean', + }, }; } sub options { return { - vgname => { fixed => 1 }, - nodes => { optional => 1 }, - shared => { optional => 1 }, - disable => { optional => 1 }, - saferemove => { optional => 1 }, - saferemove_throughput => { optional => 1 }, - content => { optional => 1 }, - base => { fixed => 1, optional => 1 }, - tagged_only => { optional => 1 }, - bwlimit => { optional => 1 }, + vgname => { fixed => 1 }, + nodes => { optional => 1 }, + shared => { optional => 1 }, + disable => { optional => 1 }, + saferemove => { optional => 1 }, + saferemove_throughput => { optional => 1 }, + content => { optional => 1 }, + base => { fixed => 1, optional => 1 }, + tagged_only => { optional => 1 }, + bwlimit => { optional => 1 }, }; } @@ -268,21 +328,21 @@ sub on_add_hook { my ($class, $storeid, $scfg, %param) = @_; if (my $base = $scfg->{base}) { - my ($baseid, $volname) = PVE::Storage::parse_volume_id($base); + my ($baseid, $volname) = PVE::Storage::parse_volume_id($base); - my $cfg = PVE::Storage::config(); - my $basecfg = PVE::Storage::storage_config ($cfg, $baseid, 1); - die "base storage ID '$baseid' does not exist\n" if !$basecfg; + my $cfg = PVE::Storage::config(); + my $basecfg = PVE::Storage::storage_config($cfg, $baseid, 1); + die "base storage ID '$baseid' does not exist\n" if !$basecfg; - # we only support iscsi for now - die "unsupported base type '$basecfg->{type}'" - if $basecfg->{type} ne 'iscsi'; + # we only support iscsi for now + die "unsupported base type '$basecfg->{type}'" + if $basecfg->{type} ne 'iscsi'; - my $path = PVE::Storage::path($cfg, $base); + my $path = PVE::Storage::path($cfg, $base); - PVE::Storage::activate_storage($cfg, $baseid); + PVE::Storage::activate_storage($cfg, $baseid); - lvm_create_volume_group($path, $scfg->{vgname}, $scfg->{shared}); + lvm_create_volume_group($path, $scfg->{vgname}, $scfg->{shared}); } return; @@ -294,7 +354,7 @@ sub parse_volname { PVE::Storage::Plugin::parse_lvm_name($volname); if ($volname =~ m/^(vm-(\d+)-\S+)$/) { - return ('images', $1, $2, undef, undef, undef, 'raw'); + return ('images', $1, $2, undef, undef, undef, 'raw'); } die "unable to parse lvm volume name '$volname'\n"; @@ -303,7 +363,7 @@ sub parse_volname { sub filesystem_path { my ($class, $scfg, $volname, $snapname) = @_; - die "lvm snapshot is not implemented"if defined($snapname); + die "lvm snapshot is not implemented" if defined($snapname); my ($vtype, $name, $vmid) = $class->parse_volname($volname); @@ -333,7 +393,7 @@ sub find_free_diskname { my $lvs = lvm_list_volumes($vg); - my $disk_list = [ keys %{$lvs->{$vg}} ]; + my $disk_list = [keys %{ $lvs->{$vg} }]; return PVE::Storage::Plugin::get_next_vm_diskname($disk_list, $storeid, $vmid, undef, $scfg); } @@ -342,12 +402,12 @@ sub lvcreate { my ($vg, $name, $size, $tags) = @_; if ($size =~ m/\d$/) { # no unit is given - $size .= "k"; # default to kilobytes + $size .= "k"; # default to kilobytes } my $cmd = ['/sbin/lvcreate', '-aly', '-Wy', '--yes', '--size', $size, '--name', $name]; for my $tag (@$tags) { - push @$cmd, '--addtag', $tag; + push @$cmd, '--addtag', $tag; } push @$cmd, $vg; @@ -358,8 +418,8 @@ sub 
lvrename { my ($vg, $oldname, $newname) = @_; run_command( - ['/sbin/lvrename', $vg, $oldname, $newname], - errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error", + ['/sbin/lvrename', $vg, $oldname, $newname], + errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error", ); } @@ -369,20 +429,20 @@ sub alloc_image { die "unsupported format '$fmt'" if $fmt ne 'raw'; die "illegal name '$name' - should be 'vm-$vmid-*'\n" - if $name && $name !~ m/^vm-$vmid-/; + if $name && $name !~ m/^vm-$vmid-/; my $vgs = lvm_vgs(); my $vg = $scfg->{vgname}; - die "no such volume group '$vg'\n" if !defined ($vgs->{$vg}); + die "no such volume group '$vg'\n" if !defined($vgs->{$vg}); my $free = int($vgs->{$vg}->{free}); die "not enough free space ($free < $size)\n" if $free < $size; $name = $class->find_free_diskname($storeid, $scfg, $vmid) - if !$name; + if !$name; lvcreate($vg, $name, $size, ["pve-vm-$vmid"]); @@ -398,31 +458,47 @@ sub free_image { # and to allow thin provisioning my $zero_out_worker = sub { - print "zero-out data on image $volname (/dev/$vg/del-$volname)\n"; + print "zero-out data on image $volname (/dev/$vg/del-$volname)\n"; - # wipe throughput up to 10MB/s by default; may be overwritten with saferemove_throughput - my $throughput = '-10485760'; - if ($scfg->{saferemove_throughput}) { - $throughput = $scfg->{saferemove_throughput}; - } + # wipe throughput up to 10MB/s by default; may be overwritten with saferemove_throughput + my $throughput = '-10485760'; + if ($scfg->{saferemove_throughput}) { + $throughput = $scfg->{saferemove_throughput}; + } - my $cmd = [ - '/usr/bin/cstream', - '-i', '/dev/zero', - '-o', "/dev/$vg/del-$volname", - '-T', '10', - '-v', '1', - '-b', '1048576', - '-t', "$throughput" - ]; - eval { run_command($cmd, errmsg => "zero out finished (note: 'No space left on device' is ok here)"); }; - warn $@ if $@; + my $cmd = [ + '/usr/bin/cstream', + '-i', + '/dev/zero', + '-o', + "/dev/$vg/del-$volname", + '-T', + '10', + '-v', + '1', + '-b', + '1048576', + '-t', + "$throughput", + ]; + eval { + run_command( + $cmd, + errmsg => "zero out finished (note: 'No space left on device' is ok here)", + ); + }; + warn $@ if $@; - $class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"]; - run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error"); - }); - print "successfully removed volume $volname ($vg/del-$volname)\n"; + $class->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"]; + run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error"); + }, + ); + print "successfully removed volume $volname ($vg/del-$volname)\n"; }; my $cmd = ['/sbin/lvchange', '-aly', "$vg/$volname"]; @@ -431,14 +507,14 @@ sub free_image { run_command($cmd, errmsg => "can't refresh LV '$vg/$volname' to zero-out its data"); if ($scfg->{saferemove}) { - # avoid long running task, so we only rename here - $cmd = ['/sbin/lvrename', $vg, $volname, "del-$volname"]; - run_command($cmd, errmsg => "lvrename '$vg/$volname' error"); - return $zero_out_worker; + # avoid long running task, so we only rename here + $cmd = ['/sbin/lvrename', $vg, $volname, "del-$volname"]; + run_command($cmd, errmsg => "lvrename '$vg/$volname' error"); + return $zero_out_worker; } else { - my $tmpvg = $scfg->{vgname}; - $cmd = ['/sbin/lvremove', '-f', "$tmpvg/$volname"]; - run_command($cmd, errmsg => "lvremove '$tmpvg/$volname' error"); + my $tmpvg = $scfg->{vgname}; + $cmd = 
['/sbin/lvremove', '-f', "$tmpvg/$volname"]; + run_command($cmd, errmsg => "lvremove '$tmpvg/$volname' error"); } return undef; @@ -461,32 +537,36 @@ sub list_images { if (my $dat = $cache->{lvs}->{$vgname}) { - foreach my $volname (keys %$dat) { + foreach my $volname (keys %$dat) { - next if $volname !~ m/^vm-(\d+)-/; - my $owner = $1; + next if $volname !~ m/^vm-(\d+)-/; + my $owner = $1; - my $info = $dat->{$volname}; + my $info = $dat->{$volname}; - next if $scfg->{tagged_only} && !&$check_tags($info->{tags}); + next if $scfg->{tagged_only} && !&$check_tags($info->{tags}); - # Allow mirrored and RAID LVs - next if $info->{lv_type} !~ m/^[-mMrR]$/; + # Allow mirrored and RAID LVs + next if $info->{lv_type} !~ m/^[-mMrR]$/; - my $volid = "$storeid:$volname"; + my $volid = "$storeid:$volname"; - if ($vollist) { - my $found = grep { $_ eq $volid } @$vollist; - next if !$found; - } else { - next if defined($vmid) && ($owner ne $vmid); - } + if ($vollist) { + my $found = grep { $_ eq $volid } @$vollist; + next if !$found; + } else { + next if defined($vmid) && ($owner ne $vmid); + } - push @$res, { - volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner, - ctime => $info->{ctime}, - }; - } + push @$res, + { + volid => $volid, + format => 'raw', + size => $info->{lv_size}, + vmid => $owner, + ctime => $info->{ctime}, + }; + } } return $res; @@ -499,8 +579,8 @@ sub status { my $vgname = $scfg->{vgname}; - if (my $info = $cache->{vgs}->{$vgname}) { - return ($info->{size}, $info->{free}, $info->{size} - $info->{free}, 1); + if (my $info = $cache->{vgs}->{$vgname}) { + return ($info->{size}, $info->{free}, $info->{size} - $info->{free}, 1); } return undef; @@ -513,12 +593,17 @@ sub activate_storage { # In LVM2, vgscans take place automatically; # this is just to be sure - if ($cache->{vgs} && !$cache->{vgscaned} && - !$cache->{vgs}->{$scfg->{vgname}}) { - $cache->{vgscaned} = 1; - my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes']; - eval { run_command($cmd, outfunc => sub {}); }; - warn $@ if $@; + if ( + $cache->{vgs} + && !$cache->{vgscaned} + && !$cache->{vgs}->{ $scfg->{vgname} } + ) { + $cache->{vgscaned} = 1; + my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes']; + eval { + run_command($cmd, outfunc => sub { }); + }; + warn $@ if $@; } # we do not acticate any volumes here ('vgchange -aly') @@ -549,7 +634,7 @@ sub deactivate_volume { my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_; my $path = $class->path($scfg, $volname, $storeid, $snapname); - return if ! -b $path; + return if !-b $path; my $cmd = ['/sbin/lvchange', '-aln', $path]; run_command($cmd, errmsg => "can't deactivate LV '$path'"); @@ -558,14 +643,19 @@ sub deactivate_volume { sub volume_resize { my ($class, $scfg, $storeid, $volname, $size, $running) = @_; - $size = ($size/1024/1024) . "M"; + $size = ($size / 1024 / 1024) . 
"M"; my $path = $class->path($scfg, $volname); my $cmd = ['/sbin/lvextend', '-L', $size, $path]; - $class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub { - run_command($cmd, errmsg => "error resizing volume '$path'"); - }); + $class->cluster_lock_storage( + $storeid, + $scfg->{shared}, + undef, + sub { + run_command($cmd, errmsg => "error resizing volume '$path'"); + }, + ); return 1; } @@ -574,14 +664,29 @@ sub volume_size_info { my ($class, $scfg, $storeid, $volname, $timeout) = @_; my $path = $class->filesystem_path($scfg, $volname); - my $cmd = ['/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b', - '--unbuffered', '--nosuffix', '--options', 'lv_size', $path]; + my $cmd = [ + '/sbin/lvs', + '--separator', + ':', + '--noheadings', + '--units', + 'b', + '--unbuffered', + '--nosuffix', + '--options', + 'lv_size', + $path, + ]; my $size; - run_command($cmd, timeout => $timeout, errmsg => "can't get size of '$path'", - outfunc => sub { - $size = int(shift); - }); + run_command( + $cmd, + timeout => $timeout, + errmsg => "can't get size of '$path'", + outfunc => sub { + $size = int(shift); + }, + ); return wantarray ? ($size, 'raw', 0, undef) : $size; } @@ -607,18 +712,17 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - copy => { base => 1, current => 1}, - rename => {current => 1}, + copy => { base => 1, current => 1 }, + rename => { current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; - if($snapname){ - $key = 'snap'; - }else{ - $key = $isBase ? 'base' : 'current'; + if ($snapname) { + $key = 'snap'; + } else { + $key = $isBase ? 
'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -628,27 +732,33 @@ sub volume_has_feature { sub volume_export_formats { my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_; return () if defined($snapshot); # lvm-thin only - return volume_import_formats($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots); + return volume_import_formats( + $class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, + ); } sub volume_export { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_; + my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) + = @_; die "volume export format $format not available for $class\n" - if $format ne 'raw+size'; + if $format ne 'raw+size'; die "cannot export volumes together with their snapshots in $class\n" - if $with_snapshots; + if $with_snapshots; die "cannot export a snapshot in $class\n" if defined($snapshot); die "cannot export an incremental stream in $class\n" if defined($base_snapshot); my $file = $class->path($scfg, $volname, $storeid); my $size; # should be faster than querying LVM, also checks for the device file's availability - run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub { - my ($line) = @_; - die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/; - $size = int($1); - }); + run_command( + ['/sbin/blockdev', '--getsize64', $file], + outfunc => sub { + my ($line) = @_; + die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/; + $size = int($1); + }, + ); PVE::Storage::Plugin::write_common_header($fh, $size); - run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh)); + run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . 
fileno($fh)); } sub volume_import_formats { @@ -659,53 +769,64 @@ sub volume_import_formats { } sub volume_import { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_; + my ( + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ) = @_; die "volume import format $format not available for $class\n" - if $format ne 'raw+size'; + if $format ne 'raw+size'; die "cannot import volumes together with their snapshots in $class\n" - if $with_snapshots; + if $with_snapshots; die "cannot import an incremental stream in $class\n" if defined($base_snapshot); my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) = - $class->parse_volname($volname); + $class->parse_volname($volname); die "cannot import format $format into a file of format $file_format\n" - if $file_format ne 'raw'; + if $file_format ne 'raw'; my $vg = $scfg->{vgname}; my $lvs = lvm_list_volumes($vg); if ($lvs->{$vg}->{$volname}) { - die "volume $vg/$volname already exists\n" if !$allow_rename; - warn "volume $vg/$volname already exists - importing with a different name\n"; - $name = undef; + die "volume $vg/$volname already exists\n" if !$allow_rename; + warn "volume $vg/$volname already exists - importing with a different name\n"; + $name = undef; } my ($size) = PVE::Storage::Plugin::read_common_header($fh); $size = PVE::Storage::Common::align_size_up($size, 1024) / 1024; eval { - my $allocname = $class->alloc_image($storeid, $scfg, $vmid, 'raw', $name, $size); - my $oldname = $volname; - $volname = $allocname; - if (defined($name) && $allocname ne $oldname) { - die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n"; - } - my $file = $class->path($scfg, $volname, $storeid) - or die "internal error: failed to get path to newly allocated volume $volname\n"; + my $allocname = $class->alloc_image($storeid, $scfg, $vmid, 'raw', $name, $size); + my $oldname = $volname; + $volname = $allocname; + if (defined($name) && $allocname ne $oldname) { + die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n"; + } + my $file = $class->path($scfg, $volname, $storeid) + or die "internal error: failed to get path to newly allocated volume $volname\n"; - $class->volume_import_write($fh, $file); + $class->volume_import_write($fh, $file); }; if (my $err = $@) { - my $cleanup_worker = eval { $class->free_image($storeid, $scfg, $volname, 0) }; - warn $@ if $@; + my $cleanup_worker = eval { $class->free_image($storeid, $scfg, $volname, 0) }; + warn $@ if $@; - if ($cleanup_worker) { - my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + if ($cleanup_worker) { + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); - $rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker); - } + $rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker); + } - die $err; + die $err; } return "$storeid:$volname"; @@ -713,29 +834,22 @@ sub volume_import { sub volume_import_write { my ($class, $input_fh, $output_file) = @_; - run_command(['dd', "of=$output_file", 'bs=64k'], - input => '<&'.fileno($input_fh)); + run_command(['dd', "of=$output_file", 'bs=64k'], input => '<&' . 
fileno($input_fh)); } sub rename_volume { my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_; my ( - undef, - $source_image, - $source_vmid, - $base_name, - $base_vmid, - undef, - $format + undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format, ) = $class->parse_volname($source_volname); $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format) - if !$target_volname; + if !$target_volname; my $vg = $scfg->{vgname}; my $lvs = lvm_list_volumes($vg); die "target volume '${target_volname}' already exists\n" - if ($lvs->{$vg}->{$target_volname}); + if ($lvs->{$vg}->{$target_volname}); lvrename($vg, $source_volname, $target_volname); return "${storeid}:${target_volname}"; diff --git a/src/PVE/Storage/LunCmd/Comstar.pm b/src/PVE/Storage/LunCmd/Comstar.pm index 527e4ba..0f491cd 100644 --- a/src/PVE/Storage/LunCmd/Comstar.pm +++ b/src/PVE/Storage/LunCmd/Comstar.pm @@ -17,12 +17,12 @@ my $get_lun_cmd_map = sub { my $sbdadmcmd = "/usr/sbin/sbdadm"; my $cmdmap = { - create_lu => { cmd => $stmfadmcmd, method => 'create-lu' }, - delete_lu => { cmd => $stmfadmcmd, method => 'delete-lu' }, - import_lu => { cmd => $stmfadmcmd, method => 'import-lu' }, - modify_lu => { cmd => $stmfadmcmd, method => 'modify-lu' }, - add_view => { cmd => $stmfadmcmd, method => 'add-view' }, - list_view => { cmd => $stmfadmcmd, method => 'list-view' }, + create_lu => { cmd => $stmfadmcmd, method => 'create-lu' }, + delete_lu => { cmd => $stmfadmcmd, method => 'delete-lu' }, + import_lu => { cmd => $stmfadmcmd, method => 'import-lu' }, + modify_lu => { cmd => $stmfadmcmd, method => 'modify-lu' }, + add_view => { cmd => $stmfadmcmd, method => 'add-view' }, + list_view => { cmd => $stmfadmcmd, method => 'list-view' }, list_lu => { cmd => $sbdadmcmd, method => 'list-lu' }, }; @@ -45,15 +45,15 @@ sub run_lun_command { $timeout = 10 if !$timeout; my $output = sub { - my $line = shift; - $msg .= "$line\n"; + my $line = shift; + $msg .= "$line\n"; }; if ($method eq 'create_lu') { - my $wcd = 'false'; + my $wcd = 'false'; if ($scfg->{nowritecache}) { - $wcd = 'true'; - } + $wcd = 'true'; + } my $prefix = '600144f'; my $digest = md5_hex($params[0]); $digest =~ /(\w{7}(.*))/; @@ -68,13 +68,13 @@ sub run_lun_command { @params = undef; } elsif ($method eq 'add_view') { if ($scfg->{comstar_tg}) { - unshift @params, $scfg->{comstar_tg}; - unshift @params, '--target-group'; - } + unshift @params, $scfg->{comstar_tg}; + unshift @params, '--target-group'; + } if ($scfg->{comstar_hg}) { - unshift @params, $scfg->{comstar_hg}; - unshift @params, '--host-group'; - } + unshift @params, $scfg->{comstar_hg}; + unshift @params, '--host-group'; + } } my $cmdmap = $get_lun_cmd_map->($method); @@ -83,7 +83,15 @@ sub run_lun_command { $target = 'root@' . 
$scfg->{portal}; - my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $lunmethod, @params]; + my $cmd = [ + @ssh_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + $target, + $luncmd, + $lunmethod, + @params, + ]; run_command($cmd, outfunc => $output, timeout => $timeout); diff --git a/src/PVE/Storage/LunCmd/Iet.pm b/src/PVE/Storage/LunCmd/Iet.pm index 5b09b88..bd93ac3 100644 --- a/src/PVE/Storage/LunCmd/Iet.pm +++ b/src/PVE/Storage/LunCmd/Iet.pm @@ -48,36 +48,42 @@ my $execute_command = sub { $timeout = 10 if !$timeout; my $output = sub { - my $line = shift; - $msg .= "$line\n"; + my $line = shift; + $msg .= "$line\n"; }; my $errfunc = sub { - my $line = shift; - $err .= "$line"; + my $line = shift; + $err .= "$line"; }; if ($exec eq 'scp') { $target = 'root@[' . $scfg->{portal} . ']'; - $cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", '--', $method, "$target:$params[0]"]; + $cmd = [ + @scp_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + '--', + $method, + "$target:$params[0]", + ]; } else { $target = 'root@' . $scfg->{portal}; - $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method, @params]; + $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method, + @params]; } - eval { - run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); - }; + eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); }; if ($@) { $res = { result => 0, msg => $err, - } + }; } else { $res = { result => 1, msg => $msg, - } + }; } return $res; @@ -104,10 +110,9 @@ my $read_config = sub { $target = 'root@' . $scfg->{portal}; - my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE]; - eval { - run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); - }; + my $cmd = + [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE]; + eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); }; if ($@) { die $err if ($err !~ /No such file or directory/); die "No configuration found. Install iet on $scfg->{portal}" if $msg eq ''; @@ -141,7 +146,7 @@ my $parser = sub { foreach (@cfgfile) { $line++; if ($_ =~ /^\s*Target\s*([\w\-\:\.]+)\s*$/) { - if ($1 eq $scfg->{target} && ! $cfg_target) { + if ($1 eq $scfg->{target} && !$cfg_target) { # start colect info die "$line: Parse error [$_]" if $SETTINGS; $SETTINGS->{target} = $1; @@ -157,7 +162,7 @@ my $parser = sub { } else { if ($cfg_target) { $SETTINGS->{text} .= "$_\n"; - next if ($_ =~ /^\s*#/ || ! 
$_); + next if ($_ =~ /^\s*#/ || !$_); my $option = $_; if ($_ =~ /^(\w+)\s*#/) { $option = $1; @@ -176,7 +181,7 @@ my $parser = sub { foreach (@lun) { my @lun_opt = split '=', $_; die "$line: Parse error [$option]" unless (scalar(@lun_opt) == 2); - $conf->{$lun_opt[0]} = $lun_opt[1]; + $conf->{ $lun_opt[0] } = $lun_opt[1]; } if ($conf->{Path} && $conf->{Path} =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) { $conf->{include} = 1; @@ -184,7 +189,7 @@ my $parser = sub { $conf->{include} = 0; } $conf->{lun} = $num; - push @{$SETTINGS->{luns}}, $conf; + push @{ $SETTINGS->{luns} }, $conf; } else { die "$line: Parse error [$option]"; } @@ -202,19 +207,24 @@ my $update_config = sub { my $config = ''; while ((my $option, my $value) = each(%$SETTINGS)) { - next if ($option eq 'include' || $option eq 'luns' || $option eq 'Path' || $option eq 'text' || $option eq 'used'); + next + if ($option eq 'include' + || $option eq 'luns' + || $option eq 'Path' + || $option eq 'text' + || $option eq 'used'); if ($option eq 'target') { $config = "\n\nTarget " . $SETTINGS->{target} . "\n" . $config; } else { $config .= "\t$option\t\t\t$value\n"; } } - foreach my $lun (@{$SETTINGS->{luns}}) { + foreach my $lun (@{ $SETTINGS->{luns} }) { my $lun_opt = ''; while ((my $option, my $value) = each(%$lun)) { next if ($option eq 'include' || $option eq 'lun' || $option eq 'Path'); if ($lun_opt eq '') { - $lun_opt = $option . '=' . $value; + $lun_opt = $option . '=' . $value; } else { $lun_opt .= ',' . $option . '=' . $value; } @@ -260,12 +270,12 @@ my $get_lu_name = sub { my $used = (); my $i; - if (! exists $SETTINGS->{used}) { + if (!exists $SETTINGS->{used}) { for ($i = 0; $i < $MAX_LUNS; $i++) { $used->{$i} = 0; } - foreach my $lun (@{$SETTINGS->{luns}}) { - $used->{$lun->{lun}} = 1; + foreach my $lun (@{ $SETTINGS->{luns} }) { + $used->{ $lun->{lun} } = 1; } $SETTINGS->{used} = $used; } @@ -282,14 +292,14 @@ my $get_lu_name = sub { my $init_lu_name = sub { my $used = (); - if (! 
exists($SETTINGS->{used})) { + if (!exists($SETTINGS->{used})) { for (my $i = 0; $i < $MAX_LUNS; $i++) { $used->{$i} = 0; } $SETTINGS->{used} = $used; } - foreach my $lun (@{$SETTINGS->{luns}}) { - $SETTINGS->{used}->{$lun->{lun}} = 1; + foreach my $lun (@{ $SETTINGS->{luns} }) { + $SETTINGS->{used}->{ $lun->{lun} } = 1; } }; @@ -297,7 +307,7 @@ my $free_lu_name = sub { my ($lu_name) = @_; my $new; - foreach my $lun (@{$SETTINGS->{luns}}) { + foreach my $lun (@{ $SETTINGS->{luns} }) { if ($lun->{lun} != $lu_name) { push @$new, $lun; } @@ -310,7 +320,8 @@ my $free_lu_name = sub { my $make_lun = sub { my ($scfg, $path) = @_; - die 'Maximum number of LUNs per target is 16384' if scalar @{$SETTINGS->{luns}} >= $MAX_LUNS; + die 'Maximum number of LUNs per target is 16384' + if scalar @{ $SETTINGS->{luns} } >= $MAX_LUNS; my $lun = $get_lu_name->(); my $conf = { @@ -319,7 +330,7 @@ my $make_lun = sub { Type => 'blockio', include => 1, }; - push @{$SETTINGS->{luns}}, $conf; + push @{ $SETTINGS->{luns} }, $conf; return $conf; }; @@ -329,7 +340,7 @@ my $list_view = sub { my $lun = undef; my $object = $params[0]; - foreach my $lun (@{$SETTINGS->{luns}}) { + foreach my $lun (@{ $SETTINGS->{luns} }) { next unless $lun->{include} == 1; if ($lun->{Path} =~ /^$object$/) { return $lun->{lun} if (defined($lun->{lun})); @@ -345,7 +356,7 @@ my $list_lun = sub { my $name = undef; my $object = $params[0]; - foreach my $lun (@{$SETTINGS->{luns}}) { + foreach my $lun (@{ $SETTINGS->{luns} }) { next unless $lun->{include} == 1; if ($lun->{Path} =~ /^$object$/) { return $lun->{Path}; @@ -381,12 +392,12 @@ my $create_lun = sub { my $delete_lun = sub { my ($scfg, $timeout, $method, @params) = @_; - my $res = {msg => undef}; + my $res = { msg => undef }; my $path = $params[0]; my $tid = $get_target_tid->($scfg); - foreach my $lun (@{$SETTINGS->{luns}}) { + foreach my $lun (@{ $SETTINGS->{luns} }) { if ($lun->{Path} eq $path) { @params = ('--op', 'delete', "--tid=$tid", "--lun=$lun->{lun}"); $res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params); @@ -417,7 +428,7 @@ my $modify_lun = sub { my $path = $params[1]; my $tid = $get_target_tid->($scfg); - foreach my $cfg (@{$SETTINGS->{luns}}) { + foreach my $cfg (@{ $SETTINGS->{luns} }) { if ($cfg->{Path} eq $path) { $lun = $cfg; last; @@ -446,13 +457,13 @@ my $get_lun_cmd_map = sub { my ($method) = @_; my $cmdmap = { - create_lu => { cmd => $create_lun }, - delete_lu => { cmd => $delete_lun }, - import_lu => { cmd => $import_lun }, - modify_lu => { cmd => $modify_lun }, - add_view => { cmd => $add_view }, - list_view => { cmd => $list_view }, - list_lu => { cmd => $list_lun }, + create_lu => { cmd => $create_lun }, + delete_lu => { cmd => $delete_lun }, + import_lu => { cmd => $import_lun }, + modify_lu => { cmd => $modify_lun }, + add_view => { cmd => $add_view }, + list_view => { cmd => $list_view }, + list_lu => { cmd => $list_lun }, }; die "unknown command '$method'" unless exists $cmdmap->{$method}; diff --git a/src/PVE/Storage/LunCmd/Istgt.pm b/src/PVE/Storage/LunCmd/Istgt.pm index 2f758f9..3dc9d43 100644 --- a/src/PVE/Storage/LunCmd/Istgt.pm +++ b/src/PVE/Storage/LunCmd/Istgt.pm @@ -10,12 +10,12 @@ use warnings; use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach); my @CONFIG_FILES = ( - '/usr/local/etc/istgt/istgt.conf', # FreeBSD, FreeNAS - '/var/etc/iscsi/istgt.conf' # NAS4Free + '/usr/local/etc/istgt/istgt.conf', # FreeBSD, FreeNAS + '/var/etc/iscsi/istgt.conf' # NAS4Free ); my @DAEMONS = ( - 
'/usr/local/etc/rc.d/istgt', # FreeBSD, FreeNAS - '/var/etc/rc.d/istgt' # NAS4Free + '/usr/local/etc/rc.d/istgt', # FreeBSD, FreeNAS + '/var/etc/rc.d/istgt' # NAS4Free ); # A logical unit can max have 63 LUNs @@ -69,13 +69,13 @@ my $read_config = sub { $timeout = 10 if !$timeout; my $output = sub { - my $line = shift; - $msg .= "$line\n"; + my $line = shift; + $msg .= "$line\n"; }; my $errfunc = sub { - my $line = shift; - $err .= "$line"; + my $line = shift; + $err .= "$line"; }; $target = 'root@' . $scfg->{portal}; @@ -83,7 +83,8 @@ my $read_config = sub { my $daemon = 0; foreach my $config (@CONFIG_FILES) { $err = undef; - my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config]; + my $cmd = + [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config]; eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); }; @@ -119,17 +120,17 @@ my $parse_size = sub { return 0 if !$text; if ($text =~ m/^(\d+(\.\d+)?)([TGMK]B)?$/) { - my ($size, $reminder, $unit) = ($1, $2, $3); - return $size if !$unit; - if ($unit eq 'KB') { - $size *= 1024; - } elsif ($unit eq 'MB') { - $size *= 1024*1024; - } elsif ($unit eq 'GB') { - $size *= 1024*1024*1024; - } elsif ($unit eq 'TB') { - $size *= 1024*1024*1024*1024; - } + my ($size, $reminder, $unit) = ($1, $2, $3); + return $size if !$unit; + if ($unit eq 'KB') { + $size *= 1024; + } elsif ($unit eq 'MB') { + $size *= 1024 * 1024; + } elsif ($unit eq 'GB') { + $size *= 1024 * 1024 * 1024; + } elsif ($unit eq 'TB') { + $size *= 1024 * 1024 * 1024 * 1024; + } if ($reminder) { $size = ceil($size); } @@ -151,9 +152,9 @@ my $size_with_unit = sub { if ($size =~ m/^\d+$/) { ++$n and $size /= 1024 until $size < 1024; if ($size =~ /\./) { - return sprintf "%.2f%s", $size, ( qw[bytes KB MB GB TB] )[ $n ]; + return sprintf "%.2f%s", $size, (qw[bytes KB MB GB TB])[$n]; } else { - return sprintf "%d%s", $size, ( qw[bytes KB MB GB TB] )[ $n ]; + return sprintf "%d%s", $size, (qw[bytes KB MB GB TB])[$n]; } } die "$size: Not a number"; @@ -164,18 +165,18 @@ my $lun_dumper = sub { my $config = ''; $config .= "\n[$lun]\n"; - $config .= 'TargetName ' . $SETTINGS->{$lun}->{TargetName} . "\n"; - $config .= 'Mapping ' . $SETTINGS->{$lun}->{Mapping} . "\n"; - $config .= 'AuthGroup ' . $SETTINGS->{$lun}->{AuthGroup} . "\n"; - $config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n"; - $config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n"; + $config .= 'TargetName ' . $SETTINGS->{$lun}->{TargetName} . "\n"; + $config .= 'Mapping ' . $SETTINGS->{$lun}->{Mapping} . "\n"; + $config .= 'AuthGroup ' . $SETTINGS->{$lun}->{AuthGroup} . "\n"; + $config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n"; + $config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n"; - foreach my $conf (@{$SETTINGS->{$lun}->{luns}}) { - $config .= "$conf->{lun} Storage " . $conf->{Storage}; + foreach my $conf (@{ $SETTINGS->{$lun}->{luns} }) { + $config .= "$conf->{lun} Storage " . $conf->{Storage}; $config .= ' ' . $size_with_unit->($conf->{Size}) . "\n"; foreach ($conf->{options}) { if ($_) { - $config .= "$conf->{lun} Option " . $_ . "\n"; + $config .= "$conf->{lun} Option " . $_ . "\n"; } } } @@ -189,11 +190,11 @@ my $get_lu_name = sub { my $used = (); my $i; - if (! 
exists $SETTINGS->{$target}->{used}) { + if (!exists $SETTINGS->{$target}->{used}) { for ($i = 0; $i < $MAX_LUNS; $i++) { $used->{$i} = 0; } - foreach my $lun (@{$SETTINGS->{$target}->{luns}}) { + foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) { $lun->{lun} =~ /^LUN(\d+)$/; $used->{$1} = 1; } @@ -213,13 +214,13 @@ my $init_lu_name = sub { my ($target) = @_; my $used = (); - if (! exists($SETTINGS->{$target}->{used})) { + if (!exists($SETTINGS->{$target}->{used})) { for (my $i = 0; $i < $MAX_LUNS; $i++) { $used->{$i} = 0; } $SETTINGS->{$target}->{used} = $used; } - foreach my $lun (@{$SETTINGS->{$target}->{luns}}) { + foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) { $lun->{lun} =~ /^LUN(\d+)$/; $SETTINGS->{$target}->{used}->{$1} = 1; } @@ -236,7 +237,8 @@ my $make_lun = sub { my ($scfg, $path) = @_; my $target = $SETTINGS->{current}; - die 'Maximum number of LUNs per target is 63' if scalar @{$SETTINGS->{$target}->{luns}} >= $MAX_LUNS; + die 'Maximum number of LUNs per target is 63' + if scalar @{ $SETTINGS->{$target}->{luns} } >= $MAX_LUNS; my @options = (); my $lun = $get_lu_name->($target); @@ -249,7 +251,7 @@ my $make_lun = sub { Size => 'AUTO', options => @options, }; - push @{$SETTINGS->{$target}->{luns}}, $conf; + push @{ $SETTINGS->{$target}->{luns} }, $conf; return $conf->{lun}; }; @@ -290,7 +292,7 @@ my $parser = sub { if ($arg2 =~ /^Storage\s*(.+)/i) { $SETTINGS->{$lun}->{$arg1}->{storage} = $1; } elsif ($arg2 =~ /^Option\s*(.+)/i) { - push @{$SETTINGS->{$lun}->{$arg1}->{options}}, $1; + push @{ $SETTINGS->{$lun}->{$arg1}->{options} }, $1; } else { $SETTINGS->{$lun}->{$arg1} = $arg2; } @@ -307,10 +309,10 @@ my $parser = sub { my $base = get_base; for (my $i = 1; $i <= $max; $i++) { - my $target = $SETTINGS->{nodebase}.':'.$SETTINGS->{"LogicalUnit$i"}->{TargetName}; + my $target = $SETTINGS->{nodebase} . ':' . $SETTINGS->{"LogicalUnit$i"}->{TargetName}; if ($target eq $scfg->{target}) { my $lu = (); - while ((my $key, my $val) = each(%{$SETTINGS->{"LogicalUnit$i"}})) { + while ((my $key, my $val) = each(%{ $SETTINGS->{"LogicalUnit$i"} })) { if ($key =~ /^LUN\d+/) { $val->{storage} =~ /^([\w\/\-]+)\s+(\w+)/; my $storage = $1; @@ -318,7 +320,7 @@ my $parser = sub { my $conf = undef; my @options = (); if ($val->{options}) { - @options = @{$val->{options}}; + @options = @{ $val->{options} }; } if ($storage =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) { $conf = { @@ -326,7 +328,7 @@ my $parser = sub { Storage => $storage, Size => $size, options => @options, - } + }; } push @$lu, $conf if $conf; delete $SETTINGS->{"LogicalUnit$i"}->{$key}; @@ -349,9 +351,9 @@ my $list_lun = sub { my $name = undef; my $object = $params[0]; - for my $key (keys %$SETTINGS) { + for my $key (keys %$SETTINGS) { next unless $key =~ /^LogicalUnit\d+$/; - foreach my $lun (@{$SETTINGS->{$key}->{luns}}) { + foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) { if ($lun->{Storage} =~ /^$object$/) { return $lun->{Storage}; } @@ -399,7 +401,7 @@ my $delete_lun = sub { my $target = $SETTINGS->{current}; my $luns = (); - foreach my $conf (@{$SETTINGS->{$target}->{luns}}) { + foreach my $conf (@{ $SETTINGS->{$target}->{luns} }) { if ($conf->{Storage} =~ /^$params[0]$/) { $free_lu_name->($target, $conf->{lun}); } else { @@ -448,7 +450,7 @@ my $add_view = sub { params => \@params, }; } else { - @params = ('-HUP', '`cat '. "$SETTINGS->{pidfile}`"); + @params = ('-HUP', '`cat ' . 
"$SETTINGS->{pidfile}`"); $cmdmap = { cmd => 'ssh', method => 'kill', @@ -477,9 +479,9 @@ my $list_view = sub { my $lun = undef; my $object = $params[0]; - for my $key (keys %$SETTINGS) { + for my $key (keys %$SETTINGS) { next unless $key =~ /^LogicalUnit\d+$/; - foreach my $lun (@{$SETTINGS->{$key}->{luns}}) { + foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) { if ($lun->{Storage} =~ /^$object$/) { if ($lun->{lun} =~ /^LUN(\d+)/) { return $1; @@ -496,13 +498,13 @@ my $get_lun_cmd_map = sub { my ($method) = @_; my $cmdmap = { - create_lu => { cmd => $create_lun }, - delete_lu => { cmd => $delete_lun }, - import_lu => { cmd => $import_lun }, - modify_lu => { cmd => $modify_lun }, - add_view => { cmd => $add_view }, - list_view => { cmd => $list_view }, - list_lu => { cmd => $list_lun }, + create_lu => { cmd => $create_lun }, + delete_lu => { cmd => $delete_lun }, + import_lu => { cmd => $import_lun }, + modify_lu => { cmd => $modify_lun }, + add_view => { cmd => $add_view }, + list_view => { cmd => $list_view }, + list_lu => { cmd => $list_lun }, }; die "unknown command '$method'" unless exists $cmdmap->{$method}; @@ -522,8 +524,8 @@ sub run_lun_command { my $is_add_view = 0; my $output = sub { - my $line = shift; - $msg .= "$line\n"; + my $line = shift; + $msg .= "$line\n"; }; $target = 'root@' . $scfg->{portal}; @@ -531,18 +533,31 @@ sub run_lun_command { $parser->($scfg) unless $SETTINGS; my $cmdmap = $get_lun_cmd_map->($method); if ($method eq 'add_view') { - $is_add_view = 1 ; + $is_add_view = 1; $timeout = 15; } if (ref $cmdmap->{cmd} eq 'CODE') { $res = $cmdmap->{cmd}->($scfg, $timeout, $method, @params); if (ref $res) { $method = $res->{method}; - @params = @{$res->{params}}; + @params = @{ $res->{params} }; if ($res->{cmd} eq 'scp') { - $cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $method, "$target:$params[0]"]; + $cmd = [ + @scp_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + $method, + "$target:$params[0]", + ]; } else { - $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $method, @params]; + $cmd = [ + @ssh_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + $target, + $method, + @params, + ]; } } else { return $res; @@ -550,12 +565,18 @@ sub run_lun_command { } else { $luncmd = $cmdmap->{cmd}; $method = $cmdmap->{method}; - $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $method, @params]; + $cmd = [ + @ssh_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + $target, + $luncmd, + $method, + @params, + ]; } - eval { - run_command($cmd, outfunc => $output, timeout => $timeout); - }; + eval { run_command($cmd, outfunc => $output, timeout => $timeout); }; if ($@ && $is_add_view) { my $err = $@; if ($OLD_CONFIG) { @@ -565,15 +586,11 @@ sub run_lun_command { print $fh $OLD_CONFIG; close $fh; $cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $file, $CONFIG_FILE]; - eval { - run_command($cmd, outfunc => $output, timeout => $timeout); - }; + eval { run_command($cmd, outfunc => $output, timeout => $timeout); }; $err1 = $@ if $@; unlink $file; die "$err\n$err1" if $err1; - eval { - run_lun_command($scfg, undef, 'add_view', 'restart'); - }; + eval { run_lun_command($scfg, undef, 'add_view', 'restart'); }; die "$err\n$@" if ($@); } die $err; diff --git a/src/PVE/Storage/LunCmd/LIO.pm b/src/PVE/Storage/LunCmd/LIO.pm index 9264e46..183d43b 100644 --- a/src/PVE/Storage/LunCmd/LIO.pm +++ b/src/PVE/Storage/LunCmd/LIO.pm @@ -29,8 +29,8 @@ sub get_base; # targetcli constants # config file 
location differs from distro to distro my @CONFIG_FILES = ( - '/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al - '/etc/target/saveconfig.json' , # ArchLinux, CentOS + '/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al + '/etc/target/saveconfig.json', # ArchLinux, CentOS ); my $BACKSTORE = '/backstores/block'; @@ -58,21 +58,27 @@ my $execute_remote_command = sub { my $errfunc = sub { $err .= "$_[0]\n" }; $target = 'root@' . $scfg->{portal}; - $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $remote_command, @params]; + $cmd = [ + @ssh_cmd, + '-i', + "$id_rsa_path/$scfg->{portal}_id_rsa", + $target, + '--', + $remote_command, + @params, + ]; - eval { - run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); - }; + eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); }; if ($@) { - $res = { - result => 0, - msg => $err, - } + $res = { + result => 0, + msg => $err, + }; } else { - $res = { - result => 1, - msg => $msg, - } + $res = { + result => 1, + msg => $msg, + }; } return $res; @@ -96,14 +102,15 @@ my $read_config = sub { $target = 'root@' . $scfg->{portal}; foreach my $oneFile (@CONFIG_FILES) { - my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile]; - eval { - run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); - }; - if ($@) { - die $err if ($err !~ /No such file or directory/); - } - return $msg if $msg ne ''; + my $cmd = + [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile]; + eval { + run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); + }; + if ($@) { + die $err if ($err !~ /No such file or directory/); + } + return $msg if $msg ne ''; } die "No configuration found. 
Install targetcli on $scfg->{portal}\n" if $msg eq ''; @@ -123,11 +130,11 @@ my $get_config = sub { # Return settings of a specific target my $get_target_settings = sub { - my ($scfg) = @_; + my ($scfg) = @_; - my $id = "$scfg->{portal}.$scfg->{target}"; - return undef if !$SETTINGS; - return $SETTINGS->{$id}; + my $id = "$scfg->{portal}.$scfg->{target}"; + return undef if !$SETTINGS; + return $SETTINGS->{$id}; }; # fetches and parses targetcli config from the portal @@ -137,46 +144,47 @@ my $parser = sub { my $tpg_tag; if ($tpg =~ /^tpg(\d+)$/) { - $tpg_tag = $1; + $tpg_tag = $1; } else { - die "Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n"; + die + "Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n"; } my $config = $get_config->($scfg); my $jsonconfig = JSON->new->utf8->decode($config); my $haveTarget = 0; - foreach my $target (@{$jsonconfig->{targets}}) { - # only interested in iSCSI targets - next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target}); - # find correct TPG - foreach my $tpg (@{$target->{tpgs}}) { - if ($tpg->{tag} == $tpg_tag) { - my $res = []; - foreach my $lun (@{$tpg->{luns}}) { - my ($idx, $storage_object); - if ($lun->{index} =~ /^(\d+)$/) { - $idx = $1; - } - if ($lun->{storage_object} =~ m|^($BACKSTORE/.*)$|) { - $storage_object = $1; - } - die "Invalid lun definition in config!\n" - if !(defined($idx) && defined($storage_object)); - push @$res, { index => $idx, storage_object => $storage_object }; - } + foreach my $target (@{ $jsonconfig->{targets} }) { + # only interested in iSCSI targets + next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target}); + # find correct TPG + foreach my $tpg (@{ $target->{tpgs} }) { + if ($tpg->{tag} == $tpg_tag) { + my $res = []; + foreach my $lun (@{ $tpg->{luns} }) { + my ($idx, $storage_object); + if ($lun->{index} =~ /^(\d+)$/) { + $idx = $1; + } + if ($lun->{storage_object} =~ m|^($BACKSTORE/.*)$|) { + $storage_object = $1; + } + die "Invalid lun definition in config!\n" + if !(defined($idx) && defined($storage_object)); + push @$res, { index => $idx, storage_object => $storage_object }; + } - my $id = "$scfg->{portal}.$scfg->{target}"; - $SETTINGS->{$id}->{luns} = $res; - $haveTarget = 1; - last; - } - } + my $id = "$scfg->{portal}.$scfg->{target}"; + $SETTINGS->{$id}->{luns} = $res; + $haveTarget = 1; + last; + } + } } # seriously unhappy if the target server lacks iSCSI target configuration ... 
if (!$haveTarget) { - die "target portal group tpg$tpg_tag not found!\n"; + die "target portal group tpg$tpg_tag not found!\n"; } }; @@ -194,10 +202,10 @@ my $free_lu_name = sub { my $new = []; my $target = $get_target_settings->($scfg); - foreach my $lun (@{$target->{luns}}) { - if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") { - push @$new, $lun; - } + foreach my $lun (@{ $target->{luns} }) { + if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") { + push @$new, $lun; + } } $target->{luns} = $new; @@ -208,12 +216,12 @@ my $register_lun = sub { my ($scfg, $idx, $volname) = @_; my $conf = { - index => $idx, - storage_object => "$BACKSTORE/$volname", - is_new => 1, + index => $idx, + storage_object => "$BACKSTORE/$volname", + is_new => 1, }; my $target = $get_target_settings->($scfg); - push @{$target->{luns}}, $conf; + push @{ $target->{luns} }, $conf; return $conf; }; @@ -225,17 +233,17 @@ my $extract_volname = sub { my $base = get_base; if ($lunpath =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) { - $volname = $1; - my $prefix = $get_backstore_prefix->($scfg); - my $target = $get_target_settings->($scfg); - foreach my $lun (@{$target->{luns}}) { - # If we have a lun with the pool prefix matching this vol, then return this one - # like pool-pve-vm-100-disk-0 - # Else, just fallback to the old name scheme which is vm-100-disk-0 - if ($lun->{storage_object} =~ /^$BACKSTORE\/($prefix$volname)$/) { - return $1; - } - } + $volname = $1; + my $prefix = $get_backstore_prefix->($scfg); + my $target = $get_target_settings->($scfg); + foreach my $lun (@{ $target->{luns} }) { + # If we have a lun with the pool prefix matching this vol, then return this one + # like pool-pve-vm-100-disk-0 + # Else, just fallback to the old name scheme which is vm-100-disk-0 + if ($lun->{storage_object} =~ /^$BACKSTORE\/($prefix$volname)$/) { + return $1; + } + } } return $volname; @@ -252,10 +260,10 @@ my $list_view = sub { return undef if !defined($volname); # nothing to search for.. - foreach my $lun (@{$target->{luns}}) { - if ($lun->{storage_object} eq "$BACKSTORE/$volname") { - return $lun->{index}; - } + foreach my $lun (@{ $target->{luns} }) { + if ($lun->{storage_object} eq "$BACKSTORE/$volname") { + return $lun->{index}; + } } return $lun; @@ -269,10 +277,10 @@ my $list_lun = sub { my $volname = $extract_volname->($scfg, $object); my $target = $get_target_settings->($scfg); - foreach my $lun (@{$target->{luns}}) { - if ($lun->{storage_object} eq "$BACKSTORE/$volname") { - return $object; - } + foreach my $lun (@{ $target->{luns} }) { + if ($lun->{storage_object} eq "$BACKSTORE/$volname") { + return $object; + } } return undef; @@ -283,7 +291,7 @@ my $create_lun = sub { my ($scfg, $timeout, $method, @params) = @_; if ($list_lun->($scfg, $timeout, $method, @params)) { - die "$params[0]: LUN already exists!"; + die "$params[0]: LUN already exists!"; } my $device = $params[0]; @@ -294,18 +302,18 @@ my $create_lun = sub { my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n"; # step 1: create backstore for device - my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device" ); + my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device"); my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); die $res->{msg} if !$res->{result}; # step 2: enable unmap support on the backstore - @cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1' ); + @cliparams = ($BACKSTORE . '/' . 
$volname, 'set', 'attribute', 'emulate_tpu=1'); $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); die $res->{msg} if !$res->{result}; # step 3: register lun with target # targetcli /iscsi/iqn.2018-04.at.bestsolution.somehost:target/tpg1/luns/ create /backstores/block/foobar - @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname" ); + @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname"); $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); die $res->{msg} if !$res->{result}; @@ -314,9 +322,9 @@ my $create_lun = sub { # changed without our knowledge, so relying on the number that targetcli returns my $lun_idx; if ($res->{msg} =~ /LUN (\d+)/) { - $lun_idx = $1; + $lun_idx = $1; } else { - die "unable to determine new LUN index: $res->{msg}"; + die "unable to determine new LUN index: $res->{msg}"; } $register_lun->($scfg, $lun_idx, $volname); @@ -330,7 +338,7 @@ my $create_lun = sub { my $delete_lun = sub { my ($scfg, $timeout, $method, @params) = @_; - my $res = {msg => undef}; + my $res = { msg => undef }; my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n"; @@ -338,30 +346,30 @@ my $delete_lun = sub { my $volname = $extract_volname->($scfg, $path); my $target = $get_target_settings->($scfg); - foreach my $lun (@{$target->{luns}}) { - next if $lun->{storage_object} ne "$BACKSTORE/$volname"; + foreach my $lun (@{ $target->{luns} }) { + next if $lun->{storage_object} ne "$BACKSTORE/$volname"; - # step 1: delete the lun - my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}" ); - my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); - do { - die $res->{msg}; - } unless $res->{result}; + # step 1: delete the lun + my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}"); + my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); + do { + die $res->{msg}; + } unless $res->{result}; - # step 2: delete the backstore - @cliparams = ($BACKSTORE, 'delete', $volname); - $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); - do { - die $res->{msg}; - } unless $res->{result}; + # step 2: delete the backstore + @cliparams = ($BACKSTORE, 'delete', $volname); + $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams); + do { + die $res->{msg}; + } unless $res->{result}; - # step 3: save to be safe ... - $execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig'); + # step 3: save to be safe ... 
+ $execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig'); - # update internal cache - $free_lu_name->($scfg, $volname); + # update internal cache + $free_lu_name->($scfg, $volname); - last; + last; } return $res->{msg}; @@ -387,13 +395,13 @@ my $add_view = sub { }; my %lun_cmd_map = ( - create_lu => $create_lun, - delete_lu => $delete_lun, - import_lu => $import_lun, - modify_lu => $modify_lun, - add_view => $add_view, - list_view => $list_view, - list_lu => $list_lun, + create_lu => $create_lun, + delete_lu => $delete_lun, + import_lu => $import_lun, + modify_lu => $modify_lun, + add_view => $add_view, + list_view => $list_view, + list_lu => $list_lun, ); sub run_lun_command { @@ -403,8 +411,8 @@ sub run_lun_command { my $timediff = time - $SETTINGS_TIMESTAMP; my $target = $get_target_settings->($scfg); if (!$target || $timediff > $SETTINGS_MAXAGE) { - $SETTINGS_TIMESTAMP = time; - $parser->($scfg); + $SETTINGS_TIMESTAMP = time; + $parser->($scfg); } die "unknown command '$method'" unless exists $lun_cmd_map{$method}; diff --git a/src/PVE/Storage/LvmThinPlugin.pm b/src/PVE/Storage/LvmThinPlugin.pm index 49a4dcb..c244c91 100644 --- a/src/PVE/Storage/LvmThinPlugin.pm +++ b/src/PVE/Storage/LvmThinPlugin.pm @@ -30,28 +30,29 @@ sub type { sub plugindata { return { - content => [ {images => 1, rootdir => 1}, { images => 1, rootdir => 1}], - 'sensitive-properties' => {}, + content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }], + 'sensitive-properties' => {}, }; } sub properties { return { - thinpool => { - description => "LVM thin pool LV name.", - type => 'string', format => 'pve-storage-vgname', - }, + thinpool => { + description => "LVM thin pool LV name.", + type => 'string', + format => 'pve-storage-vgname', + }, }; } sub options { return { - thinpool => { fixed => 1 }, - vgname => { fixed => 1 }, + thinpool => { fixed => 1 }, + vgname => { fixed => 1 }, nodes => { optional => 1 }, - disable => { optional => 1 }, - content => { optional => 1 }, - bwlimit => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, + bwlimit => { optional => 1 }, }; } @@ -64,7 +65,7 @@ sub parse_volname { PVE::Storage::Plugin::parse_lvm_name($volname); if ($volname =~ m/^((vm|base)-(\d+)-\S+)$/) { - return ('images', $1, $3, undef, undef, $2 eq 'base', 'raw'); + return ('images', $1, $3, undef, undef, $2 eq 'base', 'raw'); } die "unable to parse lvm volume name '$volname'\n"; @@ -77,7 +78,7 @@ sub filesystem_path { my $vg = $scfg->{vgname}; - my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname": "/dev/$vg/$name"; + my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname" : "/dev/$vg/$name"; return wantarray ? 
($path, $vmid, $vtype) : $path; } @@ -88,19 +89,27 @@ sub alloc_image { die "unsupported format '$fmt'" if $fmt ne 'raw'; die "illegal name '$name' - should be 'vm-$vmid-*'\n" - if $name && $name !~ m/^vm-$vmid-/; + if $name && $name !~ m/^vm-$vmid-/; my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(); my $vg = $scfg->{vgname}; - die "no such volume group '$vg'\n" if !defined ($vgs->{$vg}); + die "no such volume group '$vg'\n" if !defined($vgs->{$vg}); $name = $class->find_free_diskname($storeid, $scfg, $vmid) - if !$name; + if !$name; - my $cmd = ['/sbin/lvcreate', '-aly', '-V', "${size}k", '--name', $name, - '--thinpool', "$vg/$scfg->{thinpool}" ]; + my $cmd = [ + '/sbin/lvcreate', + '-aly', + '-V', + "${size}k", + '--name', + $name, + '--thinpool', + "$vg/$scfg->{thinpool}", + ]; run_command($cmd, errmsg => "lvcreate '$vg/$name' error"); @@ -114,20 +123,20 @@ sub free_image { my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg); - if (my $dat = $lvs->{$scfg->{vgname}}) { + if (my $dat = $lvs->{ $scfg->{vgname} }) { - # remove all volume snapshots first - foreach my $lv (keys %$dat) { - next if $lv !~ m/^snap_${volname}_${PVE::JSONSchema::CONFIGID_RE}$/; - my $cmd = ['/sbin/lvremove', '-f', "$vg/$lv"]; - run_command($cmd, errmsg => "lvremove snapshot '$vg/$lv' error"); - } + # remove all volume snapshots first + foreach my $lv (keys %$dat) { + next if $lv !~ m/^snap_${volname}_${PVE::JSONSchema::CONFIGID_RE}$/; + my $cmd = ['/sbin/lvremove', '-f', "$vg/$lv"]; + run_command($cmd, errmsg => "lvremove snapshot '$vg/$lv' error"); + } - # finally remove original (if exists) - if ($dat->{$volname}) { - my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"]; - run_command($cmd, errmsg => "lvremove '$vg/$volname' error"); - } + # finally remove original (if exists) + if ($dat->{$volname}) { + my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"]; + run_command($cmd, errmsg => "lvremove '$vg/$volname' error"); + } } return undef; @@ -144,31 +153,35 @@ sub list_images { if (my $dat = $cache->{lvs}->{$vgname}) { - foreach my $volname (keys %$dat) { + foreach my $volname (keys %$dat) { - next if $volname !~ m/^(vm|base)-(\d+)-/; - my $owner = $2; + next if $volname !~ m/^(vm|base)-(\d+)-/; + my $owner = $2; - my $info = $dat->{$volname}; + my $info = $dat->{$volname}; - next if $info->{lv_type} ne 'V'; + next if $info->{lv_type} ne 'V'; - next if $info->{pool_lv} ne $scfg->{thinpool}; + next if $info->{pool_lv} ne $scfg->{thinpool}; - my $volid = "$storeid:$volname"; + my $volid = "$storeid:$volname"; - if ($vollist) { - my $found = grep { $_ eq $volid } @$vollist; - next if !$found; - } else { - next if defined($vmid) && ($owner ne $vmid); - } + if ($vollist) { + my $found = grep { $_ eq $volid } @$vollist; + next if !$found; + } else { + next if defined($vmid) && ($owner ne $vmid); + } - push @$res, { - volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner, - ctime => $info->{ctime}, - }; - } + push @$res, + { + volid => $volid, + format => 'raw', + size => $info->{lv_size}, + vmid => $owner, + ctime => $info->{ctime}, + }; + } } return $res; @@ -181,13 +194,13 @@ sub list_thinpools { my $thinpools = []; foreach my $vg (keys %$lvs) { - foreach my $lvname (keys %{$lvs->{$vg}}) { - next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't'; - my $lv = $lvs->{$vg}->{$lvname}; - $lv->{lv} = $lvname; - $lv->{vg} = $vg; - push @$thinpools, $lv; - } + foreach my $lvname (keys %{ $lvs->{$vg} }) { + next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't'; + my $lv = $lvs->{$vg}->{$lvname}; + $lv->{lv} = 
$lvname; + $lv->{vg} = $vg; + push @$thinpools, $lv; + } } return $thinpools; @@ -198,17 +211,17 @@ sub status { my $lvs = $cache->{lvs} ||= PVE::Storage::LVMPlugin::lvm_list_volumes(); - return if !$lvs->{$scfg->{vgname}}; + return if !$lvs->{ $scfg->{vgname} }; - my $info = $lvs->{$scfg->{vgname}}->{$scfg->{thinpool}}; + my $info = $lvs->{ $scfg->{vgname} }->{ $scfg->{thinpool} }; return if !$info || $info->{lv_type} ne 't' || !$info->{lv_size}; return ( - $info->{lv_size}, - $info->{lv_size} - $info->{used}, - $info->{used}, - $info->{lv_state} eq 'a' ? 1 : 0, + $info->{lv_size}, + $info->{lv_size} - $info->{used}, + $info->{used}, + $info->{lv_state} eq 'a' ? 1 : 0, ); } @@ -221,7 +234,10 @@ my $activate_lv = sub { return if $lvs->{$vg}->{$lv}->{lv_state} eq 'a'; - run_command(['lvchange', '-ay', '-K', "$vg/$lv"], errmsg => "activating LV '$vg/$lv' failed"); + run_command( + ['lvchange', '-ay', '-K', "$vg/$lv"], + errmsg => "activating LV '$vg/$lv' failed", + ); $lvs->{$vg}->{$lv}->{lv_state} = 'a'; # update cache @@ -256,7 +272,7 @@ sub deactivate_volume { run_command(['lvchange', '-an', "$vg/$lv"], errmsg => "deactivate_volume '$vg/$lv' error"); $cache->{lvs}->{$vg}->{$lv}->{lv_state} = '-' # update cache - if $cache->{lvs} && $cache->{lvs}->{$vg} && $cache->{lvs}->{$vg}->{$lv}; + if $cache->{lvs} && $cache->{lvs}->{$vg} && $cache->{lvs}->{$vg}->{$lv}; return; } @@ -269,14 +285,13 @@ sub clone_image { my $lv; if ($snap) { - $lv = "$vg/snap_${volname}_$snap"; + $lv = "$vg/snap_${volname}_$snap"; } else { - my ($vtype, undef, undef, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + my ($vtype, undef, undef, undef, undef, $isBase, $format) = $class->parse_volname($volname); - die "clone_image only works on base images\n" if !$isBase; + die "clone_image only works on base images\n" if !$isBase; - $lv = "$vg/$volname"; + $lv = "$vg/$volname"; } my $name = $class->find_free_diskname($storeid, $scfg, $vmid); @@ -290,8 +305,7 @@ sub clone_image { sub create_base { my ($class, $storeid, $scfg, $volname) = @_; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); die "create_base not possible with base image\n" if $isBase; @@ -299,11 +313,11 @@ sub create_base { my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg); if (my $dat = $lvs->{$vg}) { - # to avoid confusion, reject if we find volume snapshots - foreach my $lv (keys %$dat) { - die "unable to create base volume - found snaphost '$lv'\n" - if $lv =~ m/^snap_${volname}_(\w+)$/; - } + # to avoid confusion, reject if we find volume snapshots + foreach my $lv (keys %$dat) { + die "unable to create base volume - found snaphost '$lv'\n" + if $lv =~ m/^snap_${volname}_(\w+)$/; + } } my $newname = $name; @@ -362,22 +376,21 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - snapshot => { current => 1 }, - clone => { base => 1, snap => 1}, - template => { current => 1}, - copy => { base => 1, current => 1, snap => 1}, - sparseinit => { base => 1, current => 1}, - rename => {current => 1}, + snapshot => { current => 1 }, + clone => { base => 1, snap => 1 }, + template => { current => 1 }, + copy => { base => 1, current => 1, snap => 1 }, + sparseinit => { base => 1, current => 1 }, + rename => { current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my 
($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; - if($snapname){ - $key = 'snap'; - }else{ - $key = $isBase ? 'base' : 'current'; + if ($snapname) { + $key = 'snap'; + } else { + $key = $isBase ? 'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -385,51 +398,62 @@ sub volume_has_feature { } sub volume_import { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_; + my ( + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ) = @_; my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) = - $class->parse_volname($volname); + $class->parse_volname($volname); if (!$isBase) { - return $class->SUPER::volume_import( - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, - $allow_rename - ); + return $class->SUPER::volume_import( + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ); } else { - my $tempname; - my $vg = $scfg->{vgname}; - my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg); - if ($lvs->{$vg}->{$volname}) { - die "volume $vg/$volname already exists\n" if !$allow_rename; - warn "volume $vg/$volname already exists - importing with a different name\n"; + my $tempname; + my $vg = $scfg->{vgname}; + my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg); + if ($lvs->{$vg}->{$volname}) { + die "volume $vg/$volname already exists\n" if !$allow_rename; + warn "volume $vg/$volname already exists - importing with a different name\n"; - $tempname = $class->find_free_diskname($storeid, $scfg, $vmid); - } else { - $tempname = $volname; - $tempname =~ s/base/vm/; - } + $tempname = $class->find_free_diskname($storeid, $scfg, $vmid); + } else { + $tempname = $volname; + $tempname =~ s/base/vm/; + } - my $newvolid = $class->SUPER::volume_import( - $scfg, - $storeid, - $fh, - $tempname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, - $allow_rename - ); - ($storeid,my $newname) = PVE::Storage::parse_volume_id($newvolid); + my $newvolid = $class->SUPER::volume_import( + $scfg, + $storeid, + $fh, + $tempname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ); + ($storeid, my $newname) = PVE::Storage::parse_volume_id($newvolid); - $volname = $class->create_base($storeid, $scfg, $newname); + $volname = $class->create_base($storeid, $scfg, $newname); } return "$storeid:$volname"; @@ -438,8 +462,10 @@ sub volume_import { # used in LVMPlugin->volume_import sub volume_import_write { my ($class, $input_fh, $output_file) = @_; - run_command(['dd', "of=$output_file", 'conv=sparse', 'bs=64k'], - input => '<&'.fileno($input_fh)); + run_command( + ['dd', "of=$output_file", 'conv=sparse', 'bs=64k'], + input => '<&' . 
fileno($input_fh), + ); } 1; diff --git a/src/PVE/Storage/NFSPlugin.pm b/src/PVE/Storage/NFSPlugin.pm index cb2ae18..65c5e11 100644 --- a/src/PVE/Storage/NFSPlugin.pm +++ b/src/PVE/Storage/NFSPlugin.pm @@ -24,9 +24,9 @@ sub nfs_is_mounted { $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata; return $mountpoint if grep { - $_->[2] =~ /^nfs/ && - $_->[0] =~ m|^\Q$source\E/?$| && - $_->[1] eq $mountpoint + $_->[2] =~ /^nfs/ + && $_->[0] =~ m|^\Q$source\E/?$| + && $_->[1] eq $mountpoint } @$mountdata; return undef; } @@ -39,8 +39,8 @@ sub nfs_mount { my $cmd = ['/bin/mount', '-t', 'nfs', $source, $mountpoint]; if ($options) { - push @$cmd, '-o', $options; - } + push @$cmd, '-o', $options; + } run_command($cmd, errmsg => "mount error"); } @@ -53,49 +53,60 @@ sub type { sub plugindata { return { - content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, - { images => 1 }], - format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ], - 'sensitive-properties' => {}, + content => [ + { + images => 1, + rootdir => 1, + vztmpl => 1, + iso => 1, + backup => 1, + snippets => 1, + import => 1, + }, + { images => 1 }, + ], + format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'], + 'sensitive-properties' => {}, }; -} +} sub properties { return { - export => { - description => "NFS export path.", - type => 'string', format => 'pve-storage-path', - }, - server => { - description => "Server IP or DNS name.", - type => 'string', format => 'pve-storage-server', - }, + export => { + description => "NFS export path.", + type => 'string', + format => 'pve-storage-path', + }, + server => { + description => "Server IP or DNS name.", + type => 'string', + format => 'pve-storage-server', + }, }; } sub options { return { - path => { fixed => 1 }, - 'content-dirs' => { optional => 1 }, - server => { fixed => 1 }, - export => { fixed => 1 }, - nodes => { optional => 1 }, - disable => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - options => { optional => 1 }, - content => { optional => 1 }, - format => { optional => 1 }, - mkdir => { optional => 1 }, - 'create-base-path' => { optional => 1 }, - 'create-subdirs' => { optional => 1 }, - bwlimit => { optional => 1 }, - preallocation => { optional => 1 }, + path => { fixed => 1 }, + 'content-dirs' => { optional => 1 }, + server => { fixed => 1 }, + export => { fixed => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + options => { optional => 1 }, + content => { optional => 1 }, + format => { optional => 1 }, + mkdir => { optional => 1 }, + 'create-base-path' => { optional => 1 }, + 'create-subdirs' => { optional => 1 }, + bwlimit => { optional => 1 }, + preallocation => { optional => 1 }, }; } - sub check_config { my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_; @@ -110,13 +121,13 @@ sub status { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; my $server = $scfg->{server}; my $export = $scfg->{export}; - return undef if !nfs_is_mounted($server, $export, $path, $cache->{mountdata}); + return undef if !nfs_is_mounted($server, $export, $path, $cache->{mountdata}); return $class->SUPER::status($storeid, $scfg, $cache); } @@ -125,20 
+136,20 @@ sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; my $server = $scfg->{server}; my $export = $scfg->{export}; if (!nfs_is_mounted($server, $export, $path, $cache->{mountdata})) { - # NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline - $class->config_aware_base_mkdir($scfg, $path); + # NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline + $class->config_aware_base_mkdir($scfg, $path); - die "unable to activate storage '$storeid' - " . - "directory '$path' does not exist\n" if ! -d $path; + die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n" + if !-d $path; - nfs_mount($server, $export, $path, $scfg->{options}); + nfs_mount($server, $export, $path, $scfg->{options}); } $class->SUPER::activate_storage($storeid, $scfg, $cache); @@ -148,15 +159,15 @@ sub deactivate_storage { my ($class, $storeid, $scfg, $cache) = @_; $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts() - if !$cache->{mountdata}; + if !$cache->{mountdata}; my $path = $scfg->{path}; my $server = $scfg->{server}; my $export = $scfg->{export}; - if (nfs_is_mounted($server, $export, $path, $cache->{mountdata})) { - my $cmd = ['/bin/umount', $path]; - run_command($cmd, errmsg => 'umount error'); + if (nfs_is_mounted($server, $export, $path, $cache->{mountdata})) { + my $cmd = ['/bin/umount', $path]; + run_command($cmd, errmsg => 'umount error'); } } @@ -170,33 +181,35 @@ sub check_connection { my $is_v4 = defined($opts) && $opts =~ /vers=4.*/; if ($is_v4) { - my $ip = PVE::JSONSchema::pve_verify_ip($server, 1); - if (!defined($ip)) { - $ip = PVE::Network::get_ip_from_hostname($server); - } + my $ip = PVE::JSONSchema::pve_verify_ip($server, 1); + if (!defined($ip)) { + $ip = PVE::Network::get_ip_from_hostname($server); + } - my $transport = PVE::JSONSchema::pve_verify_ipv4($ip, 1) ? 'tcp' : 'tcp6'; + my $transport = PVE::JSONSchema::pve_verify_ipv4($ip, 1) ? 'tcp' : 'tcp6'; - # nfsv4 uses a pseudo-filesystem always beginning with / - # no exports are listed - $cmd = ['/usr/sbin/rpcinfo', '-T', $transport, $ip, 'nfs', '4']; + # nfsv4 uses a pseudo-filesystem always beginning with / + # no exports are listed + $cmd = ['/usr/sbin/rpcinfo', '-T', $transport, $ip, 'nfs', '4']; } else { - $cmd = ['/sbin/showmount', '--no-headers', '--exports', $server]; + $cmd = ['/sbin/showmount', '--no-headers', '--exports', $server]; } - eval { run_command($cmd, timeout => 10, outfunc => sub {}, errfunc => sub {}) }; + eval { + run_command($cmd, timeout => 10, outfunc => sub { }, errfunc => sub { }); + }; if (my $err = $@) { - if ($is_v4) { - my $port = 2049; - $port = $1 if defined($opts) && $opts =~ /port=(\d+)/; + if ($is_v4) { + my $port = 2049; + $port = $1 if defined($opts) && $opts =~ /port=(\d+)/; - # rpcinfo is expected to work when the port is 0 (see 'man 5 nfs') and tcp_ping() - # defaults to port 7 when passing in 0. - return 0 if $port == 0; + # rpcinfo is expected to work when the port is 0 (see 'man 5 nfs') and tcp_ping() + # defaults to port 7 when passing in 0. 
+ return 0 if $port == 0; - return PVE::Network::tcp_ping($server, $port, 2); - } - return 0; + return PVE::Network::tcp_ping($server, $port, 2); + } + return 0; } return 1; diff --git a/src/PVE/Storage/PBSPlugin.pm b/src/PVE/Storage/PBSPlugin.pm index 9f75794..00170f5 100644 --- a/src/PVE/Storage/PBSPlugin.pm +++ b/src/PVE/Storage/PBSPlugin.pm @@ -29,51 +29,53 @@ sub type { sub plugindata { return { - content => [ {backup => 1, none => 1}, { backup => 1 }], - 'sensitive-properties' => { - 'encryption-key' => 1, - 'master-pubkey' => 1, - password => 1, - }, + content => [{ backup => 1, none => 1 }, { backup => 1 }], + 'sensitive-properties' => { + 'encryption-key' => 1, + 'master-pubkey' => 1, + password => 1, + }, }; } sub properties { return { - datastore => { - description => "Proxmox Backup Server datastore name.", - type => 'string', - }, - # openssl s_client -connect :8007 2>&1 |openssl x509 -fingerprint -sha256 - fingerprint => get_standard_option('fingerprint-sha256'), - 'encryption-key' => { - description => "Encryption key. Use 'autogen' to generate one automatically without passphrase.", - type => 'string', - }, - 'master-pubkey' => { - description => "Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.", - type => 'string', - }, + datastore => { + description => "Proxmox Backup Server datastore name.", + type => 'string', + }, + # openssl s_client -connect :8007 2>&1 |openssl x509 -fingerprint -sha256 + fingerprint => get_standard_option('fingerprint-sha256'), + 'encryption-key' => { + description => + "Encryption key. Use 'autogen' to generate one automatically without passphrase.", + type => 'string', + }, + 'master-pubkey' => { + description => + "Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.", + type => 'string', + }, }; } sub options { return { - server => { fixed => 1 }, - datastore => { fixed => 1 }, - namespace => { optional => 1 }, - port => { optional => 1 }, - nodes => { optional => 1}, - disable => { optional => 1}, - content => { optional => 1}, - username => { optional => 1 }, - password => { optional => 1 }, - 'encryption-key' => { optional => 1 }, - 'master-pubkey' => { optional => 1 }, - maxfiles => { optional => 1 }, - 'prune-backups' => { optional => 1 }, - 'max-protected-backups' => { optional => 1 }, - fingerprint => { optional => 1 }, + server => { fixed => 1 }, + datastore => { fixed => 1 }, + namespace => { optional => 1 }, + port => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, + username => { optional => 1 }, + password => { optional => 1 }, + 'encryption-key' => { optional => 1 }, + 'master-pubkey' => { optional => 1 }, + maxfiles => { optional => 1 }, + 'prune-backups' => { optional => 1 }, + 'max-protected-backups' => { optional => 1 }, + fingerprint => { optional => 1 }, }; } @@ -131,8 +133,8 @@ sub pbs_delete_encryption_key { my $pwfile = pbs_encryption_key_file_name($scfg, $storeid); if (!unlink $pwfile) { - return if $! == ENOENT; - die "failed to delete encryption key! $!\n"; + return if $! == ENOENT; + die "failed to delete encryption key! $!\n"; } delete $scfg->{'encryption-key'}; } @@ -153,13 +155,13 @@ sub pbs_open_encryption_key { my $keyfd; if (!open($keyfd, '<', $encryption_key_file)) { - if ($! 
== ENOENT) { - my $encryption_fp = $scfg->{'encryption-key'}; - die "encryption configured ('$encryption_fp') but no encryption key file found!\n" - if $encryption_fp; - return undef; - } - die "failed to open encryption key: $encryption_key_file: $!\n"; + if ($! == ENOENT) { + my $encryption_fp = $scfg->{'encryption-key'}; + die "encryption configured ('$encryption_fp') but no encryption key file found!\n" + if $encryption_fp; + return undef; + } + die "failed to open encryption key: $encryption_key_file: $!\n"; } return $keyfd; @@ -186,8 +188,8 @@ sub pbs_delete_master_pubkey { my $pwfile = pbs_master_pubkey_file_name($scfg, $storeid); if (!unlink $pwfile) { - return if $! == ENOENT; - die "failed to delete master public key! $!\n"; + return if $! == ENOENT; + die "failed to delete master public key! $!\n"; } delete $scfg->{'master-pubkey'}; } @@ -208,12 +210,12 @@ sub pbs_open_master_pubkey { my $keyfd; if (!open($keyfd, '<', $master_pubkey_file)) { - if ($! == ENOENT) { - die "master public key configured but no key file found!\n" - if $scfg->{'master-pubkey'}; - return undef; - } - die "failed to open master public key: $master_pubkey_file: $!\n"; + if ($! == ENOENT) { + die "master public key configured but no key file found!\n" + if $scfg->{'master-pubkey'}; + return undef; + } + die "failed to open master public key: $master_pubkey_file: $!\n"; } return $keyfd; @@ -244,24 +246,24 @@ my sub api_param_from_volname : prototype($$$) { my @tm = (POSIX::strptime($timestr, "%FT%TZ")); # expect sec, min, hour, mday, mon, year - die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0..5]; + die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0 .. 5]; my $btime; { - local $ENV{TZ} = 'UTC'; # $timestr is UTC + local $ENV{TZ} = 'UTC'; # $timestr is UTC - # Fill in isdst to avoid undef warning. No daylight saving time for UTC. - $tm[8] //= 0; + # Fill in isdst to avoid undef warning. No daylight saving time for UTC. + $tm[8] //= 0; - my $since_epoch = mktime(@tm) or die "error converting time from '$volname'\n"; - $btime = int($since_epoch); + my $since_epoch = mktime(@tm) or die "error converting time from '$volname'\n"; + $btime = int($since_epoch); } return { - (ns($scfg, 'ns')), - 'backup-type' => $btype, - 'backup-id' => $bid, - 'backup-time' => $btime, + (ns($scfg, 'ns')), + 'backup-type' => $btype, + 'backup-id' => $bid, + 'backup-time' => $btime, }; } @@ -283,7 +285,7 @@ my sub do_raw_client_cmd { my $client_exe = '/usr/bin/proxmox-backup-client'; die "executable not found '$client_exe'! Proxmox backup client not installed?\n" - if ! 
-x $client_exe; + if !-x $client_exe; my $repo = PVE::PBSClient::get_repository($scfg); @@ -298,29 +300,29 @@ my sub do_raw_client_cmd { # This must live in the top scope to not get closed before the `run_command` my ($keyfd, $master_fd); if ($use_crypto) { - if (defined($keyfd = pbs_open_encryption_key($scfg, $storeid))) { - my $flags = fcntl($keyfd, F_GETFD, 0) - // die "failed to get file descriptor flags: $!\n"; - fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC) - or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n"; - push @$cmd, '--crypt-mode=encrypt', '--keyfd='.fileno($keyfd); - if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) { - my $flags = fcntl($master_fd, F_GETFD, 0) - // die "failed to get file descriptor flags: $!\n"; - fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC) - or die "failed to remove FD_CLOEXEC from master public key file descriptor\n"; - push @$cmd, '--master-pubkey-fd='.fileno($master_fd); - } - } else { - push @$cmd, '--crypt-mode=none'; - } + if (defined($keyfd = pbs_open_encryption_key($scfg, $storeid))) { + my $flags = fcntl($keyfd, F_GETFD, 0) + // die "failed to get file descriptor flags: $!\n"; + fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC) + or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n"; + push @$cmd, '--crypt-mode=encrypt', '--keyfd=' . fileno($keyfd); + if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) { + my $flags = fcntl($master_fd, F_GETFD, 0) + // die "failed to get file descriptor flags: $!\n"; + fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC) + or die "failed to remove FD_CLOEXEC from master public key file descriptor\n"; + push @$cmd, '--master-pubkey-fd=' . fileno($master_fd); + } + } else { + push @$cmd, '--crypt-mode=none'; + } } push @$cmd, @$param if defined($param); push @$cmd, "--repository", $repo; if ($client_cmd ne 'status' && defined(my $ns = $scfg->{namespace})) { - push @$cmd, '--ns', $ns; + push @$cmd, '--ns', $ns; } local $ENV{PBS_PASSWORD} = pbs_get_password($scfg, $storeid); @@ -332,7 +334,7 @@ my sub do_raw_client_cmd { local $ENV{PROXMOX_OUTPUT_NO_HEADER} = 1; if (my $logfunc = $opts{logfunc}) { - $logfunc->("run: " . join(' ', @$cmd)); + $logfunc->("run: " . 
join(' ', @$cmd)); } run_command($cmd, %opts); @@ -357,12 +359,15 @@ sub run_client_cmd { my $outfunc = sub { $json_str .= "$_[0]\n" }; $param = [] if !defined($param); - $param = [ $param ] if !ref($param); + $param = [$param] if !ref($param); $param = [@$param, '--output-format=json'] if !$no_output; - do_raw_client_cmd($scfg, $storeid, $client_cmd, $param, - outfunc => $outfunc, errmsg => 'proxmox-backup-client failed'); + do_raw_client_cmd( + $scfg, $storeid, $client_cmd, $param, + outfunc => $outfunc, + errmsg => 'proxmox-backup-client failed', + ); return undef if $no_output; @@ -383,15 +388,18 @@ sub extract_vzdump_config { my $config_name; if ($format eq 'pbs-vm') { - $config_name = 'qemu-server.conf'; - } elsif ($format eq 'pbs-ct') { - $config_name = 'pct.conf'; + $config_name = 'qemu-server.conf'; + } elsif ($format eq 'pbs-ct') { + $config_name = 'pct.conf'; } else { - die "unable to extract configuration for backup format '$format'\n"; + die "unable to extract configuration for backup format '$format'\n"; } - do_raw_client_cmd($scfg, $storeid, 'restore', [ $name, $config_name, '-' ], - outfunc => $outfunc, errmsg => 'proxmox-backup-client failed'); + do_raw_client_cmd( + $scfg, $storeid, 'restore', [$name, $config_name, '-'], + outfunc => $outfunc, + errmsg => 'proxmox-backup-client failed', + ); return $config; } @@ -407,19 +415,19 @@ sub prune_backups { my $backup_groups = {}; if (defined($vmid) && defined($type)) { - # no need to get the list of volumes, we only got a single backup group anyway - $backup_groups->{"$type/$vmid"} = 1; + # no need to get the list of volumes, we only got a single backup group anyway + $backup_groups->{"$type/$vmid"} = 1; } else { - my $backups = eval { $class->list_volumes($storeid, $scfg, $vmid, ['backup']) }; - die "failed to get list of all backups to prune - $@" if $@; + my $backups = eval { $class->list_volumes($storeid, $scfg, $vmid, ['backup']) }; + die "failed to get list of all backups to prune - $@" if $@; - foreach my $backup (@{$backups}) { - (my $backup_type = $backup->{format}) =~ s/^pbs-//; - next if defined($type) && $backup_type ne $type; + foreach my $backup (@{$backups}) { + (my $backup_type = $backup->{format}) =~ s/^pbs-//; + next if defined($type) && $backup_type ne $type; - my $backup_group = "$backup_type/$backup->{vmid}"; - $backup_groups->{$backup_group} = 1; - } + my $backup_group = "$backup_type/$backup->{vmid}"; + $backup_groups->{$backup_group} = 1; + } } my @param; @@ -427,13 +435,13 @@ sub prune_backups { my $keep_all = delete $keep->{'keep-all'}; if (!$keep_all) { - foreach my $opt (keys %{$keep}) { - next if $keep->{$opt} == 0; - push @param, "--$opt"; - push @param, "$keep->{$opt}"; - } + foreach my $opt (keys %{$keep}) { + next if $keep->{$opt} == 0; + push @param, "--$opt"; + push @param, "$keep->{$opt}"; + } } else { # no need to pass anything to PBS - $keep = { 'keep-all' => 1 }; + $keep = { 'keep-all' => 1 }; } push @param, '--dry-run' if $dryrun; @@ -442,39 +450,40 @@ sub prune_backups { my $failed; foreach my $backup_group (keys %{$backup_groups}) { - $logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'") - if !$dryrun; - eval { - my $res = run_client_cmd($scfg, $storeid, 'prune', [ $backup_group, @param ]); + $logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'") + if !$dryrun; + eval { + my $res = run_client_cmd($scfg, $storeid, 'prune', [$backup_group, @param]); - foreach my $backup (@{$res}) { - die "result from proxmox-backup-client is not as 
expected\n" - if !defined($backup->{'backup-time'}) - || !defined($backup->{'backup-type'}) - || !defined($backup->{'backup-id'}) - || !defined($backup->{'keep'}); + foreach my $backup (@{$res}) { + die "result from proxmox-backup-client is not as expected\n" + if !defined($backup->{'backup-time'}) + || !defined($backup->{'backup-type'}) + || !defined($backup->{'backup-id'}) + || !defined($backup->{'keep'}); - my $ctime = $backup->{'backup-time'}; - my $type = $backup->{'backup-type'}; - my $vmid = $backup->{'backup-id'}; - my $volid = print_volid($storeid, $type, $vmid, $ctime); + my $ctime = $backup->{'backup-time'}; + my $type = $backup->{'backup-type'}; + my $vmid = $backup->{'backup-id'}; + my $volid = print_volid($storeid, $type, $vmid, $ctime); - my $mark = $backup->{keep} ? 'keep' : 'remove'; - $mark = 'protected' if $backup->{protected}; + my $mark = $backup->{keep} ? 'keep' : 'remove'; + $mark = 'protected' if $backup->{protected}; - push @{$prune_list}, { - ctime => $ctime, - mark => $mark, - type => $type eq 'vm' ? 'qemu' : 'lxc', - vmid => $vmid, - volid => $volid, - }; - } - }; - if (my $err = $@) { - $logfunc->('err', "prune '$backup_group': $err\n"); - $failed = 1; - } + push @{$prune_list}, + { + ctime => $ctime, + mark => $mark, + type => $type eq 'vm' ? 'qemu' : 'lxc', + vmid => $vmid, + volid => $volid, + }; + } + }; + if (my $err = $@) { + $logfunc->('err', "prune '$backup_group': $err\n"); + $failed = 1; + } } die "error pruning backups - check log\n" if $failed; @@ -485,7 +494,7 @@ my $autogen_encryption_key = sub { my ($scfg, $storeid) = @_; my $encfile = pbs_encryption_key_file_name($scfg, $storeid); if (-f $encfile) { - rename $encfile, "$encfile.old"; + rename $encfile, "$encfile.old"; } my $cmd = ['proxmox-backup-client', 'key', 'create', '--kdf', 'none', $encfile]; run_command($cmd, errmsg => 'failed to create encryption key'); @@ -498,38 +507,38 @@ sub on_add_hook { my $res = {}; if (defined(my $password = $param{password})) { - pbs_set_password($scfg, $storeid, $password); + pbs_set_password($scfg, $storeid, $password); } else { - pbs_delete_password($scfg, $storeid); + pbs_delete_password($scfg, $storeid); } if (defined(my $encryption_key = $param{'encryption-key'})) { - my $decoded_key; - if ($encryption_key eq 'autogen') { - $res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid); - $decoded_key = decode_json($res->{'encryption-key'}); - } else { - $decoded_key = eval { decode_json($encryption_key) }; - if ($@ || !exists($decoded_key->{data})) { - die "Value does not seems like a valid, JSON formatted encryption key!\n"; - } - pbs_set_encryption_key($scfg, $storeid, $encryption_key); - $res->{'encryption-key'} = $encryption_key; - } - $scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1; + my $decoded_key; + if ($encryption_key eq 'autogen') { + $res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid); + $decoded_key = decode_json($res->{'encryption-key'}); + } else { + $decoded_key = eval { decode_json($encryption_key) }; + if ($@ || !exists($decoded_key->{data})) { + die "Value does not seems like a valid, JSON formatted encryption key!\n"; + } + pbs_set_encryption_key($scfg, $storeid, $encryption_key); + $res->{'encryption-key'} = $encryption_key; + } + $scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1; } else { - pbs_delete_encryption_key($scfg, $storeid); + pbs_delete_encryption_key($scfg, $storeid); } if (defined(my $master_key = delete $param{'master-pubkey'})) { - die "'master-pubkey' can only be used 
together with 'encryption-key'\n" - if !defined($scfg->{'encryption-key'}); + die "'master-pubkey' can only be used together with 'encryption-key'\n" + if !defined($scfg->{'encryption-key'}); - my $decoded = decode_base64($master_key); - pbs_set_master_pubkey($scfg, $storeid, $decoded); - $scfg->{'master-pubkey'} = 1; + my $decoded = decode_base64($master_key); + pbs_set_master_pubkey($scfg, $storeid, $decoded); + $scfg->{'master-pubkey'} = 1; } else { - pbs_delete_master_pubkey($scfg, $storeid); + pbs_delete_master_pubkey($scfg, $storeid); } return $res; @@ -541,43 +550,43 @@ sub on_update_hook { my $res = {}; if (exists($param{password})) { - if (defined($param{password})) { - pbs_set_password($scfg, $storeid, $param{password}); - } else { - pbs_delete_password($scfg, $storeid); - } + if (defined($param{password})) { + pbs_set_password($scfg, $storeid, $param{password}); + } else { + pbs_delete_password($scfg, $storeid); + } } if (exists($param{'encryption-key'})) { - if (defined(my $encryption_key = delete($param{'encryption-key'}))) { - my $decoded_key; - if ($encryption_key eq 'autogen') { - $res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid); - $decoded_key = decode_json($res->{'encryption-key'}); - } else { - $decoded_key = eval { decode_json($encryption_key) }; - if ($@ || !exists($decoded_key->{data})) { - die "Value does not seems like a valid, JSON formatted encryption key!\n"; - } - pbs_set_encryption_key($scfg, $storeid, $encryption_key); - $res->{'encryption-key'} = $encryption_key; - } - $scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1; - } else { - pbs_delete_encryption_key($scfg, $storeid); - delete $scfg->{'encryption-key'}; - } + if (defined(my $encryption_key = delete($param{'encryption-key'}))) { + my $decoded_key; + if ($encryption_key eq 'autogen') { + $res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid); + $decoded_key = decode_json($res->{'encryption-key'}); + } else { + $decoded_key = eval { decode_json($encryption_key) }; + if ($@ || !exists($decoded_key->{data})) { + die "Value does not seems like a valid, JSON formatted encryption key!\n"; + } + pbs_set_encryption_key($scfg, $storeid, $encryption_key); + $res->{'encryption-key'} = $encryption_key; + } + $scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1; + } else { + pbs_delete_encryption_key($scfg, $storeid); + delete $scfg->{'encryption-key'}; + } } if (exists($param{'master-pubkey'})) { - if (defined(my $master_key = delete($param{'master-pubkey'}))) { - my $decoded = decode_base64($master_key); + if (defined(my $master_key = delete($param{'master-pubkey'}))) { + my $decoded = decode_base64($master_key); - pbs_set_master_pubkey($scfg, $storeid, $decoded); - $scfg->{'master-pubkey'} = 1; - } else { - pbs_delete_master_pubkey($scfg, $storeid); - } + pbs_set_master_pubkey($scfg, $storeid, $decoded); + $scfg->{'master-pubkey'} = 1; + } else { + pbs_delete_master_pubkey($scfg, $storeid); + } } return $res; @@ -596,19 +605,21 @@ sub on_delete_hook { sub parse_volname { my ($class, $volname) = @_; - if ($volname =~ m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!) { - my $btype = $1; - my $bid = $2; - my $btime = $3; - my $format = "pbs-$btype"; + if ($volname =~ + m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$! 
+ ) { + my $btype = $1; + my $bid = $2; + my $btime = $3; + my $format = "pbs-$btype"; - my $name = "$btype/$bid/$btime"; + my $name = "$btype/$bid/$btime"; - if ($bid =~ m/^\d+$/) { - return ('backup', $name, $bid, undef, undef, undef, $format); - } else { - return ('backup', $name, undef, undef, undef, undef, $format); - } + if ($bid =~ m/^\d+$/) { + return ('backup', $name, $bid, undef, undef, undef, $format); + } else { + return ('backup', $name, undef, undef, undef, undef, $format); + } } die "unable to parse PBS volume name '$volname'\n"; @@ -618,7 +629,7 @@ sub path { my ($class, $scfg, $volname, $storeid, $snapname) = @_; die "volume snapshot is not possible on pbs storage" - if defined($snapname); + if defined($snapname); my ($vtype, $name, $vmid) = $class->parse_volname($volname); @@ -627,8 +638,8 @@ sub path { # artificial url - we currently do not use that anywhere my $path = "pbs://$repo/$name"; if (defined(my $ns = $scfg->{namespace})) { - $ns =~ s|/|%2f|g; # other characters to escape aren't allowed in the namespace schema - $path .= "?ns=$ns"; + $ns =~ s|/|%2f|g; # other characters to escape aren't allowed in the namespace schema + $path .= "?ns=$ns"; } return ($path, $vmid, $vtype); @@ -657,12 +668,11 @@ sub free_image { my ($vtype, $name, $vmid) = $class->parse_volname($volname); - run_client_cmd($scfg, $storeid, "forget", [ $name ], 1); + run_client_cmd($scfg, $storeid, "forget", [$name], 1); return; } - sub list_images { my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_; @@ -678,13 +688,13 @@ my sub snapshot_files_encrypted { my $any; my $all = 1; for my $file (@$files) { - my $fn = $file->{filename}; - next if $fn eq 'client.log.blob' || $fn eq 'index.json.blob'; + my $fn = $file->{filename}; + next if $fn eq 'client.log.blob' || $fn eq 'index.json.blob'; - my $crypt = $file->{'crypt-mode'}; + my $crypt = $file->{'crypt-mode'}; - $all = 0 if !$crypt || $crypt ne 'encrypt'; - $any ||= defined($crypt) && $crypt eq 'encrypt'; + $all = 0 if !$crypt || $crypt ne 'encrypt'; + $any ||= defined($crypt) && $crypt eq 'encrypt'; } return $any && $all; } @@ -699,22 +709,22 @@ my sub pbs_api_connect { my $user = $scfg->{username} // 'root@pam'; if (my $tokenid = PVE::AccessControl::pve_verify_tokenid($user, 1)) { - $params->{apitoken} = "PBSAPIToken=${tokenid}:${password}"; + $params->{apitoken} = "PBSAPIToken=${tokenid}:${password}"; } else { - $params->{password} = $password; - $params->{username} = $user; + $params->{password} = $password; + $params->{username} = $user; } if (my $fp = $scfg->{fingerprint}) { - $params->{cached_fingerprints}->{uc($fp)} = 1; + $params->{cached_fingerprints}->{ uc($fp) } = 1; } my $conn = PVE::APIClient::LWP->new( - %$params, - host => $scfg->{server}, - port => $scfg->{port} // 8007, - timeout => ($timeout // 7), # cope with a 401 (3s api delay) and high latency - cookie_name => 'PBSAuthCookie', + %$params, + host => $scfg->{server}, + port => $scfg->{port} // 8007, + timeout => ($timeout // 7), # cope with a 401 (3s api delay) and high latency + cookie_name => 'PBSAuthCookie', ); return $conn; @@ -738,37 +748,37 @@ sub list_volumes { die "error listing snapshots - $@" if $@; foreach my $item (@$data) { - my $btype = $item->{"backup-type"}; - my $bid = $item->{"backup-id"}; - my $epoch = $item->{"backup-time"}; - my $size = $item->{size} // 1; + my $btype = $item->{"backup-type"}; + my $bid = $item->{"backup-id"}; + my $epoch = $item->{"backup-time"}; + my $size = $item->{size} // 1; - next if !($btype eq 'vm' || $btype eq 'ct'); - next 
if $bid !~ m/^\d+$/; - next if defined($vmid) && $bid ne $vmid; + next if !($btype eq 'vm' || $btype eq 'ct'); + next if $bid !~ m/^\d+$/; + next if defined($vmid) && $bid ne $vmid; - my $volid = print_volid($storeid, $btype, $bid, $epoch); + my $volid = print_volid($storeid, $btype, $bid, $epoch); - my $info = { - volid => $volid, - format => "pbs-$btype", - size => $size, - content => 'backup', - vmid => int($bid), - ctime => $epoch, - subtype => $btype eq 'vm' ? 'qemu' : 'lxc', # convert to PVE backup type - }; + my $info = { + volid => $volid, + format => "pbs-$btype", + size => $size, + content => 'backup', + vmid => int($bid), + ctime => $epoch, + subtype => $btype eq 'vm' ? 'qemu' : 'lxc', # convert to PVE backup type + }; - $info->{verification} = $item->{verification} if defined($item->{verification}); - $info->{notes} = $item->{comment} if defined($item->{comment}); - $info->{protected} = 1 if $item->{protected}; - if (defined($item->{fingerprint})) { - $info->{encrypted} = $item->{fingerprint}; - } elsif (snapshot_files_encrypted($item->{files})) { - $info->{encrypted} = '1'; - } + $info->{verification} = $item->{verification} if defined($item->{verification}); + $info->{notes} = $item->{comment} if defined($item->{comment}); + $info->{protected} = 1 if $item->{protected}; + if (defined($item->{fingerprint})) { + $info->{encrypted} = $item->{fingerprint}; + } elsif (snapshot_files_encrypted($item->{files})) { + $info->{encrypted} = '1'; + } - push @$res, $info; + push @$res, $info; } return $res; @@ -783,15 +793,15 @@ sub status { my $active = 0; eval { - my $res = run_client_cmd($scfg, $storeid, "status"); + my $res = run_client_cmd($scfg, $storeid, "status"); - $active = 1; - $total = $res->{total}; - $used = $res->{used}; - $free = $res->{avail}; + $active = 1; + $total = $res->{total}; + $used = $res->{used}; + $free = $res->{avail}; }; if (my $err = $@) { - warn $err; + warn $err; } return ($total, $free, $used, $active); @@ -826,9 +836,9 @@ sub activate_storage { my $datastore = $scfg->{datastore}; for my $ds (@$datastores) { - if ($ds->{store} eq $datastore) { - return 1; - } + if ($ds->{store} eq $datastore) { + return 1; + } } die "$storeid: Cannot find datastore '$datastore', check permissions and existence!\n"; @@ -860,9 +870,9 @@ sub deactivate_volume { sub get_volume_notes { my ($class, $scfg, $storeid, $volname, $timeout) = @_; - my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); + my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); - my $data = run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "show", $name ]); + my $data = run_client_cmd($scfg, $storeid, "snapshot", ["notes", "show", $name]); return $data->{notes}; } @@ -872,9 +882,9 @@ sub get_volume_notes { sub update_volume_notes { my ($class, $scfg, $storeid, $volname, $notes, $timeout) = @_; - my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); + my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); - run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "update", $name, $notes ], 1); + run_client_cmd($scfg, $storeid, "snapshot", ["notes", "update", $name, $notes], 1); return undef; } @@ -883,22 +893,22 @@ sub get_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute) = @_; if ($attribute eq 'notes') { - return $class->get_volume_notes($scfg, $storeid, $volname); + return $class->get_volume_notes($scfg, $storeid, $volname); } if 
($attribute eq 'protected') { - my $param = api_param_from_volname($class, $scfg, $volname); + my $param = api_param_from_volname($class, $scfg, $volname); - my $password = pbs_get_password($scfg, $storeid); - my $conn = pbs_api_connect($scfg, $password); - my $datastore = $scfg->{datastore}; + my $password = pbs_get_password($scfg, $storeid); + my $conn = pbs_api_connect($scfg, $password); + my $datastore = $scfg->{datastore}; - my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/$attribute", $param); }; - if (my $err = $@) { - return if $err->{code} == 404; # not supported - die $err; - } - return $res; + my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/$attribute", $param); }; + if (my $err = $@) { + return if $err->{code} == 404; # not supported + die $err; + } + return $res; } return; @@ -908,24 +918,24 @@ sub update_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_; if ($attribute eq 'notes') { - return $class->update_volume_notes($scfg, $storeid, $volname, $value); + return $class->update_volume_notes($scfg, $storeid, $volname, $value); } if ($attribute eq 'protected') { - my $param = api_param_from_volname($class, $scfg, $volname); - $param->{$attribute} = $value; + my $param = api_param_from_volname($class, $scfg, $volname); + $param->{$attribute} = $value; - my $password = pbs_get_password($scfg, $storeid); - my $conn = pbs_api_connect($scfg, $password); - my $datastore = $scfg->{datastore}; + my $password = pbs_get_password($scfg, $storeid); + my $conn = pbs_api_connect($scfg, $password); + my $datastore = $scfg->{datastore}; - eval { $conn->put("/api2/json/admin/datastore/$datastore/$attribute", $param); }; - if (my $err = $@) { - die "Server is not recent enough to support feature '$attribute'\n" - if $err->{code} == 404; - die $err; - } - return; + eval { $conn->put("/api2/json/admin/datastore/$datastore/$attribute", $param); }; + if (my $err = $@) { + die "Server is not recent enough to support feature '$attribute'\n" + if $err->{code} == 404; + die $err; + } + return; } die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n"; @@ -934,15 +944,15 @@ sub update_volume_attribute { sub volume_size_info { my ($class, $scfg, $storeid, $volname, $timeout) = @_; - my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); + my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname); - my $data = run_client_cmd($scfg, $storeid, "files", [ $name ]); + my $data = run_client_cmd($scfg, $storeid, "files", [$name]); my $size = 0; foreach my $info (@$data) { - if ($info->{size} && $info->{size} =~ /^(\d+)$/) { # untaints - $size += $1; - } + if ($info->{size} && $info->{size} =~ /^(\d+)$/) { # untaints + $size += $1; + } } my $used = $size; diff --git a/src/PVE/Storage/Plugin.pm b/src/PVE/Storage/Plugin.pm index 4e16420..1d8a8f9 100644 --- a/src/PVE/Storage/Plugin.pm +++ b/src/PVE/Storage/Plugin.pm @@ -21,7 +21,7 @@ use JSON; use base qw(PVE::SectionConfig); -use constant KNOWN_COMPRESSION_FORMATS => ('gz', 'lzo', 'zst', 'bz2'); +use constant KNOWN_COMPRESSION_FORMATS => ('gz', 'lzo', 'zst', 'bz2'); use constant COMPRESSOR_RE => join('|', KNOWN_COMPRESSION_FORMATS); use constant LOG_EXT => ".log"; @@ -35,16 +35,7 @@ our @COMMON_TAR_FLAGS = qw( ); our @SHARED_STORAGE = ( - 'iscsi', - 'nfs', - 'cifs', - 'rbd', - 'cephfs', - 'iscsidirect', - 'glusterfs', - 'zfs', - 'drbd', - 'pbs', + 'iscsi', 'nfs', 'cifs', 'rbd', 'cephfs', 'iscsidirect', 
'glusterfs', 'zfs', 'drbd', 'pbs', ); our $QCOW2_PREALLOCATION = { @@ -62,53 +53,56 @@ our $RAW_PREALLOCATION = { our $MAX_VOLUMES_PER_GUEST = 1024; -cfs_register_file ('storage.cfg', - sub { __PACKAGE__->parse_config(@_); }, - sub { __PACKAGE__->write_config(@_); }); +cfs_register_file( + 'storage.cfg', + sub { __PACKAGE__->parse_config(@_); }, + sub { __PACKAGE__->write_config(@_); }, +); my %prune_option = ( optional => 1, - type => 'integer', minimum => '0', + type => 'integer', + minimum => '0', format_description => 'N', ); our $prune_backups_format = { 'keep-all' => { - type => 'boolean', - description => 'Keep all backups. Conflicts with the other options when true.', - optional => 1, + type => 'boolean', + description => 'Keep all backups. Conflicts with the other options when true.', + optional => 1, }, 'keep-last' => { - %prune_option, - description => 'Keep the last backups.', + %prune_option, description => 'Keep the last backups.', }, 'keep-hourly' => { - %prune_option, - description => 'Keep backups for the last different hours. If there is more' . - 'than one backup for a single hour, only the latest one is kept.' + %prune_option, + description => 'Keep backups for the last different hours. If there is more' + . 'than one backup for a single hour, only the latest one is kept.', }, 'keep-daily' => { - %prune_option, - description => 'Keep backups for the last different days. If there is more' . - 'than one backup for a single day, only the latest one is kept.' + %prune_option, + description => 'Keep backups for the last different days. If there is more' + . 'than one backup for a single day, only the latest one is kept.', }, 'keep-weekly' => { - %prune_option, - description => 'Keep backups for the last different weeks. If there is more' . - 'than one backup for a single week, only the latest one is kept.' + %prune_option, + description => 'Keep backups for the last different weeks. If there is more' + . 'than one backup for a single week, only the latest one is kept.', }, 'keep-monthly' => { - %prune_option, - description => 'Keep backups for the last different months. If there is more' . - 'than one backup for a single month, only the latest one is kept.' + %prune_option, + description => 'Keep backups for the last different months. If there is more' + . 'than one backup for a single month, only the latest one is kept.', }, 'keep-yearly' => { - %prune_option, - description => 'Keep backups for the last different years. If there is more' . - 'than one backup for a single year, only the latest one is kept.' + %prune_option, + description => 'Keep backups for the last different years. If there is more' + . 'than one backup for a single year, only the latest one is kept.', }, }; PVE::JSONSchema::register_format('prune-backups', $prune_backups_format, \&validate_prune_backups); + sub validate_prune_backups { my ($prune_backups) = @_; @@ -117,104 +111,123 @@ sub validate_prune_backups { my $keep_all = delete $res->{'keep-all'}; if (scalar(grep { $_ > 0 } values %{$res}) == 0) { - $res = { 'keep-all' => 1 }; + $res = { 'keep-all' => 1 }; } elsif ($keep_all) { - die "keep-all cannot be set together with other options.\n"; + die "keep-all cannot be set together with other options.\n"; } return $res; } -register_standard_option('prune-backups', { - description => "The retention options with shorter intervals are processed first " . - "with --keep-last being the very first one. Each option covers a " . - "specific period of time. We say that backups within this period " . 
- "are covered by this option. The next option does not take care " . - "of already covered backups and only considers older backups.", - optional => 1, - type => 'string', - format => 'prune-backups', -}); +register_standard_option( + 'prune-backups', + { + description => "The retention options with shorter intervals are processed first " + . "with --keep-last being the very first one. Each option covers a " + . "specific period of time. We say that backups within this period " + . "are covered by this option. The next option does not take care " + . "of already covered backups and only considers older backups.", + optional => 1, + type => 'string', + format => 'prune-backups', + }, +); my $defaultData = { propertyList => { - type => { description => "Storage type." }, - storage => get_standard_option('pve-storage-id', - { completion => \&PVE::Storage::complete_storage }), - nodes => get_standard_option('pve-node-list', { - description => "List of nodes for which the storage configuration applies.", - optional => 1, - }), - content => { - description => "Allowed content types.\n\nNOTE: the value " . - "'rootdir' is used for Containers, and value 'images' for VMs.\n", - type => 'string', format => 'pve-storage-content-list', - optional => 1, - completion => \&PVE::Storage::complete_content_type, - }, - disable => { - description => "Flag to disable the storage.", - type => 'boolean', - optional => 1, - }, - maxfiles => { - description => "Deprecated: use 'prune-backups' instead. " . - "Maximal number of backup files per VM. Use '0' for unlimited.", - type => 'integer', - minimum => 0, - optional => 1, - }, - 'prune-backups' => get_standard_option('prune-backups'), - 'max-protected-backups' => { - description => "Maximal number of protected backups per guest. Use '-1' for unlimited.", - type => 'integer', - minimum => -1, - optional => 1, - default => "Unlimited for users with Datastore.Allocate privilege, 5 for other users", - }, - shared => { - description => "Indicate that this is a single storage with the same contents on all " - ."nodes (or all listed in the 'nodes' option). It will not make the contents of a " - ."local storage automatically accessible to other nodes, it just marks an already " - ."shared storage as such!", - type => 'boolean', - optional => 1, - }, - subdir => { - description => "Subdir to mount.", - type => 'string', format => 'pve-storage-path', - optional => 1, - }, - format => get_standard_option('pve-storage-image-format', { - description => "Default image format.", - optional => 1, - }), - preallocation => { - description => "Preallocation mode for raw and qcow2 images. " . - "Using 'metadata' on raw images results in preallocation=off.", - type => 'string', enum => ['off', 'metadata', 'falloc', 'full'], - default => 'metadata', - optional => 1, - }, - 'content-dirs' => { - description => "Overrides for default content type directories.", - type => "string", format => "pve-dir-override-list", - optional => 1, - }, - options => { - description => "NFS/CIFS mount options (see 'man nfs' or 'man mount.cifs')", - type => 'string', - format => 'pve-storage-options', - optional => 1, - }, - port => { - description => "Use this port to connect to the storage instead of the default one (for" - ." example, with PBS or ESXi). For NFS and CIFS, use the 'options' option to" - ." configure the port via the mount options.", - type => 'integer', - minimum => 1, - maximum => 65535, - optional => 1, - }, + type => { description => "Storage type." 
}, + storage => get_standard_option( + 'pve-storage-id', + { completion => \&PVE::Storage::complete_storage }, + ), + nodes => get_standard_option( + 'pve-node-list', + { + description => "List of nodes for which the storage configuration applies.", + optional => 1, + }, + ), + content => { + description => "Allowed content types.\n\nNOTE: the value " + . "'rootdir' is used for Containers, and value 'images' for VMs.\n", + type => 'string', + format => 'pve-storage-content-list', + optional => 1, + completion => \&PVE::Storage::complete_content_type, + }, + disable => { + description => "Flag to disable the storage.", + type => 'boolean', + optional => 1, + }, + maxfiles => { + description => "Deprecated: use 'prune-backups' instead. " + . "Maximal number of backup files per VM. Use '0' for unlimited.", + type => 'integer', + minimum => 0, + optional => 1, + }, + 'prune-backups' => get_standard_option('prune-backups'), + 'max-protected-backups' => { + description => + "Maximal number of protected backups per guest. Use '-1' for unlimited.", + type => 'integer', + minimum => -1, + optional => 1, + default => + "Unlimited for users with Datastore.Allocate privilege, 5 for other users", + }, + shared => { + description => + "Indicate that this is a single storage with the same contents on all " + . "nodes (or all listed in the 'nodes' option). It will not make the contents of a " + . "local storage automatically accessible to other nodes, it just marks an already " + . "shared storage as such!", + type => 'boolean', + optional => 1, + }, + subdir => { + description => "Subdir to mount.", + type => 'string', + format => 'pve-storage-path', + optional => 1, + }, + format => get_standard_option( + 'pve-storage-image-format', + { + description => "Default image format.", + optional => 1, + }, + ), + preallocation => { + description => "Preallocation mode for raw and qcow2 images. " + . "Using 'metadata' on raw images results in preallocation=off.", + type => 'string', + enum => ['off', 'metadata', 'falloc', 'full'], + default => 'metadata', + optional => 1, + }, + 'content-dirs' => { + description => "Overrides for default content type directories.", + type => "string", + format => "pve-dir-override-list", + optional => 1, + }, + options => { + description => "NFS/CIFS mount options (see 'man nfs' or 'man mount.cifs')", + type => 'string', + format => 'pve-storage-options', + optional => 1, + }, + port => { + description => + "Use this port to connect to the storage instead of the default one (for" + . " example, with PBS or ESXi). For NFS and CIFS, use the 'options' option to" + . " configure the port via the mount options.", + type => 'integer', + minimum => 1, + maximum => 65535, + optional => 1, + }, }, }; @@ -223,7 +236,7 @@ sub content_hash_to_string { my @cta; foreach my $ct (keys %$hash) { - push @cta, $ct if $hash->{$ct}; + push @cta, $ct if $hash->{$ct}; } return join(',', @cta); @@ -250,7 +263,7 @@ sub sensitive_properties { my $data = $defaultData->{plugindata}->{$type}; if (my $sensitive_properties = $data->{'sensitive-properties'}) { - return [sort keys $sensitive_properties->%*]; + return [sort keys $sensitive_properties->%*]; } # For backwards compatibility. This list was hardcoded in the API module previously. 
@@ -262,7 +275,7 @@ sub storage_has_feature { my $data = $defaultData->{plugindata}->{$type}; if (my $features = $data->{features}) { - return $features->{$feature}; + return $features->{$feature}; } return; } @@ -274,49 +287,53 @@ sub default_format { my $def = $defaultData->{plugindata}->{$type}; my $def_format = 'raw'; - my $valid_formats = [ $def_format ]; + my $valid_formats = [$def_format]; if (defined($def->{format})) { - $def_format = $scfg->{format} || $def->{format}->[1]; - $valid_formats = [ sort keys %{$def->{format}->[0]} ]; + $def_format = $scfg->{format} || $def->{format}->[1]; + $valid_formats = [sort keys %{ $def->{format}->[0] }]; } return wantarray ? ($def_format, $valid_formats) : $def_format; } PVE::JSONSchema::register_format('pve-storage-path', \&verify_path); + sub verify_path { my ($path, $noerr) = @_; # fixme: exclude more shell meta characters? # we need absolute paths if ($path !~ m|^/[^;\(\)]+|) { - return undef if $noerr; - die "value does not look like a valid absolute path\n"; + return undef if $noerr; + die "value does not look like a valid absolute path\n"; } return $path; } PVE::JSONSchema::register_format('pve-storage-server', \&verify_server); + sub verify_server { my ($server, $noerr) = @_; - if (!(PVE::JSONSchema::pve_verify_ip($server, 1) || - PVE::JSONSchema::pve_verify_dns_name($server, 1))) - { - return undef if $noerr; - die "value does not look like a valid server name or IP address\n"; + if (!( + PVE::JSONSchema::pve_verify_ip($server, 1) + || PVE::JSONSchema::pve_verify_dns_name($server, 1) + )) { + return undef if $noerr; + die "value does not look like a valid server name or IP address\n"; } return $server; } PVE::JSONSchema::register_format('pve-storage-vgname', \&parse_lvm_name); + sub parse_lvm_name { my ($name, $noerr) = @_; if ($name !~ m/^[a-z0-9][a-z0-9\-\_\.]*[a-z0-9]$/i) { - return undef if $noerr; - die "lvm name '$name' contains illegal characters\n"; + return undef if $noerr; + die "lvm name '$name' contains illegal characters\n"; } return $name; @@ -336,18 +353,20 @@ sub parse_lvm_name { #} PVE::JSONSchema::register_format('pve-storage-portal-dns', \&verify_portal_dns); + sub verify_portal_dns { my ($portal, $noerr) = @_; # IP or DNS name with optional port if (!PVE::Tools::parse_host_and_port($portal)) { - return undef if $noerr; - die "value does not look like a valid portal address\n"; + return undef if $noerr; + die "value does not look like a valid portal address\n"; } return $portal; } PVE::JSONSchema::register_format('pve-storage-content', \&verify_content); + sub verify_content { my ($ct, $noerr) = @_; @@ -356,8 +375,8 @@ sub verify_content { my $valid_content = valid_content_types('dir'); # dir includes all other types if (!$valid_content->{$ct}) { - return undef if $noerr; - die "invalid content type '$ct'\n"; + return undef if $noerr; + die "invalid content type '$ct'\n"; } return $ct; @@ -368,53 +387,60 @@ sub verify_content { # TODO PVE 9 - remove after doing a versioned breaks for pve-guest-common, which was using this # format. 
PVE::JSONSchema::register_format('pve-storage-format', \&verify_format); + sub verify_format { my ($fmt, $noerr) = @_; if ($fmt !~ m/^(raw|qcow2|vmdk|subvol)$/) { - return undef if $noerr; - die "invalid format '$fmt'\n"; + return undef if $noerr; + die "invalid format '$fmt'\n"; } return $fmt; } PVE::JSONSchema::register_format('pve-storage-options', \&verify_options); + sub verify_options { my ($value, $noerr) = @_; # mount options (see man fstab) if ($value !~ m/^\S+$/) { - return undef if $noerr; - die "invalid options '$value'\n"; + return undef if $noerr; + die "invalid options '$value'\n"; } return $value; } PVE::JSONSchema::register_format('pve-volume-id', \&parse_volume_id); + sub parse_volume_id { my ($volid, $noerr) = @_; if ($volid =~ m/^([a-z][a-z0-9\-\_\.]*[a-z0-9]):(.+)$/i) { - return wantarray ? ($1, $2) : $1; + return wantarray ? ($1, $2) : $1; } return undef if $noerr; die "unable to parse volume ID '$volid'\n"; } PVE::JSONSchema::register_format('pve-dir-override', \&verify_dir_override); + sub verify_dir_override { my ($value, $noerr) = @_; if ($value =~ m/^([a-z]+)=([^.]*(?:\.?[^.]+)+)$/) { - my ($content_type, $relative_path) = ($1, $2); - if (verify_content($content_type, $noerr)) { - # linux has 4k max-path, but limit total length to lower as its concat'd for full path - if (length($relative_path) < 1023 && !(grep { length($_) >= 255 } split('/', $relative_path))) { - return $value; - } - } + my ($content_type, $relative_path) = ($1, $2); + if (verify_content($content_type, $noerr)) { + # linux has 4k max-path, but limit total length to lower as its concat'd for full path + if ( + length($relative_path) < 1023 + && !(grep { length($_) >= 255 } split('/', $relative_path)) + ) { + return $value; + } + } } return undef if $noerr; @@ -429,12 +455,12 @@ sub parse_section_header { my ($class, $line) = @_; if ($line =~ m/^(\S+):\s*(\S+)\s*$/) { - my ($type, $storeid) = (lc($1), $2); - my $errmsg = undef; # set if you want to skip whole section - eval { PVE::JSONSchema::parse_storage_id($storeid); }; - $errmsg = $@ if $@; - my $config = {}; # to return additional attributes - return ($type, $storeid, $errmsg, $config); + my ($type, $storeid) = (lc($1), $2); + my $errmsg = undef; # set if you want to skip whole section + eval { PVE::JSONSchema::parse_storage_id($storeid); }; + $errmsg = $@ if $@; + my $config = {}; # to return additional attributes + return ($type, $storeid, $errmsg, $config); } return undef; } @@ -445,68 +471,68 @@ sub decode_value { my $def = $defaultData->{plugindata}->{$type}; if ($key eq 'content') { - my $valid_content = $def->{content}->[0]; + my $valid_content = $def->{content}->[0]; - my $res = {}; + my $res = {}; - foreach my $c (PVE::Tools::split_list($value)) { - if (!$valid_content->{$c}) { - warn "storage does not support content type '$c'\n"; - next; - } - $res->{$c} = 1; - } + foreach my $c (PVE::Tools::split_list($value)) { + if (!$valid_content->{$c}) { + warn "storage does not support content type '$c'\n"; + next; + } + $res->{$c} = 1; + } - if ($res->{none} && scalar (keys %$res) > 1) { - die "unable to combine 'none' with other content types\n"; - } + if ($res->{none} && scalar(keys %$res) > 1) { + die "unable to combine 'none' with other content types\n"; + } - if (scalar(keys $res->%*) == 0 && !$valid_content->{none}) { - die "storage does not support content type 'none'\n"; - } + if (scalar(keys $res->%*) == 0 && !$valid_content->{none}) { + die "storage does not support content type 'none'\n"; + } - return $res; + return $res; } 
elsif ($key eq 'format') { - my $valid_formats = $def->{format}->[0]; + my $valid_formats = $def->{format}->[0]; - if (!$valid_formats->{$value}) { - warn "storage does not support format '$value'\n"; - next; - } + if (!$valid_formats->{$value}) { + warn "storage does not support format '$value'\n"; + next; + } - return $value; + return $value; } elsif ($key eq 'nodes') { - my $res = {}; + my $res = {}; - foreach my $node (PVE::Tools::split_list($value)) { - if (PVE::JSONSchema::pve_verify_node_name($node)) { - $res->{$node} = 1; - } - } + foreach my $node (PVE::Tools::split_list($value)) { + if (PVE::JSONSchema::pve_verify_node_name($node)) { + $res->{$node} = 1; + } + } - # fixme: - # no node restrictions for local storage - #if ($storeid && $storeid eq 'local' && scalar(keys(%$res))) { - # die "storage '$storeid' does not allow node restrictions\n"; - #} + # fixme: + # no node restrictions for local storage + #if ($storeid && $storeid eq 'local' && scalar(keys(%$res))) { + # die "storage '$storeid' does not allow node restrictions\n"; + #} - return $res; + return $res; } elsif ($key eq 'content-dirs') { - my $valid_content = $def->{content}->[0]; - my $res = {}; + my $valid_content = $def->{content}->[0]; + my $res = {}; - foreach my $dir (PVE::Tools::split_list($value)) { - my ($content, $path) = split(/=/, $dir, 2); + foreach my $dir (PVE::Tools::split_list($value)) { + my ($content, $path) = split(/=/, $dir, 2); - if (!$valid_content->{$content}) { - warn "storage does not support content type '$content'\n"; - next; - } + if (!$valid_content->{$content}) { + warn "storage does not support content type '$content'\n"; + next; + } - $res->{$content} = $path; - } + $res->{$content} = $path; + } - return $res; + return $res; } return $value; @@ -516,13 +542,13 @@ sub encode_value { my ($class, $type, $key, $value) = @_; if ($key eq 'nodes') { - return join(',', keys(%$value)); + return join(',', keys(%$value)); } elsif ($key eq 'content') { - my $res = content_hash_to_string($value) || 'none'; - return $res; + my $res = content_hash_to_string($value) || 'none'; + return $res; } elsif ($key eq 'content-dirs') { - my $res = dirs_hash_to_string($value); - return $res; + my $res = dirs_hash_to_string($value); + return $res; } return $value; @@ -536,22 +562,25 @@ sub parse_config { # make sure we have a reasonable 'local:' storage # we want 'local' to be always the same 'type' (on all cluster nodes) - if (!$ids->{local} || $ids->{local}->{type} ne 'dir' || - ($ids->{local}->{path} && $ids->{local}->{path} ne '/var/lib/vz')) { - $ids->{local} = { - type => 'dir', - priority => 0, # force first entry - path => '/var/lib/vz', - 'prune-backups' => 'keep-all=1', - content => { - backup => 1, - images => 1, - iso => 1, - rootdir => 1, - snippets => 1, - vztmpl => 1, - }, - }; + if ( + !$ids->{local} + || $ids->{local}->{type} ne 'dir' + || ($ids->{local}->{path} && $ids->{local}->{path} ne '/var/lib/vz') + ) { + $ids->{local} = { + type => 'dir', + priority => 0, # force first entry + path => '/var/lib/vz', + 'prune-backups' => 'keep-all=1', + content => { + backup => 1, + images => 1, + iso => 1, + rootdir => 1, + snippets => 1, + vztmpl => 1, + }, + }; } # make sure we have a path @@ -561,17 +590,17 @@ sub parse_config { delete($ids->{local}->{nodes}); foreach my $storeid (keys %$ids) { - my $d = $ids->{$storeid}; - my $type = $d->{type}; + my $d = $ids->{$storeid}; + my $type = $d->{type}; - my $def = $defaultData->{plugindata}->{$type}; + my $def = $defaultData->{plugindata}->{$type}; - if 
($def->{content}) { - $d->{content} = $def->{content}->[1] if !$d->{content}; - } - if (grep { $_ eq $type } @SHARED_STORAGE) { - $d->{shared} = 1; - } + if ($def->{content}) { + $d->{content} = $def->{content}->[1] if !$d->{content}; + } + if (grep { $_ eq $type } @SHARED_STORAGE) { + $d->{shared} = 1; + } } return $cfg; @@ -583,20 +612,20 @@ sub preallocation_cmd_option { my $prealloc = $scfg->{preallocation}; if ($fmt eq 'qcow2') { - $prealloc = $prealloc // 'metadata'; + $prealloc = $prealloc // 'metadata'; - die "preallocation mode '$prealloc' not supported by format '$fmt'\n" - if !$QCOW2_PREALLOCATION->{$prealloc}; + die "preallocation mode '$prealloc' not supported by format '$fmt'\n" + if !$QCOW2_PREALLOCATION->{$prealloc}; - return "preallocation=$prealloc"; + return "preallocation=$prealloc"; } elsif ($fmt eq 'raw') { - $prealloc = $prealloc // 'off'; - $prealloc = 'off' if $prealloc eq 'metadata'; + $prealloc = $prealloc // 'off'; + $prealloc = 'off' if $prealloc eq 'metadata'; - die "preallocation mode '$prealloc' not supported by format '$fmt'\n" - if !$RAW_PREALLOCATION->{$prealloc}; + die "preallocation mode '$prealloc' not supported by format '$fmt'\n" + if !$RAW_PREALLOCATION->{$prealloc}; - return "preallocation=$prealloc"; + return "preallocation=$prealloc"; } return; @@ -641,14 +670,14 @@ sub cluster_lock_storage { my $res; if (!$shared) { - my $lockid = "pve-storage-$storeid"; - my $lockdir = "/var/lock/pve-manager"; - mkdir $lockdir; - $res = PVE::Tools::lock_file("$lockdir/$lockid", $timeout, $func, @param); - die $@ if $@; + my $lockid = "pve-storage-$storeid"; + my $lockdir = "/var/lock/pve-manager"; + mkdir $lockdir; + $res = PVE::Tools::lock_file("$lockdir/$lockid", $timeout, $func, @param); + die $@ if $@; } else { - $res = PVE::Cluster::cfs_lock_storage($storeid, $timeout, $func, @param); - die $@ if $@; + $res = PVE::Cluster::cfs_lock_storage($storeid, $timeout, $func, @param); + die $@ if $@; } return $res; } @@ -657,7 +686,7 @@ sub parse_name_dir { my $name = shift; if ($name =~ m!^((base-)?[^/\s]+\.(raw|qcow2|vmdk|subvol))$!) { - return ($1, $3, $2); # (name, format, isBase) + return ($1, $3, $2); # (name, format, isBase) } die "unable to parse volume filename '$name'\n"; @@ -667,35 +696,39 @@ sub parse_volname { my ($class, $volname) = @_; if ($volname =~ m!^(\d+)/(\S+)/(\d+)/(\S+)$!) { - my ($basedvmid, $basename) = ($1, $2); - parse_name_dir($basename); - my ($vmid, $name) = ($3, $4); - my (undef, $format, $isBase) = parse_name_dir($name); - return ('images', $name, $vmid, $basename, $basedvmid, $isBase, $format); + my ($basedvmid, $basename) = ($1, $2); + parse_name_dir($basename); + my ($vmid, $name) = ($3, $4); + my (undef, $format, $isBase) = parse_name_dir($name); + return ('images', $name, $vmid, $basename, $basedvmid, $isBase, $format); } elsif ($volname =~ m!^(\d+)/(\S+)$!) { - my ($vmid, $name) = ($1, $2); - my (undef, $format, $isBase) = parse_name_dir($name); - return ('images', $name, $vmid, undef, undef, $isBase, $format); + my ($vmid, $name) = ($1, $2); + my (undef, $format, $isBase) = parse_name_dir($name); + return ('images', $name, $vmid, undef, undef, $isBase, $format); } elsif ($volname =~ m!^iso/([^/]+$PVE::Storage::ISO_EXT_RE_0)$!) { - return ('iso', $1, undef, undef, undef, undef, 'raw'); + return ('iso', $1, undef, undef, undef, undef, 'raw'); } elsif ($volname =~ m!^vztmpl/([^/]+$PVE::Storage::VZTMPL_EXT_RE_1)$!) 
{ - return ('vztmpl', $1, undef, undef, undef, undef, 'raw'); + return ('vztmpl', $1, undef, undef, undef, undef, 'raw'); } elsif ($volname =~ m!^rootdir/(\d+)$!) { - return ('rootdir', $1, $1); + return ('rootdir', $1, $1); } elsif ($volname =~ m!^backup/([^/]+$PVE::Storage::BACKUP_EXT_RE_2)$!) { - my $fn = $1; - if ($fn =~ m/^vzdump-(openvz|lxc|qemu)-(\d+)-.+/) { - return ('backup', $fn, $2, undef, undef, undef, 'raw'); - } - return ('backup', $fn, undef, undef, undef, undef, 'raw'); + my $fn = $1; + if ($fn =~ m/^vzdump-(openvz|lxc|qemu)-(\d+)-.+/) { + return ('backup', $fn, $2, undef, undef, undef, 'raw'); + } + return ('backup', $fn, undef, undef, undef, undef, 'raw'); } elsif ($volname =~ m!^snippets/([^/]+)$!) { - return ('snippets', $1, undef, undef, undef, undef, 'raw'); - } elsif ($volname =~ m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+\.ova\/${PVE::Storage::OVA_CONTENT_RE_1})$!) { - my $packed_image = $1; - my $format = $2; - return ('import', $packed_image, undef, undef, undef, undef, "ova+$format"); - } elsif ($volname =~ m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!) { - return ('import', $1, undef, undef, undef, undef, $2); + return ('snippets', $1, undef, undef, undef, undef, 'raw'); + } elsif ($volname =~ + m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+\.ova\/${PVE::Storage::OVA_CONTENT_RE_1})$! + ) { + my $packed_image = $1; + my $format = $2; + return ('import', $packed_image, undef, undef, undef, undef, "ova+$format"); + } elsif ($volname =~ + m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$! + ) { + return ('import', $1, undef, undef, undef, undef, $2); } die "unable to parse directory volume name '$volname'\n"; @@ -731,13 +764,12 @@ sub get_subdir { sub filesystem_path { my ($class, $scfg, $volname, $snapname) = @_; - my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname); # Note: qcow2/qed has internal snapshot, so path is always # the same (with or without snapshot => same file). die "can't snapshot this image format\n" - if defined($snapname) && $format !~ m/^(qcow2|qed)$/; + if defined($snapname) && $format !~ m/^(qcow2|qed)$/; my $dir = $class->get_subdir($scfg, $vtype); @@ -761,7 +793,7 @@ sub create_base { die "storage definition has no path\n" if !$scfg->{path}; my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = - $class->parse_volname($volname); + $class->parse_volname($volname); die "create_base on wrong vtype '$vtype'\n" if $vtype ne 'images'; @@ -773,20 +805,22 @@ sub create_base { die "file_size_info on '$volname' failed\n" if !defined($size); die "volname '$volname' contains wrong information about parent\n" - if $basename && (!$parent || $parent ne "../$basevmid/$basename"); + if $basename && (!$parent || $parent ne "../$basevmid/$basename"); my $newname = $name; $newname =~ s/^vm-/base-/; - my $newvolname = $basename ? "$basevmid/$basename/$vmid/$newname" : - "$vmid/$newname"; + my $newvolname = + $basename + ? 
"$basevmid/$basename/$vmid/$newname" + : "$vmid/$newname"; my $newpath = $class->filesystem_path($scfg, $newvolname); die "file '$newpath' already exists\n" if -f $newpath; - rename($path, $newpath) || - die "rename '$path' to '$newpath' failed - $!\n"; + rename($path, $newpath) + || die "rename '$path' to '$newpath' failed - $!\n"; # We try to protect base volume @@ -805,15 +839,15 @@ my $get_vm_disk_number = sub { my $disk_regex = qr/(vm|base)-$vmid-disk-(\d+)$suffix/; my $type = $scfg->{type}; - my $def = { %{$defaultData->{plugindata}->{$type}} }; + my $def = { %{ $defaultData->{plugindata}->{$type} } }; - my $valid = $def->{format}[0]; + my $valid = $def->{format}->[0]; if ($valid->{subvol}) { - $disk_regex = qr/(vm|base|subvol|basevol)-$vmid-disk-(\d+)/; + $disk_regex = qr/(vm|base|subvol|basevol)-$vmid-disk-(\d+)/; } if ($disk_name =~ m/$disk_regex/) { - return $2; + return $2; } return undef; @@ -828,17 +862,17 @@ sub get_next_vm_diskname { my $disk_ids = {}; foreach my $disk (@$disk_list) { - my $disknum = $get_vm_disk_number->($disk, $scfg, $vmid, $suffix); - $disk_ids->{$disknum} = 1 if defined($disknum); + my $disknum = $get_vm_disk_number->($disk, $scfg, $vmid, $suffix); + $disk_ids->{$disknum} = 1 if defined($disknum); } for (my $i = 0; $i < $MAX_VOLUMES_PER_GUEST; $i++) { - if (!$disk_ids->{$i}) { - return "$prefix-$vmid-disk-$i$suffix"; - } + if (!$disk_ids->{$i}) { + return "$prefix-$vmid-disk-$i$suffix"; + } } - die "unable to allocate an image name for VM $vmid in storage '$storeid'\n" + die "unable to allocate an image name for VM $vmid in storage '$storeid'\n"; } sub find_free_diskname { @@ -846,7 +880,7 @@ sub find_free_diskname { my $disks = $class->list_images($storeid, $scfg, $vmid); - my $disk_list = [ map { $_->{volid} } @$disks ]; + my $disk_list = [map { $_->{volid} } @$disks]; return get_next_vm_diskname($disk_list, $storeid, $vmid, $fmt, $scfg, $add_fmt_suffix); } @@ -858,7 +892,7 @@ sub clone_image { die "storage definition has no path\n" if !$scfg->{path}; my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) = - $class->parse_volname($volname); + $class->parse_volname($volname); die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images'; @@ -883,12 +917,21 @@ sub clone_image { # Note: we use relative paths, so we need to call chdir before qemu-img eval { - local $CWD = $imagedir; + local $CWD = $imagedir; - my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename", - '-F', $format, '-f', 'qcow2', $path]; + my $cmd = [ + '/usr/bin/qemu-img', + 'create', + '-b', + "../$basevmid/$basename", + '-F', + $format, + '-f', + 'qcow2', + $path, + ]; - run_command($cmd); + run_command($cmd); }; my $err = $@; @@ -910,35 +953,35 @@ sub alloc_image { my (undef, $tmpfmt) = parse_name_dir($name); die "illegal name '$name' - wrong extension for format ('$tmpfmt != '$fmt')\n" - if $tmpfmt ne $fmt; + if $tmpfmt ne $fmt; my $path = "$imagedir/$name"; die "disk image '$path' already exists\n" if -e $path; if ($fmt eq 'subvol') { - # only allow this if size = 0, so that user knows what he is doing - die "storage does not support subvol quotas\n" if $size != 0; + # only allow this if size = 0, so that user knows what he is doing + die "storage does not support subvol quotas\n" if $size != 0; - my $old_umask = umask(0022); - my $err; - mkdir($path) or $err = "unable to create subvol '$path' - $!\n"; - umask $old_umask; - die $err if $err; + my $old_umask = umask(0022); + my $err; + mkdir($path) or $err = "unable to create subvol '$path' - $!\n"; + 
umask $old_umask; + die $err if $err; } else { - my $cmd = ['/usr/bin/qemu-img', 'create']; + my $cmd = ['/usr/bin/qemu-img', 'create']; - my $prealloc_opt = preallocation_cmd_option($scfg, $fmt); - push @$cmd, '-o', $prealloc_opt if defined($prealloc_opt); + my $prealloc_opt = preallocation_cmd_option($scfg, $fmt); + push @$cmd, '-o', $prealloc_opt if defined($prealloc_opt); - push @$cmd, '-f', $fmt, $path, "${size}K"; + push @$cmd, '-f', $fmt, $path, "${size}K"; - eval { run_command($cmd, errmsg => "unable to create image"); }; - if ($@) { - unlink $path; - rmdir $imagedir; - die "$@"; - } + eval { run_command($cmd, errmsg => "unable to create image"); }; + if ($@) { + unlink $path; + rmdir $imagedir; + die "$@"; + } } return "$vmid/$name"; @@ -948,25 +991,25 @@ sub free_image { my ($class, $storeid, $scfg, $volname, $isBase, $format) = @_; die "cannot remove protected volume '$volname' on '$storeid'\n" - if $class->get_volume_attribute($scfg, $storeid, $volname, 'protected'); + if $class->get_volume_attribute($scfg, $storeid, $volname, 'protected'); my $path = $class->filesystem_path($scfg, $volname); if ($isBase) { - # try to remove immutable flag - eval { run_command(['/usr/bin/chattr', '-i', $path]); }; - warn $@ if $@; + # try to remove immutable flag + eval { run_command(['/usr/bin/chattr', '-i', $path]); }; + warn $@ if $@; } if (defined($format) && ($format eq 'subvol')) { - File::Path::remove_tree($path); + File::Path::remove_tree($path); } else { - if (!(-f $path || -l $path)) { - warn "disk image '$path' does not exist\n"; - return undef; - } + if (!(-f $path || -l $path)) { + warn "disk image '$path' does not exist\n"; + return undef; + } - unlink($path) || die "unlink '$path' failed - $!\n"; + unlink($path) || die "unlink '$path' failed - $!\n"; } # try to cleanup directory to not clutter storage with empty $vmid dirs if @@ -993,15 +1036,15 @@ sub file_size_info { # TODO PVE 9 make $file_format mandatory warn "file_size_info: detected call without \$file_format parameter\n" - if !defined($file_format); + if !defined($file_format); # compat for old parameter order # TODO PVE 9 remove if (defined($file_format) && ($file_format eq '1' || $file_format eq '0')) { - warn "file_size_info: detected call with legacy parameter order: \$untrusted before" - ." \$file_format\n"; - $untrusted = $file_format; - $file_format = undef; + warn "file_size_info: detected call with legacy parameter order: \$untrusted before" + . " \$file_format\n"; + $untrusted = $file_format; + $file_format = undef; } $file_format = undef if $file_format && $file_format eq 'auto-detect'; @@ -1009,39 +1052,40 @@ sub file_size_info { my $st = File::stat::stat($filename); if (!defined($st)) { - my $extramsg = -l $filename ? ' - dangling symlink?' : ''; - my $msg = "failed to stat '$filename'$extramsg\n"; - if ($untrusted) { - die $msg; - } else { - warn $msg; - return undef; - } + my $extramsg = -l $filename ? ' - dangling symlink?' : ''; + my $msg = "failed to stat '$filename'$extramsg\n"; + if ($untrusted) { + die $msg; + } else { + warn $msg; + return undef; + } } my $handle_error = sub { - my ($msg) = @_; - if ($untrusted) { - die $msg; - } else { - warn $msg; - return wantarray ? (undef, undef, undef, undef, $st->ctime) : undef; - } + my ($msg) = @_; + if ($untrusted) { + die $msg; + } else { + warn $msg; + return wantarray ? 
(undef, undef, undef, undef, $st->ctime) : undef; + } }; if (S_ISDIR($st->mode)) { - $handle_error->("expected format '$file_format', but '$filename' is a directory\n") - if $file_format && $file_format ne 'subvol'; - return wantarray ? (0, 'subvol', 0, undef, $st->ctime) : 0; + $handle_error->("expected format '$file_format', but '$filename' is a directory\n") + if $file_format && $file_format ne 'subvol'; + return wantarray ? (0, 'subvol', 0, undef, $st->ctime) : 0; } elsif ($file_format && $file_format eq 'subvol') { - $handle_error->("expected format '$file_format', but '$filename' is not a directory\n"); + $handle_error->("expected format '$file_format', but '$filename' is not a directory\n"); } # TODO PVE 9 - consider upgrading to "die" if an unsupported format is passed in after # evaluating breakage potential. if ($file_format && !grep { $_ eq $file_format } @checked_qemu_img_formats) { - warn "file_size_info: '$filename': falling back to 'raw' from unknown format '$file_format'\n"; - $file_format = 'raw'; + warn + "file_size_info: '$filename': falling back to 'raw' from unknown format '$file_format'\n"; + $file_format = 'raw'; } my $cmd = ['/usr/bin/qemu-img', 'info', '--output=json', $filename]; push $cmd->@*, '-f', $file_format if $file_format; @@ -1049,42 +1093,44 @@ sub file_size_info { my $json = ''; my $err_output = ''; eval { - run_command($cmd, - timeout => $timeout, - outfunc => sub { $json .= shift }, - errfunc => sub { $err_output .= shift . "\n"}, - ); + run_command( + $cmd, + timeout => $timeout, + outfunc => sub { $json .= shift }, + errfunc => sub { $err_output .= shift . "\n" }, + ); }; warn $@ if $@; if ($err_output) { - # if qemu did not output anything to stdout we die with stderr as an error - die $err_output if !$json; - # otherwise we warn about it and try to parse the json - warn $err_output; + # if qemu did not output anything to stdout we die with stderr as an error + die $err_output if !$json; + # otherwise we warn about it and try to parse the json + warn $err_output; } if (!$json) { - die "failed to query file information with qemu-img\n" if $untrusted; - # skip decoding if there was no output, e.g. if there was a timeout. - return wantarray ? (undef, undef, undef, undef, $st->ctime) : undef; + die "failed to query file information with qemu-img\n" if $untrusted; + # skip decoding if there was no output, e.g. if there was a timeout. + return wantarray ? 
(undef, undef, undef, undef, $st->ctime) : undef; } my $info = eval { decode_json($json) }; $handle_error->("could not parse qemu-img info command output for '$filename' - $@\n") if $@; if ($untrusted) { - if (my $format_specific = $info->{'format-specific'}) { - if ($format_specific->{type} eq 'qcow2' && $format_specific->{data}->{"data-file"}) { - die "$filename: 'data-file' references are not allowed!\n"; - } elsif ($format_specific->{type} eq 'vmdk') { - my $extents = $format_specific->{data}->{extents}; - my $children = $info->{children}; - die "$filename: multiple children or extents are not allowed!\n" - if scalar($children->@*) > 1 || scalar($extents->@*) > 1; - } - } + if (my $format_specific = $info->{'format-specific'}) { + if ($format_specific->{type} eq 'qcow2' && $format_specific->{data}->{"data-file"}) { + die "$filename: 'data-file' references are not allowed!\n"; + } elsif ($format_specific->{type} eq 'vmdk') { + my $extents = $format_specific->{data}->{extents}; + my $children = $info->{children}; + die "$filename: multiple children or extents are not allowed!\n" + if scalar($children->@*) > 1 || scalar($extents->@*) > 1; + } + } } - my ($size, $format, $used, $parent) = $info->@{qw(virtual-size format actual-size backing-filename)}; + my ($size, $format, $used, $parent) = + $info->@{qw(virtual-size format actual-size backing-filename)}; die "backing file not allowed for untrusted image '$filename'!\n" if $untrusted && $parent; @@ -1099,12 +1145,12 @@ sub file_size_info { ($format) = ($format =~ /^(\S+)$/); # untaint die "format '$format' includes whitespace\n" if !defined($format); if (defined($parent)) { - warn "strange parent name path '$parent' found\n" if $parent =~ m/[^\S]/; - ($parent) = ($parent =~ /^(\S+)$/); # untaint + warn "strange parent name path '$parent' found\n" if $parent =~ m/[^\S]/; + ($parent) = ($parent =~ /^(\S+)$/); # untaint } die "qemu-img bug: queried format does not match format in result '$file_format ne $format'" - if $file_format && $file_format ne $format; + if $file_format && $file_format ne $format; return wantarray ? 
($size, $format, $used, $parent, $st->ctime) : $size; } @@ -1134,12 +1180,12 @@ sub get_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute) = @_; if ($attribute eq 'notes') { - my $notes = eval { $class->get_volume_notes($scfg, $storeid, $volname); }; - if (my $err = $@) { - return if $err =~ m/^volume notes are not supported/; - die $err; - } - return $notes; + my $notes = eval { $class->get_volume_notes($scfg, $storeid, $volname); }; + if (my $err = $@) { + return if $err =~ m/^volume notes are not supported/; + die $err; + } + return $notes; } return; @@ -1150,7 +1196,7 @@ sub update_volume_attribute { my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_; if ($attribute eq 'notes') { - $class->update_volume_notes($scfg, $storeid, $volname, $value); + $class->update_volume_notes($scfg, $storeid, $volname, $value); } die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n"; @@ -1175,7 +1221,7 @@ sub volume_resize { my $format = ($class->parse_volname($volname))[6]; - my $cmd = ['/usr/bin/qemu-img', 'resize', '-f', $format, $path , $size]; + my $cmd = ['/usr/bin/qemu-img', 'resize', '-f', $format, $path, $size]; run_command($cmd, timeout => 10); @@ -1189,7 +1235,7 @@ sub volume_snapshot { my $path = $class->filesystem_path($scfg, $volname); - my $cmd = ['/usr/bin/qemu-img', 'snapshot','-c', $snap, $path]; + my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-c', $snap, $path]; run_command($cmd); @@ -1212,7 +1258,7 @@ sub volume_snapshot_rollback { my $path = $class->filesystem_path($scfg, $volname); - my $cmd = ['/usr/bin/qemu-img', 'snapshot','-a', $snap, $path]; + my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-a', $snap, $path]; run_command($cmd); @@ -1230,7 +1276,7 @@ sub volume_snapshot_delete { $class->deactivate_volume($storeid, $scfg, $volname, $snap, {}); - my $cmd = ['/usr/bin/qemu-img', 'snapshot','-d', $snap, $path]; + my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-d', $snap, $path]; run_command($cmd); @@ -1241,6 +1287,7 @@ sub volume_snapshot_needs_fsfreeze { return 0; } + sub storage_can_replicate { my ($class, $scfg, $storeid, $format) = @_; @@ -1251,49 +1298,49 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running, $opts) = @_; my $features = { - snapshot => { - current => { qcow2 => 1 }, - snap => { qcow2 => 1 }, - }, - clone => { - base => { qcow2 => 1, raw => 1, vmdk => 1 }, - }, - template => { - current => { qcow2 => 1, raw => 1, vmdk => 1, subvol => 1 }, - }, - copy => { - base => { qcow2 => 1, raw => 1, vmdk => 1 }, - current => { qcow2 => 1, raw => 1, vmdk => 1 }, - snap => { qcow2 => 1 }, - }, - sparseinit => { - base => { qcow2 => 1, raw => 1, vmdk => 1 }, - current => { qcow2 => 1, raw => 1, vmdk => 1 }, - }, - rename => { - current => {qcow2 => 1, raw => 1, vmdk => 1}, - }, + snapshot => { + current => { qcow2 => 1 }, + snap => { qcow2 => 1 }, + }, + clone => { + base => { qcow2 => 1, raw => 1, vmdk => 1 }, + }, + template => { + current => { qcow2 => 1, raw => 1, vmdk => 1, subvol => 1 }, + }, + copy => { + base => { qcow2 => 1, raw => 1, vmdk => 1 }, + current => { qcow2 => 1, raw => 1, vmdk => 1 }, + snap => { qcow2 => 1 }, + }, + sparseinit => { + base => { qcow2 => 1, raw => 1, vmdk => 1 }, + current => { qcow2 => 1, raw => 1, vmdk => 1 }, + }, + rename => { + current => { qcow2 => 1, raw => 1, vmdk => 1 }, + }, }; if ($feature eq 'clone') { - if ( - defined($opts->{valid_target_formats}) - && !(grep { $_ eq 'qcow2' } @{$opts->{valid_target_formats}}) - ) { - return 0; # 
clone_image creates a qcow2 volume - } + if ( + defined($opts->{valid_target_formats}) + && !(grep { $_ eq 'qcow2' } @{ $opts->{valid_target_formats} }) + ) { + return 0; # clone_image creates a qcow2 volume + } } elsif ($feature eq 'rename') { - return 0 if $class->can('api') && $class->api() < 10; + return 0 if $class->can('api') && $class->api() < 10; } - - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + $class->parse_volname($volname); my $key = undef; - if($snapname){ - $key = 'snap'; - }else{ - $key = $isBase ? 'base' : 'current'; + if ($snapname) { + $key = 'snap'; + } else { + $key = $isBase ? 'base' : 'current'; } return 1 if defined($features->{$feature}->{$key}->{$format}); @@ -1307,53 +1354,55 @@ sub list_images { my $imagedir = $class->get_subdir($scfg, 'images'); my ($defFmt, $vaidFmts) = default_format($scfg); - my $fmts = join ('|', @$vaidFmts); + my $fmts = join('|', @$vaidFmts); my $res = []; foreach my $fn (<$imagedir/[0-9][0-9]*/*>) { - next if $fn !~ m!^(/.+/(\d+)/([^/]+\.($fmts)))$!; - $fn = $1; # untaint + next if $fn !~ m!^(/.+/(\d+)/([^/]+\.($fmts)))$!; + $fn = $1; # untaint - my $owner = $2; - my $name = $3; - my $format = $4; + my $owner = $2; + my $name = $3; + my $format = $4; - next if !$vollist && defined($vmid) && ($owner ne $vmid); + next if !$vollist && defined($vmid) && ($owner ne $vmid); - my ($size, undef, $used, $parent, $ctime) = eval { - file_size_info($fn, undef, $format); - }; - if (my $err = $@) { - die $err if $err !~ m/Image is not in \S+ format$/; - warn "image '$fn' is not in expected format '$format', querying as raw\n"; - ($size, undef, $used, $parent, $ctime) = file_size_info($fn, undef, 'raw'); - $format = 'invalid'; - } - next if !defined($size); + my ($size, undef, $used, $parent, $ctime) = eval { file_size_info($fn, undef, $format); }; + if (my $err = $@) { + die $err if $err !~ m/Image is not in \S+ format$/; + warn "image '$fn' is not in expected format '$format', querying as raw\n"; + ($size, undef, $used, $parent, $ctime) = file_size_info($fn, undef, 'raw'); + $format = 'invalid'; + } + next if !defined($size); - my $volid; - if ($parent && $parent =~ m!^../(\d+)/([^/]+\.($fmts))$!) { - my ($basevmid, $basename) = ($1, $2); - $volid = "$storeid:$basevmid/$basename/$owner/$name"; - } else { - $volid = "$storeid:$owner/$name"; - } + my $volid; + if ($parent && $parent =~ m!^../(\d+)/([^/]+\.($fmts))$!) 
{ + my ($basevmid, $basename) = ($1, $2); + $volid = "$storeid:$basevmid/$basename/$owner/$name"; + } else { + $volid = "$storeid:$owner/$name"; + } - if ($vollist) { - my $found = grep { $_ eq $volid } @$vollist; - next if !$found; - } + if ($vollist) { + my $found = grep { $_ eq $volid } @$vollist; + next if !$found; + } - my $info = { - volid => $volid, format => $format, - size => $size, vmid => $owner, used => $used, parent => $parent - }; + my $info = { + volid => $volid, + format => $format, + size => $size, + vmid => $owner, + used => $used, + parent => $parent, + }; - $info->{ctime} = $ctime if $ctime; + $info->{ctime} = $ctime if $ctime; - push @$res, $info; + push @$res, $info; } return $res; @@ -1366,65 +1415,68 @@ my $get_subdir_files = sub { my $res = []; foreach my $fn (<$path/*>) { - my $st = File::stat::stat($fn); + my $st = File::stat::stat($fn); - next if (!$st || S_ISDIR($st->mode)); + next if (!$st || S_ISDIR($st->mode)); - my $info; + my $info; - if ($tt eq 'iso') { - next if $fn !~ m!/([^/]+$PVE::Storage::ISO_EXT_RE_0)$!i; + if ($tt eq 'iso') { + next if $fn !~ m!/([^/]+$PVE::Storage::ISO_EXT_RE_0)$!i; - $info = { volid => "$sid:iso/$1", format => 'iso' }; + $info = { volid => "$sid:iso/$1", format => 'iso' }; - } elsif ($tt eq 'vztmpl') { - next if $fn !~ m!/([^/]+$PVE::Storage::VZTMPL_EXT_RE_1)$!; + } elsif ($tt eq 'vztmpl') { + next if $fn !~ m!/([^/]+$PVE::Storage::VZTMPL_EXT_RE_1)$!; - $info = { volid => "$sid:vztmpl/$1", format => "t$2" }; + $info = { volid => "$sid:vztmpl/$1", format => "t$2" }; - } elsif ($tt eq 'backup') { - next if $fn !~ m!/([^/]+$PVE::Storage::BACKUP_EXT_RE_2)$!; - my $original = $fn; - my $format = $2; - $fn = $1; + } elsif ($tt eq 'backup') { + next if $fn !~ m!/([^/]+$PVE::Storage::BACKUP_EXT_RE_2)$!; + my $original = $fn; + my $format = $2; + $fn = $1; - # only match for VMID now, to avoid false positives (VMID in parent directory name) - next if defined($vmid) && $fn !~ m/\S+-$vmid-\S+/; + # only match for VMID now, to avoid false positives (VMID in parent directory name) + next if defined($vmid) && $fn !~ m/\S+-$vmid-\S+/; - $info = { volid => "$sid:backup/$fn", format => $format }; + $info = { volid => "$sid:backup/$fn", format => $format }; - my $archive_info = eval { PVE::Storage::archive_info($fn) } // {}; + my $archive_info = eval { PVE::Storage::archive_info($fn) } // {}; - $info->{ctime} = $archive_info->{ctime} if defined($archive_info->{ctime}); - $info->{subtype} = $archive_info->{type} // 'unknown'; + $info->{ctime} = $archive_info->{ctime} if defined($archive_info->{ctime}); + $info->{subtype} = $archive_info->{type} // 'unknown'; - if (defined($vmid) || $fn =~ m!\-([1-9][0-9]{2,8})\-[^/]+\.${format}$!) { - $info->{vmid} = $vmid // $1; - } + if (defined($vmid) || $fn =~ m!\-([1-9][0-9]{2,8})\-[^/]+\.${format}$!) { + $info->{vmid} = $vmid // $1; + } - my $notes_fn = $original.NOTES_EXT; - if (-f $notes_fn) { - my $notes = PVE::Tools::file_read_firstline($notes_fn); - $info->{notes} = eval { decode('UTF-8', $notes, 1) } // $notes if defined($notes); - } + my $notes_fn = $original . NOTES_EXT; + if (-f $notes_fn) { + my $notes = PVE::Tools::file_read_firstline($notes_fn); + $info->{notes} = eval { decode('UTF-8', $notes, 1) } // $notes + if defined($notes); + } - $info->{protected} = 1 if -e PVE::Storage::protection_file_path($original); - } elsif ($tt eq 'snippets') { + $info->{protected} = 1 if -e PVE::Storage::protection_file_path($original); + } elsif ($tt eq 'snippets') { - $info = { - volid => "$sid:snippets/". 
basename($fn), - format => 'snippet', - }; - } elsif ($tt eq 'import') { - next if $fn !~ m!/(${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!i; + $info = { + volid => "$sid:snippets/" . basename($fn), + format => 'snippet', + }; + } elsif ($tt eq 'import') { + next + if $fn !~ + m!/(${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!i; - $info = { volid => "$sid:import/$1", format => "$2" }; - } + $info = { volid => "$sid:import/$1", format => "$2" }; + } - $info->{size} = $st->size; - $info->{ctime} //= $st->ctime; + $info->{size} = $st->size; + $info->{ctime} //= $st->ctime; - push @$res, $info; + push @$res, $info; } return $res; @@ -1438,47 +1490,47 @@ sub list_volumes { my $res = []; my $vmlist = PVE::Cluster::get_vmlist(); foreach my $type (@$content_types) { - my $data; + my $data; - if ($type eq 'images' || $type eq 'rootdir') { - $data = $class->list_images($storeid, $scfg, $vmid); - } elsif ($scfg->{path}) { - my $path = $class->get_subdir($scfg, $type); + if ($type eq 'images' || $type eq 'rootdir') { + $data = $class->list_images($storeid, $scfg, $vmid); + } elsif ($scfg->{path}) { + my $path = $class->get_subdir($scfg, $type); - if ($type eq 'iso' && !defined($vmid)) { - $data = $get_subdir_files->($storeid, $path, 'iso'); - } elsif ($type eq 'vztmpl'&& !defined($vmid)) { - $data = $get_subdir_files->($storeid, $path, 'vztmpl'); - } elsif ($type eq 'backup') { - $data = $get_subdir_files->($storeid, $path, 'backup', $vmid); - } elsif ($type eq 'snippets') { - $data = $get_subdir_files->($storeid, $path, 'snippets'); - } elsif ($type eq 'import') { - $data = $get_subdir_files->($storeid, $path, 'import'); - } - } + if ($type eq 'iso' && !defined($vmid)) { + $data = $get_subdir_files->($storeid, $path, 'iso'); + } elsif ($type eq 'vztmpl' && !defined($vmid)) { + $data = $get_subdir_files->($storeid, $path, 'vztmpl'); + } elsif ($type eq 'backup') { + $data = $get_subdir_files->($storeid, $path, 'backup', $vmid); + } elsif ($type eq 'snippets') { + $data = $get_subdir_files->($storeid, $path, 'snippets'); + } elsif ($type eq 'import') { + $data = $get_subdir_files->($storeid, $path, 'import'); + } + } - next if !$data; + next if !$data; - foreach my $item (@$data) { - if ($type eq 'images' || $type eq 'rootdir') { - my $vminfo = $vmlist->{ids}->{$item->{vmid}}; - my $vmtype; - if (defined($vminfo)) { - $vmtype = $vminfo->{type}; - } - if (defined($vmtype) && $vmtype eq 'lxc') { - $item->{content} = 'rootdir'; - } else { - $item->{content} = 'images'; - } - next if $type ne $item->{content}; - } else { - $item->{content} = $type; - } + foreach my $item (@$data) { + if ($type eq 'images' || $type eq 'rootdir') { + my $vminfo = $vmlist->{ids}->{ $item->{vmid} }; + my $vmtype; + if (defined($vminfo)) { + $vmtype = $vminfo->{type}; + } + if (defined($vmtype) && $vmtype eq 'lxc') { + $item->{content} = 'rootdir'; + } else { + $item->{content} = 'images'; + } + next if $type ne $item->{content}; + } else { + $item->{content} = $type; + } - push @$res, $item; - } + push @$res, $item; + } } return $res; @@ -1518,45 +1570,45 @@ sub activate_storage { # this path test may hang indefinitely on unresponsive mounts my $timeout = 2; - if (! PVE::Tools::run_fork_with_timeout($timeout, sub {-d $path})) { - die "unable to activate storage '$storeid' - " . - "directory '$path' does not exist or is unreachable\n"; + if (!PVE::Tools::run_fork_with_timeout($timeout, sub { -d $path })) { + die "unable to activate storage '$storeid' - " + . 
"directory '$path' does not exist or is unreachable\n"; } # TODO: mkdir is basically deprecated since 8.0, but we don't warn here until 8.4 or 9.0, as we # only got the replacement in 8.0, so no real replacement window, and its really noisy. if (defined($scfg->{content})) { - # (opt-out) create content dirs and check validity - if ( - (!defined($scfg->{'create-subdirs'}) || $scfg->{'create-subdirs'}) - # FIXME The mkdir option is deprecated. Remove with PVE 9? - && (!defined($scfg->{mkdir}) || $scfg->{mkdir}) - ) { - for my $vtype (sort keys %$vtype_subdirs) { - # OpenVZMigrate uses backup (dump) dir - if ( - defined($scfg->{content}->{$vtype}) - || ($vtype eq 'backup' && defined($scfg->{content}->{'rootdir'})) - ) { - my $subdir = $class->get_subdir($scfg, $vtype); - mkpath $subdir if $subdir ne $path; - } - } - } + # (opt-out) create content dirs and check validity + if ( + (!defined($scfg->{'create-subdirs'}) || $scfg->{'create-subdirs'}) + # FIXME The mkdir option is deprecated. Remove with PVE 9? + && (!defined($scfg->{mkdir}) || $scfg->{mkdir}) + ) { + for my $vtype (sort keys %$vtype_subdirs) { + # OpenVZMigrate uses backup (dump) dir + if ( + defined($scfg->{content}->{$vtype}) + || ($vtype eq 'backup' && defined($scfg->{content}->{'rootdir'})) + ) { + my $subdir = $class->get_subdir($scfg, $vtype); + mkpath $subdir if $subdir ne $path; + } + } + } - # check that content dirs are pairwise inequal - my $resolved_subdirs = {}; - for my $vtype (sort keys $scfg->{content}->%*) { - my $subdir = $class->get_subdir($scfg, $vtype); - my $abs_subdir = abs_path($subdir); - next if !defined($abs_subdir); + # check that content dirs are pairwise inequal + my $resolved_subdirs = {}; + for my $vtype (sort keys $scfg->{content}->%*) { + my $subdir = $class->get_subdir($scfg, $vtype); + my $abs_subdir = abs_path($subdir); + next if !defined($abs_subdir); - die "storage '$storeid' uses directory $abs_subdir for multiple content types\n" - if defined($abs_subdir) && defined($resolved_subdirs->{$abs_subdir}); + die "storage '$storeid' uses directory $abs_subdir for multiple content types\n" + if defined($abs_subdir) && defined($resolved_subdirs->{$abs_subdir}); - $resolved_subdirs->{$abs_subdir} = 1; - } + $resolved_subdirs->{$abs_subdir} = 1; + } } } @@ -1586,9 +1638,9 @@ sub activate_volume { # check is volume exists if ($scfg->{path}) { - die "volume '$storeid:$volname' does not exist\n" if ! -e $path; + die "volume '$storeid:$volname' does not exist\n" if !-e $path; } else { - die "volume '$storeid:$volname' does not exist\n" if ! 
-b $path; + die "volume '$storeid:$volname' does not exist\n" if !-b $path; } } @@ -1615,60 +1667,60 @@ sub prune_backups { my $prune_list = []; foreach my $backup (@{$backups}) { - my $volid = $backup->{volid}; - my $archive_info = eval { PVE::Storage::archive_info($volid) } // {}; - my $backup_type = $archive_info->{type} // 'unknown'; - my $backup_vmid = $archive_info->{vmid} // $backup->{vmid}; + my $volid = $backup->{volid}; + my $archive_info = eval { PVE::Storage::archive_info($volid) } // {}; + my $backup_type = $archive_info->{type} // 'unknown'; + my $backup_vmid = $archive_info->{vmid} // $backup->{vmid}; - next if defined($type) && $type ne $backup_type; + next if defined($type) && $type ne $backup_type; - my $prune_entry = { - ctime => $backup->{ctime}, - type => $backup_type, - volid => $volid, - }; + my $prune_entry = { + ctime => $backup->{ctime}, + type => $backup_type, + volid => $volid, + }; - $prune_entry->{vmid} = $backup_vmid if defined($backup_vmid); + $prune_entry->{vmid} = $backup_vmid if defined($backup_vmid); - if ($archive_info->{is_std_name}) { - die "internal error - got no VMID\n" if !defined($backup_vmid); - die "internal error - got wrong VMID '$backup_vmid' != '$vmid'\n" - if defined($vmid) && $backup_vmid ne $vmid; + if ($archive_info->{is_std_name}) { + die "internal error - got no VMID\n" if !defined($backup_vmid); + die "internal error - got wrong VMID '$backup_vmid' != '$vmid'\n" + if defined($vmid) && $backup_vmid ne $vmid; - $prune_entry->{ctime} = $archive_info->{ctime}; - my $group = "$backup_type/$backup_vmid"; - push @{$backup_groups->{$group}}, $prune_entry; - } else { - # ignore backups that don't use the standard naming scheme - $prune_entry->{mark} = 'renamed'; - } + $prune_entry->{ctime} = $archive_info->{ctime}; + my $group = "$backup_type/$backup_vmid"; + push @{ $backup_groups->{$group} }, $prune_entry; + } else { + # ignore backups that don't use the standard naming scheme + $prune_entry->{mark} = 'renamed'; + } - $prune_entry->{mark} = 'protected' if $backup->{protected}; + $prune_entry->{mark} = 'protected' if $backup->{protected}; - push @{$prune_list}, $prune_entry; + push @{$prune_list}, $prune_entry; } foreach my $backup_group (values %{$backup_groups}) { - PVE::Storage::prune_mark_backup_group($backup_group, $keep); + PVE::Storage::prune_mark_backup_group($backup_group, $keep); } my $failed; if (!$dryrun) { - foreach my $prune_entry (@{$prune_list}) { - next if $prune_entry->{mark} ne 'remove'; + foreach my $prune_entry (@{$prune_list}) { + next if $prune_entry->{mark} ne 'remove'; - my $volid = $prune_entry->{volid}; - $logfunc->('info', "removing backup '$volid'"); - eval { - my (undef, $volname) = parse_volume_id($volid); - my $archive_path = $class->filesystem_path($scfg, $volname); - PVE::Storage::archive_remove($archive_path); - }; - if (my $err = $@) { - $logfunc->('err', "error when removing backup '$volid' - $err\n"); - $failed = 1; - } - } + my $volid = $prune_entry->{volid}; + $logfunc->('info', "removing backup '$volid'"); + eval { + my (undef, $volname) = parse_volume_id($volid); + my $archive_path = $class->filesystem_path($scfg, $volname); + PVE::Storage::archive_remove($archive_path); + }; + if (my $err = $@) { + $logfunc->('err', "error when removing backup '$volid' - $err\n"); + $failed = 1; + } + } } die "error pruning backups - check log\n" if $failed; @@ -1720,37 +1772,57 @@ sub read_common_header($) { # Export a volume into a file handle as a stream of desired format. 
sub volume_export { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_; + my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) + = @_; my $err_msg = "volume export format $format not available for $class\n"; if ($scfg->{path} && !defined($snapshot) && !defined($base_snapshot)) { - my ($file) = $class->path($scfg, $volname, $storeid) or die $err_msg; - my $file_format = ($class->parse_volname($volname))[6]; - my $size = file_size_info($file, undef, $file_format); + my ($file) = $class->path($scfg, $volname, $storeid) or die $err_msg; + my $file_format = ($class->parse_volname($volname))[6]; + my $size = file_size_info($file, undef, $file_format); - if ($format eq 'raw+size') { - die $err_msg if $with_snapshots || $file_format eq 'subvol'; - write_common_header($fh, $size); - if ($file_format eq 'raw') { - run_command(['dd', "if=$file", "bs=4k", "status=progress"], output => '>&'.fileno($fh)); - } else { - run_command(['qemu-img', 'convert', '-f', $file_format, '-O', 'raw', $file, '/dev/stdout'], - output => '>&'.fileno($fh)); - } - return; - } elsif ($format =~ /^(qcow2|vmdk)\+size$/) { - my $data_format = $1; - die $err_msg if !$with_snapshots || $file_format ne $data_format; - write_common_header($fh, $size); - run_command(['dd', "if=$file", "bs=4k", "status=progress"], output => '>&'.fileno($fh)); - return; - } elsif ($format eq 'tar+size') { - die $err_msg if $file_format ne 'subvol'; - write_common_header($fh, $size); - run_command(['tar', @COMMON_TAR_FLAGS, '-cf', '-', '-C', $file, '.'], - output => '>&'.fileno($fh)); - return; - } + if ($format eq 'raw+size') { + die $err_msg if $with_snapshots || $file_format eq 'subvol'; + write_common_header($fh, $size); + if ($file_format eq 'raw') { + run_command( + ['dd', "if=$file", "bs=4k", "status=progress"], + output => '>&' . fileno($fh), + ); + } else { + run_command( + [ + 'qemu-img', + 'convert', + '-f', + $file_format, + '-O', + 'raw', + $file, + '/dev/stdout', + ], + output => '>&' . fileno($fh), + ); + } + return; + } elsif ($format =~ /^(qcow2|vmdk)\+size$/) { + my $data_format = $1; + die $err_msg if !$with_snapshots || $file_format ne $data_format; + write_common_header($fh, $size); + run_command( + ['dd', "if=$file", "bs=4k", "status=progress"], + output => '>&' . fileno($fh), + ); + return; + } elsif ($format eq 'tar+size') { + die $err_msg if $file_format ne 'subvol'; + write_common_header($fh, $size); + run_command( + ['tar', @COMMON_TAR_FLAGS, '-cf', '-', '-C', $file, '.'], + output => '>&' . fileno($fh), + ); + return; + } } die $err_msg; } @@ -1758,74 +1830,89 @@ sub volume_export { sub volume_export_formats { my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_; if ($scfg->{path} && !defined($snapshot) && !defined($base_snapshot)) { - my $format = ($class->parse_volname($volname))[6]; + my $format = ($class->parse_volname($volname))[6]; - if ($with_snapshots) { - return ($format.'+size') if ($format eq 'qcow2' || $format eq 'vmdk'); - return (); - } - return ('tar+size') if $format eq 'subvol'; - return ('raw+size'); + if ($with_snapshots) { + return ($format . '+size') if ($format eq 'qcow2' || $format eq 'vmdk'); + return (); + } + return ('tar+size') if $format eq 'subvol'; + return ('raw+size'); } return (); } # Import data from a stream, creating a new or replacing or adding to an existing volume. 
sub volume_import { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_; + my ( + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ) = @_; die "volume import format '$format' not available for $class\n" - if $format !~ /^(raw|tar|qcow2|vmdk)\+size$/; + if $format !~ /^(raw|tar|qcow2|vmdk)\+size$/; my $data_format = $1; die "format $format cannot be imported without snapshots\n" - if !$with_snapshots && ($data_format eq 'qcow2' || $data_format eq 'vmdk'); + if !$with_snapshots && ($data_format eq 'qcow2' || $data_format eq 'vmdk'); die "format $format cannot be imported with snapshots\n" - if $with_snapshots && ($data_format eq 'raw' || $data_format eq 'tar'); + if $with_snapshots && ($data_format eq 'raw' || $data_format eq 'tar'); my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) = - $class->parse_volname($volname); + $class->parse_volname($volname); # XXX: Should we bother with conversion routines at this level? This won't # happen without manual CLI usage, so for now we just error out... die "cannot import format $format into a file of format $file_format\n" - if $data_format ne $file_format && !($data_format eq 'tar' && $file_format eq 'subvol'); + if $data_format ne $file_format && !($data_format eq 'tar' && $file_format eq 'subvol'); # Check for an existing file first since interrupting alloc_image doesn't # free it. my ($file) = $class->path($scfg, $volname, $storeid); if (-e $file) { - die "file '$file' already exists\n" if !$allow_rename; - warn "file '$file' already exists - importing with a different name\n"; - $name = undef; + die "file '$file' already exists\n" if !$allow_rename; + warn "file '$file' already exists - importing with a different name\n"; + $name = undef; } my ($size) = read_common_header($fh); $size = PVE::Storage::Common::align_size_up($size, 1024) / 1024; eval { - my $allocname = $class->alloc_image($storeid, $scfg, $vmid, $file_format, $name, $size); - my $oldname = $volname; - $volname = $allocname; - if (defined($name) && $allocname ne $oldname) { - die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n"; - } - my ($file) = $class->path($scfg, $volname, $storeid) - or die "internal error: failed to get path to newly allocated volume $volname\n"; - if ($data_format eq 'raw' || $data_format eq 'qcow2' || $data_format eq 'vmdk') { - run_command(['dd', "of=$file", 'conv=sparse', 'bs=64k'], - input => '<&'.fileno($fh)); - } elsif ($data_format eq 'tar') { - run_command(['tar', @COMMON_TAR_FLAGS, '-C', $file, '-xf', '-'], - input => '<&'.fileno($fh)); - } else { - die "volume import format '$format' not available for $class"; - } + my $allocname = $class->alloc_image($storeid, $scfg, $vmid, $file_format, $name, $size); + my $oldname = $volname; + $volname = $allocname; + if (defined($name) && $allocname ne $oldname) { + die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n"; + } + my ($file) = $class->path($scfg, $volname, $storeid) + or die "internal error: failed to get path to newly allocated volume $volname\n"; + if ($data_format eq 'raw' || $data_format eq 'qcow2' || $data_format eq 'vmdk') { + run_command( + ['dd', "of=$file", 'conv=sparse', 'bs=64k'], + input => '<&' . fileno($fh), + ); + } elsif ($data_format eq 'tar') { + run_command( + ['tar', @COMMON_TAR_FLAGS, '-C', $file, '-xf', '-'], + input => '<&' . 
fileno($fh), + ); + } else { + die "volume import format '$format' not available for $class"; + } }; if (my $err = $@) { - eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format) }; - warn $@ if $@; - die $err; + eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format) }; + warn $@ if $@; + die $err; } return "$storeid:$volname"; @@ -1834,13 +1921,13 @@ sub volume_import { sub volume_import_formats { my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_; if ($scfg->{path} && !defined($base_snapshot)) { - my $format = ($class->parse_volname($volname))[6]; - if ($with_snapshots) { - return ($format.'+size') if ($format eq 'qcow2' || $format eq 'vmdk'); - return (); - } - return ('tar+size') if $format eq 'subvol'; - return ('raw+size'); + my $format = ($class->parse_volname($volname))[6]; + if ($with_snapshots) { + return ($format . '+size') if ($format eq 'qcow2' || $format eq 'vmdk'); + return (); + } + return ('tar+size') if $format eq 'subvol'; + return ('raw+size'); } return (); } @@ -1851,17 +1938,11 @@ sub rename_volume { die "no path found\n" if !$scfg->{path}; my ( - undef, - $source_image, - $source_vmid, - $base_name, - $base_vmid, - undef, - $format + undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format, ) = $class->parse_volname($source_volname); $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format, 1) - if !$target_volname; + if !$target_volname; my $basedir = $class->get_subdir($scfg, 'images'); @@ -1874,8 +1955,8 @@ sub rename_volume { my $base = $base_name ? "${base_vmid}/${base_name}/" : ''; - rename($old_path, $new_path) || - die "rename '$old_path' to '$new_path' failed - $!\n"; + rename($old_path, $new_path) + || die "rename '$old_path' to '$new_path' failed - $!\n"; return "${storeid}:${base}${target_vmid}/${target_volname}"; } @@ -1900,7 +1981,7 @@ sub config_aware_base_mkdir { # FIXME the mkdir parameter is deprecated and create-base-path should be used if ($scfg->{'create-base-path'} // $scfg->{mkdir} // 1) { - mkpath($path); + mkpath($path); } } diff --git a/src/PVE/Storage/RBDPlugin.pm b/src/PVE/Storage/RBDPlugin.pm index 73bc97e..c0bbe2c 100644 --- a/src/PVE/Storage/RBDPlugin.pm +++ b/src/PVE/Storage/RBDPlugin.pm @@ -10,7 +10,7 @@ use Net::IP; use POSIX qw(ceil); use PVE::CephConfig; -use PVE::Cluster qw(cfs_read_file);; +use PVE::Cluster qw(cfs_read_file); use PVE::JSONSchema qw(get_standard_option); use PVE::ProcFSTools; use PVE::RADOS; @@ -32,7 +32,7 @@ my $librados_connect = sub { my ($scfg, $storeid, $options) = @_; $options->{timeout} = 60 - if !defined($options->{timeout}) && PVE::RPCEnvironment->is_worker(); + if !defined($options->{timeout}) && PVE::RPCEnvironment->is_worker(); my $librados_config = PVE::CephConfig::ceph_connect_option($scfg, $storeid, $options->%*); @@ -47,27 +47,27 @@ my sub get_rbd_path { $path .= "/$scfg->{namespace}" if defined($scfg->{namespace}); $path .= "/$volume" if defined($volume); return $path; -}; +} my sub get_rbd_dev_path { my ($scfg, $storeid, $volume) = @_; my $cluster_id = ''; if ($scfg->{fsid}) { - # NOTE: the config doesn't support this currently (but it could!), hack for qemu-server tests - $cluster_id = $scfg->{fsid}; + # NOTE: the config doesn't support this currently (but it could!), hack for qemu-server tests + $cluster_id = $scfg->{fsid}; } elsif ($scfg->{monhost}) { - my $rados = $librados_connect->($scfg, $storeid); - $cluster_id = $rados->mon_command({ prefix => 'fsid', format => 'json' 
})->{fsid}; + my $rados = $librados_connect->($scfg, $storeid); + $cluster_id = $rados->mon_command({ prefix => 'fsid', format => 'json' })->{fsid}; } else { - $cluster_id = cfs_read_file('ceph.conf')->{global}->{fsid}; + $cluster_id = cfs_read_file('ceph.conf')->{global}->{fsid}; } my $uuid_pattern = "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"; if ($cluster_id =~ qr/^${uuid_pattern}$/is) { - $cluster_id = $1; # use untained value + $cluster_id = $1; # use untained value } else { - die "cluster fsid has invalid format\n"; + die "cluster fsid has invalid format\n"; } my $rbd_path = get_rbd_path($scfg, $volume); @@ -75,11 +75,11 @@ my sub get_rbd_dev_path { my $path = "/dev/rbd/${rbd_path}"; if (!-e $pve_path && -e $path) { - # possibly mapped before rbd-pve rule existed - my $real_dev = abs_path($path); - my ($rbd_id) = ($real_dev =~ m|/dev/rbd([0-9]+)$|); - my $dev_cluster_id = file_read_firstline("/sys/devices/rbd/${rbd_id}/cluster_fsid"); - return $path if $cluster_id eq $dev_cluster_id; + # possibly mapped before rbd-pve rule existed + my $real_dev = abs_path($path); + my ($rbd_id) = ($real_dev =~ m|/dev/rbd([0-9]+)$|); + my $dev_cluster_id = file_read_firstline("/sys/devices/rbd/${rbd_id}/cluster_fsid"); + return $path if $cluster_id eq $dev_cluster_id; } return $pve_path; } @@ -88,25 +88,26 @@ my $rbd_cmd = sub { my ($scfg, $storeid, $op, @options) = @_; my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid); - my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd'; + my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd'; my $cmd = ['/usr/bin/rbd']; if ($op eq 'import') { - push $cmd->@*, '--dest-pool', $pool; + push $cmd->@*, '--dest-pool', $pool; } else { - push $cmd->@*, '-p', $pool; + push $cmd->@*, '-p', $pool; } if (defined(my $namespace = $scfg->{namespace})) { - # some subcommands will fail if the --namespace parameter is present - my $no_namespace_parameter = { - unmap => 1, - }; - push @$cmd, '--namespace', "$namespace" if !$no_namespace_parameter->{$op}; + # some subcommands will fail if the --namespace parameter is present + my $no_namespace_parameter = { + unmap => 1, + }; + push @$cmd, '--namespace', "$namespace" if !$no_namespace_parameter->{$op}; } push @$cmd, '-c', $cmd_option->{ceph_conf} if ($cmd_option->{ceph_conf}); push @$cmd, '-m', $cmd_option->{mon_host} if ($cmd_option->{mon_host}); - push @$cmd, '--auth_supported', $cmd_option->{auth_supported} if ($cmd_option->{auth_supported}); + push @$cmd, '--auth_supported', $cmd_option->{auth_supported} + if ($cmd_option->{auth_supported}); push @$cmd, '-n', "client.$cmd_option->{userid}" if ($cmd_option->{userid}); push @$cmd, '--keyring', $cmd_option->{keyring} if ($cmd_option->{keyring}); @@ -125,42 +126,45 @@ my $krbd_feature_update = sub { my ($kmajor, $kminor) = PVE::ProcFSTools::kernel_version(); if ($kmajor > 5 || $kmajor == 5 && $kminor >= 3) { - # 'deep-flatten' can only be disabled, not enabled after image creation - push @enable, 'fast-diff', 'object-map'; + # 'deep-flatten' can only be disabled, not enabled after image creation + push @enable, 'fast-diff', 'object-map'; } else { - push @disable, 'fast-diff', 'object-map', 'deep-flatten'; + push @disable, 'fast-diff', 'object-map', 'deep-flatten'; } if ($kmajor >= 5) { - push @enable, 'exclusive-lock'; + push @enable, 'exclusive-lock'; } else { - push @disable, 'exclusive-lock'; + push @disable, 'exclusive-lock'; } my $active_features_list = (rbd_volume_info($scfg, $storeid, $name))[4]; my $active_features = { map { $_ => 1 } 
@$active_features_list }; - my $to_disable = join(',', grep { $active_features->{$_} } @disable); - my $to_enable = join(',', grep { !$active_features->{$_} } @enable ); + my $to_disable = join(',', grep { $active_features->{$_} } @disable); + my $to_enable = join(',', grep { !$active_features->{$_} } @enable); if ($to_disable) { - print "disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n"; - my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable); - run_rbd_command( - $cmd, - errmsg => "could not disable krbd-incompatible image features '$to_disable' for rbd image: $name", - ); + print + "disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n"; + my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable); + run_rbd_command( + $cmd, + errmsg => + "could not disable krbd-incompatible image features '$to_disable' for rbd image: $name", + ); } if ($to_enable) { - print "enable RBD image features this kernel RBD drivers supports: $to_enable\n"; - eval { - my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable); - run_rbd_command( - $cmd, - errmsg => "could not enable krbd-compatible image features '$to_enable' for rbd image: $name", - ); - }; - warn "$@" if $@; + print "enable RBD image features this kernel RBD drivers supports: $to_enable\n"; + eval { + my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable); + run_rbd_command( + $cmd, + errmsg => + "could not enable krbd-compatible image features '$to_enable' for rbd image: $name", + ); + }; + warn "$@" if $@; } }; @@ -170,24 +174,26 @@ sub run_rbd_command { my $lasterr; my $errmsg = $args{errmsg} . ": " || ""; if (!exists($args{errfunc})) { - # ' error: 2014-02-06 11:51:59.839135 7f09f94d0760 -1 librbd: snap_unprotect: can't unprotect; - # at least 1 child(ren) in pool cephstor1 - $args{errfunc} = sub { - my $line = shift; - if ($line =~ m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/) { - $lasterr = "$1\n"; - } else { - $lasterr = $line; - } - print STDERR $lasterr; - *STDERR->flush(); - }; + # ' error: 2014-02-06 11:51:59.839135 7f09f94d0760 -1 librbd: snap_unprotect: can't unprotect; + # at least 1 child(ren) in pool cephstor1 + $args{errfunc} = sub { + my $line = shift; + if ($line =~ + m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/ + ) { + $lasterr = "$1\n"; + } else { + $lasterr = $line; + } + print STDERR $lasterr; + *STDERR->flush(); + }; } eval { run_command($cmd, %args); }; if (my $err = $@) { - die $errmsg . $lasterr if length($lasterr); - die $err; + die $errmsg . 
$lasterr if length($lasterr); + die $err; } return undef; @@ -200,33 +206,33 @@ sub rbd_ls { my $parser = sub { $raw .= shift }; my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '-l', '--format', 'json'); - run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser); + run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser); my $result; if ($raw eq '') { - $result = []; + $result = []; } elsif ($raw =~ m/^(\[.*\])$/s) { # untaint - $result = JSON::decode_json($1); + $result = JSON::decode_json($1); } else { - die "got unexpected data from rbd ls: '$raw'\n"; + die "got unexpected data from rbd ls: '$raw'\n"; } my $list = {}; foreach my $el (@$result) { - next if defined($el->{snapshot}); + next if defined($el->{snapshot}); - my $image = $el->{image}; + my $image = $el->{image}; - my ($owner) = $image =~ m/^(?:vm|base)-(\d+)-/; - next if !defined($owner); + my ($owner) = $image =~ m/^(?:vm|base)-(\d+)-/; + next if !defined($owner); - $list->{$image} = { - name => $image, - size => $el->{size}, - parent => $get_parent_image_name->($el->{parent}), - vmid => $owner - }; + $list->{$image} = { + name => $image, + size => $el->{size}, + parent => $get_parent_image_name->($el->{parent}), + vmid => $owner, + }; } return $list; @@ -238,28 +244,33 @@ sub rbd_ls_snap { my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'ls', $name, '--format', 'json'); my $raw = ''; - run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; }); + run_rbd_command( + $cmd, + errmsg => "rbd error", + errfunc => sub { }, + outfunc => sub { $raw .= shift; }, + ); my $list; if ($raw =~ m/^(\[.*\])$/s) { # untaint - $list = eval { JSON::decode_json($1) }; - die "invalid JSON output from 'rbd snap ls $name': $@\n" if $@; + $list = eval { JSON::decode_json($1) }; + die "invalid JSON output from 'rbd snap ls $name': $@\n" if $@; } else { - die "got unexpected data from 'rbd snap ls $name': '$raw'\n"; + die "got unexpected data from 'rbd snap ls $name': '$raw'\n"; } $list = [] if !defined($list); my $res = {}; foreach my $el (@$list) { - my $snap = $el->{name}; - my $protected = defined($el->{protected}) && $el->{protected} eq "true" ? 1 : undef; - $res->{$snap} = { - name => $snap, - id => $el->{id} // undef, - size => $el->{size} // 0, - protected => $protected, - }; + my $snap = $el->{name}; + my $protected = defined($el->{protected}) && $el->{protected} eq "true" ? 1 : undef; + $res->{$snap} = { + name => $snap, + id => $el->{id} // undef, + size => $el->{size} // 0, + protected => $protected, + }; } return $res; } @@ -271,7 +282,7 @@ sub rbd_volume_info { my @options = ('info', $volname, '--format', 'json'); if ($snap) { - push @options, '--snap', $snap; + push @options, '--snap', $snap; } $cmd = $rbd_cmd->($scfg, $storeid, @options); @@ -279,19 +290,20 @@ sub rbd_volume_info { my $raw = ''; my $parser = sub { $raw .= shift }; - run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser); + run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser); my $volume; if ($raw eq '') { - $volume = {}; + $volume = {}; } elsif ($raw =~ m/^(\{.*\})$/s) { # untaint - $volume = JSON::decode_json($1); + $volume = JSON::decode_json($1); } else { - die "got unexpected data from rbd info: '$raw'\n"; + die "got unexpected data from rbd info: '$raw'\n"; } $volume->{parent} = $get_parent_image_name->($volume->{parent}); - $volume->{protected} = defined($volume->{protected}) && $volume->{protected} eq "true" ? 
1 : undef; + $volume->{protected} = + defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef; return $volume->@{qw(size parent format protected features)}; } @@ -305,31 +317,31 @@ sub rbd_volume_du { my $raw = ''; my $parser = sub { $raw .= shift }; - run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser); + run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser); my $volume; if ($raw eq '') { - $volume = {}; + $volume = {}; } elsif ($raw =~ m/^(\{.*\})$/s) { # untaint - $volume = JSON::decode_json($1); + $volume = JSON::decode_json($1); } else { - die "got unexpected data from rbd du: '$raw'\n"; + die "got unexpected data from rbd du: '$raw'\n"; } if (!defined($volume->{images})) { - die "got no images from rbd du\n"; + die "got no images from rbd du\n"; } # `rbd du` returns array of images for name matching `volname`, # including snapshots. my $images = $volume->{images}; foreach my $image (@$images) { - next if defined($image->{snapshot}); - next if !defined($image->{used_size}) || !defined($image->{name}); + next if defined($image->{snapshot}); + next if !defined($image->{used_size}) || !defined($image->{name}); - # Return `used_size` of first volume with matching name which - # is not a snapshot. - return $image->{used_size} if $image->{name} eq $volname; + # Return `used_size` of first volume with matching name which + # is not a snapshot. + return $image->{used_size} if $image->{name} eq $volname; } die "got no matching image from rbd du\n"; @@ -341,18 +353,22 @@ my sub rbd_volume_exists { my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '--format', 'json'); my $raw = ''; run_rbd_command( - $cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; }); + $cmd, + errmsg => "rbd error", + errfunc => sub { }, + outfunc => sub { $raw .= shift; }, + ); my $list; if ($raw =~ m/^(\[.*\])$/s) { # untaint - $list = eval { JSON::decode_json($1); }; - die "invalid JSON output from 'rbd ls': $@\n" if $@; + $list = eval { JSON::decode_json($1); }; + die "invalid JSON output from 'rbd ls': $@\n" if $@; } else { - die "got unexpected data from 'rbd ls': '$raw'\n"; + die "got unexpected data from 'rbd ls': '$raw'\n"; } for my $name ($list->@*) { - return 1 if $name eq $volname; + return 1 if $name eq $volname; } return 0; @@ -366,62 +382,63 @@ sub type { sub plugindata { return { - content => [ {images => 1, rootdir => 1}, { images => 1 }], - 'sensitive-properties' => { keyring => 1 }, + content => [{ images => 1, rootdir => 1 }, { images => 1 }], + 'sensitive-properties' => { keyring => 1 }, }; } sub properties { return { - monhost => { - description => "IP addresses of monitors (for external clusters).", - type => 'string', format => 'pve-storage-portal-dns-list', - }, - pool => { - description => "Pool.", - type => 'string', - }, - 'data-pool' => { - description => "Data Pool (for erasure coding only)", - type => 'string', - }, - namespace => { - description => "Namespace.", - type => 'string', - }, - username => { - description => "RBD Id.", - type => 'string', - }, - authsupported => { - description => "Authsupported.", - type => 'string', - }, - krbd => { - description => "Always access rbd through krbd kernel module.", - type => 'boolean', - default => 0, - }, - keyring => { - description => "Client keyring contents (for external clusters).", - type => 'string', - }, + monhost => { + description => "IP addresses of monitors (for external clusters).", + type => 'string', + format => 
'pve-storage-portal-dns-list', + }, + pool => { + description => "Pool.", + type => 'string', + }, + 'data-pool' => { + description => "Data Pool (for erasure coding only)", + type => 'string', + }, + namespace => { + description => "Namespace.", + type => 'string', + }, + username => { + description => "RBD Id.", + type => 'string', + }, + authsupported => { + description => "Authsupported.", + type => 'string', + }, + krbd => { + description => "Always access rbd through krbd kernel module.", + type => 'boolean', + default => 0, + }, + keyring => { + description => "Client keyring contents (for external clusters).", + type => 'string', + }, }; } sub options { return { - nodes => { optional => 1 }, - disable => { optional => 1 }, - monhost => { optional => 1}, - pool => { optional => 1 }, - 'data-pool' => { optional => 1 }, - namespace => { optional => 1 }, - username => { optional => 1 }, - content => { optional => 1 }, - krbd => { optional => 1 }, - keyring => { optional => 1 }, - bwlimit => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + monhost => { optional => 1 }, + pool => { optional => 1 }, + 'data-pool' => { optional => 1 }, + namespace => { optional => 1 }, + username => { optional => 1 }, + content => { optional => 1 }, + krbd => { optional => 1 }, + keyring => { optional => 1 }, + bwlimit => { optional => 1 }, }; } @@ -439,11 +456,11 @@ sub on_update_hook { my ($class, $storeid, $scfg, %param) = @_; if (exists($param{keyring})) { - if (defined($param{keyring})) { - PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring}); - } else { - PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid); - } + if (defined($param{keyring})) { + PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring}); + } else { + PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid); + } } return; @@ -459,7 +476,7 @@ sub parse_volname { my ($class, $volname) = @_; if ($volname =~ m/^((base-(\d+)-\S+)\/)?((base)?(vm)?-(\d+)-\S+)$/) { - return ('images', $4, $7, $2, $3, $5, 'raw'); + return ('images', $4, $7, $2, $3, $5, 'raw'); } die "unable to parse rbd volume name '$volname'\n"; @@ -470,11 +487,11 @@ sub path { my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid); my ($vtype, $name, $vmid) = $class->parse_volname($volname); - $name .= '@'.$snapname if $snapname; + $name .= '@' . 
$snapname if $snapname; if ($scfg->{krbd}) { - my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name); - return ($rbd_dev_path, $vmid, $vtype); + my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name); + return ($rbd_dev_path, $vmid, $vtype); } my $rbd_path = get_rbd_path($scfg, $name); @@ -482,10 +499,10 @@ sub path { $path .= ":conf=$cmd_option->{ceph_conf}" if $cmd_option->{ceph_conf}; if (defined($scfg->{monhost})) { - my $monhost = PVE::CephConfig::hostlist($scfg->{monhost}, ';'); - $monhost =~ s/:/\\:/g; - $path .= ":mon_host=$monhost"; - $path .= ":auth_supported=$cmd_option->{auth_supported}"; + my $monhost = PVE::CephConfig::hostlist($scfg->{monhost}, ';'); + $monhost =~ s/:/\\:/g; + $path .= ":mon_host=$monhost"; + $path .= ":auth_supported=$cmd_option->{auth_supported}"; } $path .= ":id=$cmd_option->{userid}:keyring=$cmd_option->{keyring}" if ($cmd_option->{keyring}); @@ -501,14 +518,14 @@ sub find_free_diskname { my $disk_list = []; my $parser = sub { - my $line = shift; - if ($line =~ m/^(.*)$/) { # untaint - push @$disk_list, $1; - } + my $line = shift; + if ($line =~ m/^(.*)$/) { # untaint + push @$disk_list, $1; + } }; eval { - run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser); + run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser); }; my $err = $@; @@ -522,8 +539,7 @@ sub create_base { my $snap = '__base__'; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); die "create_base not possible with base image\n" if $isBase; @@ -533,7 +549,7 @@ sub create_base { die "rbd image must be at format V2" if $format ne "2"; die "volname '$volname' contains wrong information about parent $parent $basename\n" - if $basename && (!$parent || $parent ne $basename."@".$snap); + if $basename && (!$parent || $parent ne $basename . "@" . $snap); my $newname = $name; $newname =~ s/^vm-/base-/; @@ -541,26 +557,24 @@ sub create_base { my $newvolname = $basename ? "$basename/$newname" : "$newname"; my $cmd = $rbd_cmd->( - $scfg, - $storeid, - 'rename', - get_rbd_path($scfg, $name), - get_rbd_path($scfg, $newname), + $scfg, $storeid, 'rename', + get_rbd_path($scfg, $name), + get_rbd_path($scfg, $newname), ); run_rbd_command($cmd, errmsg => "rbd rename '$name' error"); eval { $class->unmap_volume($storeid, $scfg, $volname); }; warn $@ if $@; - my $running = undef; #fixme : is create_base always offline ? + my $running = undef; #fixme : is create_base always offline ? 
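The seven-element list returned by parse_volname() above gets destructured throughout this plugin (create_base, clone_image, rename_volume), so a minimal sketch of the ordering may help; the volume name used here is purely illustrative:

    use PVE::Storage::RBDPlugin;

    # a linked clone: "<base volume>/<volume>"
    my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
        PVE::Storage::RBDPlugin->parse_volname('base-100-disk-0/vm-101-disk-0');

    # $vtype    => 'images'
    # $name     => 'vm-101-disk-0'
    # $vmid     => '101'
    # $basename => 'base-100-disk-0'
    # $basevmid => '100'
    # $isBase   => undef (this volume is a clone, not a base image itself)
    # $format   => 'raw'  (RBD images are always raw)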
$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running); my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $newname, $snap); - if (!$protected){ - my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap); - run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error"); + if (!$protected) { + my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap); + run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error"); } return $newvolname; @@ -573,31 +587,30 @@ sub clone_image { my $snap = '__base__'; $snap = $snapname if length $snapname; - my ($vtype, $basename, $basevmid, undef, undef, $isBase) = - $class->parse_volname($volname); + my ($vtype, $basename, $basevmid, undef, undef, $isBase) = $class->parse_volname($volname); - die "$volname is not a base image and snapname is not provided\n" - if !$isBase && !length($snapname); + die "$volname is not a base image and snapname is not provided\n" + if !$isBase && !length($snapname); my $name = $class->find_free_diskname($storeid, $scfg, $vmid); warn "clone $volname: $basename snapname $snap to $name\n"; if (length($snapname)) { - my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $volname, $snapname); + my (undef, undef, undef, $protected) = + rbd_volume_info($scfg, $storeid, $volname, $snapname); - if (!$protected) { - my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname); - run_rbd_command($cmd, errmsg => "rbd protect $volname snap $snapname error"); - } + if (!$protected) { + my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname); + run_rbd_command($cmd, errmsg => "rbd protect $volname snap $snapname error"); + } } my $newvol = "$basename/$name"; $newvol = $name if length($snapname); my @options = ( - get_rbd_path($scfg, $basename), - '--snap', $snap, + get_rbd_path($scfg, $basename), '--snap', $snap, ); push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'}; @@ -610,15 +623,13 @@ sub clone_image { sub alloc_image { my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_; - die "illegal name '$name' - should be 'vm-$vmid-*'\n" - if $name && $name !~ m/^vm-$vmid-/; + if $name && $name !~ m/^vm-$vmid-/; $name = $class->find_free_diskname($storeid, $scfg, $vmid) if !$name; my @options = ( - '--image-format' , 2, - '--size', int(($size + 1023) / 1024), + '--image-format', 2, '--size', int(($size + 1023) / 1024), ); push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'}; @@ -631,21 +642,19 @@ sub alloc_image { sub free_image { my ($class, $storeid, $scfg, $volname, $isBase) = @_; - my ($vtype, $name, $vmid, undef, undef, undef) = - $class->parse_volname($volname); - + my ($vtype, $name, $vmid, undef, undef, undef) = $class->parse_volname($volname); my $snaps = rbd_ls_snap($scfg, $storeid, $name); foreach my $snap (keys %$snaps) { - if ($snaps->{$snap}->{protected}) { - my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap); - run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error"); - } + if ($snaps->{$snap}->{protected}) { + my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap); + run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error"); + } } $class->deactivate_volume($storeid, $scfg, $volname); - my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'purge', $name); + my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 
'purge', $name); run_rbd_command($cmd, errmsg => "rbd snap purge '$name' error"); $cmd = $rbd_cmd->($scfg, $storeid, 'rm', $name); @@ -662,25 +671,25 @@ sub list_images { my $res = []; for my $image (sort keys %$dat) { - my $info = $dat->{$image}; - my ($volname, $parent, $owner) = $info->@{'name', 'parent', 'vmid'}; + my $info = $dat->{$image}; + my ($volname, $parent, $owner) = $info->@{ 'name', 'parent', 'vmid' }; - if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) { - $info->{volid} = "$storeid:$1/$volname"; - } else { - $info->{volid} = "$storeid:$volname"; - } + if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) { + $info->{volid} = "$storeid:$1/$volname"; + } else { + $info->{volid} = "$storeid:$volname"; + } - if ($vollist) { - my $found = grep { $_ eq $info->{volid} } @$vollist; - next if !$found; - } else { - next if defined ($vmid) && ($owner ne $vmid); - } + if ($vollist) { + my $found = grep { $_ eq $info->{volid} } @$vollist; + next if !$found; + } else { + next if defined($vmid) && ($owner ne $vmid); + } - $info->{format} = 'raw'; + $info->{format} = 'raw'; - push @$res, $info; + push @$res, $info; } return $res; @@ -694,11 +703,11 @@ sub status { my $pool = $scfg->{'data-pool'} // $scfg->{pool} // 'rbd'; - my ($d) = grep { $_->{name} eq $pool } @{$df->{pools}}; + my ($d) = grep { $_->{name} eq $pool } @{ $df->{pools} }; if (!defined($d)) { - warn "could not get usage stats for pool '$pool'\n"; - return; + warn "could not get usage stats for pool '$pool'\n"; + return; } # max_avail -> max available space for data w/o replication in the pool @@ -727,7 +736,7 @@ sub map_volume { my ($vtype, $img_name, $vmid) = $class->parse_volname($volname); my $name = $img_name; - $name .= '@'.$snapname if $snapname; + $name .= '@' . $snapname if $snapname; my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name); @@ -746,13 +755,13 @@ sub unmap_volume { my ($class, $storeid, $scfg, $volname, $snapname) = @_; my ($vtype, $name, $vmid) = $class->parse_volname($volname); - $name .= '@'.$snapname if $snapname; + $name .= '@' . 
$snapname if $snapname; my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name); if (-b $kerneldev) { - my $cmd = $rbd_cmd->($scfg, $storeid, 'unmap', $kerneldev); - run_rbd_command($cmd, errmsg => "can't unmap rbd device $kerneldev"); + my $cmd = $rbd_cmd->($scfg, $storeid, 'unmap', $kerneldev); + run_rbd_command($cmd, errmsg => "can't unmap rbd device $kerneldev"); } return 1; @@ -790,7 +799,8 @@ sub volume_resize { my ($vtype, $name, $vmid) = $class->parse_volname($volname); - my $cmd = $rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size/1024/1024)), $name); + my $cmd = + $rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size / 1024 / 1024)), $name); run_rbd_command($cmd, errmsg => "rbd resize '$volname' error"); return undef; } @@ -822,9 +832,9 @@ sub volume_snapshot_delete { my ($vtype, $name, $vmid) = $class->parse_volname($volname); my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $name, $snap); - if ($protected){ - my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap); - run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error"); + if ($protected) { + my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap); + run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error"); } my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'rm', '--snap', $snap, $name); @@ -841,22 +851,22 @@ sub volume_snapshot_needs_fsfreeze { sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; - my $features = { - snapshot => { current => 1, snap => 1}, - clone => { base => 1, snap => 1}, - template => { current => 1}, - copy => { base => 1, current => 1, snap => 1}, - sparseinit => { base => 1, current => 1}, - rename => {current => 1}, + my $features = { + snapshot => { current => 1, snap => 1 }, + clone => { base => 1, snap => 1 }, + template => { current => 1 }, + copy => { base => 1, current => 1, snap => 1 }, + sparseinit => { base => 1, current => 1 }, + rename => { current => 1 }, }; my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; - if ($snapname){ - $key = 'snap'; + if ($snapname) { + $key = 'snap'; } else { - $key = $isBase ? 'base' : 'current'; + $key = $isBase ? 'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -867,20 +877,21 @@ sub volume_export_formats { my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_; return $class->volume_import_formats( - $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots); + $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, + ); } sub volume_export { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, ) = @_; die "volume export format $format not available for $class\n" if $format ne 'raw+size'; @@ -891,9 +902,9 @@ sub volume_export { PVE::Storage::Plugin::write_common_header($fh, $size); my $cmd = $rbd_cmd->($scfg, $storeid, 'export', '--export-format', '1', $volname, '-'); run_rbd_command( - $cmd, - errmsg => 'could not export image', - output => '>&'.fileno($fh), + $cmd, + errmsg => 'could not export image', + output => '>&' . 
fileno($fh), ); return; @@ -908,16 +919,16 @@ sub volume_import_formats { sub volume_import { my ( - $class, - $scfg, - $storeid, - $fh, - $volname, - $format, - $snapshot, - $base_snapshot, - $with_snapshots, - $allow_rename, + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, ) = @_; die "volume import format $format not available for $class\n" if $format ne 'raw+size'; @@ -926,32 +937,32 @@ sub volume_import { my (undef, $name, $vmid, undef, undef, undef, $file_format) = $class->parse_volname($volname); die "cannot import format $format into a volume of format $file_format\n" - if $file_format ne 'raw'; + if $file_format ne 'raw'; if (rbd_volume_exists($scfg, $storeid, $name)) { - die "volume $name already exists\n" if !$allow_rename; - warn "volume $name already exists - importing with a different name\n"; - $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $file_format); + die "volume $name already exists\n" if !$allow_rename; + warn "volume $name already exists - importing with a different name\n"; + $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $file_format); } my ($size) = PVE::Storage::Plugin::read_common_header($fh); $size = PVE::Storage::Common::align_size_up($size, 1024) / 1024; eval { - my $cmd = $rbd_cmd->($scfg, $storeid, 'import', '--export-format', '1', '-', $volname); - run_rbd_command( - $cmd, - errmsg => 'could not import image', - input => '<&'.fileno($fh), - ); + my $cmd = $rbd_cmd->($scfg, $storeid, 'import', '--export-format', '1', '-', $volname); + run_rbd_command( + $cmd, + errmsg => 'could not import image', + input => '<&' . fileno($fh), + ); }; if (my $err = $@) { - # FIXME there is a slight race between finding the free disk name and removal here - # Does not only affect this plugin, see: - # https://lore.proxmox.com/pve-devel/20240403150712.262773-1-h.duerr@proxmox.com/ - eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format); }; - warn $@ if $@; - die $err; + # FIXME there is a slight race between finding the free disk name and removal here + # Does not only affect this plugin, see: + # https://lore.proxmox.com/pve-devel/20240403150712.262773-1-h.duerr@proxmox.com/ + eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format); }; + warn $@ if $@; + die $err; } return "$storeid:$volname"; @@ -961,25 +972,19 @@ sub rename_volume { my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_; my ( - undef, - $source_image, - $source_vmid, - $base_name, - $base_vmid, - undef, - $format + undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format, ) = $class->parse_volname($source_volname); $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format) - if !$target_volname; + if !$target_volname; die "target volume '${target_volname}' already exists\n" - if rbd_volume_exists($scfg, $storeid, $target_volname); + if rbd_volume_exists($scfg, $storeid, $target_volname); my $cmd = $rbd_cmd->($scfg, $storeid, 'rename', $source_image, $target_volname); run_rbd_command( - $cmd, - errmsg => "could not rename image '${source_image}' to '${target_volname}'", + $cmd, + errmsg => "could not rename image '${source_image}' to '${target_volname}'", ); eval { $class->unmap_volume($storeid, $scfg, $source_volname); }; diff --git a/src/PVE/Storage/ZFSPlugin.pm b/src/PVE/Storage/ZFSPlugin.pm index 94cb11f..f0fa522 100644 --- a/src/PVE/Storage/ZFSPlugin.pm +++ b/src/PVE/Storage/ZFSPlugin.pm @@ 
-14,19 +14,18 @@ use PVE::Storage::LunCmd::Istgt; use PVE::Storage::LunCmd::Iet; use PVE::Storage::LunCmd::LIO; - my @ssh_opts = ('-o', 'BatchMode=yes'); my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts); my $id_rsa_path = '/etc/pve/priv/zfs'; my $lun_cmds = { - create_lu => 1, - delete_lu => 1, - import_lu => 1, - modify_lu => 1, - add_view => 1, - list_view => 1, - list_lu => 1, + create_lu => 1, + delete_lu => 1, + import_lu => 1, + modify_lu => 1, + add_view => 1, + list_view => 1, + list_lu => 1, }; my $zfs_unknown_scsi_provider = sub { @@ -54,14 +53,15 @@ my $zfs_get_base = sub { sub zfs_request { my ($class, $scfg, $timeout, $method, @params) = @_; - $timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 10 - if !$timeout; + $timeout = PVE::RPCEnvironment->is_worker() ? 60 * 60 : 10 + if !$timeout; my $msg = ''; if ($lun_cmds->{$method}) { if ($scfg->{iscsiprovider} eq 'comstar') { - $msg = PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params); + $msg = + PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params); } elsif ($scfg->{iscsiprovider} eq 'istgt') { $msg = PVE::Storage::LunCmd::Istgt::run_lun_command($scfg, $timeout, $method, @params); } elsif ($scfg->{iscsiprovider} eq 'iet') { @@ -73,21 +73,21 @@ sub zfs_request { } } else { - my $target = 'root@' . $scfg->{portal}; + my $target = 'root@' . $scfg->{portal}; - my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target]; + my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target]; if ($method eq 'zpool_list') { - push @$cmd, 'zpool', 'list'; - } else { - push @$cmd, 'zfs', $method; + push @$cmd, 'zpool', 'list'; + } else { + push @$cmd, 'zfs', $method; } - push @$cmd, @params; + push @$cmd, @params; - my $output = sub { - my $line = shift; - $msg .= "$line\n"; + my $output = sub { + my $line = shift; + $msg .= "$line\n"; }; run_command($cmd, outfunc => $output, timeout => $timeout); @@ -116,7 +116,7 @@ sub zfs_add_lun_mapping_entry { my ($class, $scfg, $zvol, $guid) = @_; if (!defined($guid)) { - $guid = $class->zfs_get_lu_name($scfg, $zvol); + $guid = $class->zfs_get_lu_name($scfg, $zvol); } $class->zfs_request($scfg, undef, 'add_view', $guid); @@ -160,7 +160,7 @@ sub zfs_get_lun_number { die "could not find lun_number for guid $guid" if !$guid; if ($class->zfs_request($scfg, undef, 'list_view', $guid) =~ /^(\d+)$/) { - return $1; + return $1; } die "lun_number for guid $guid is not a number"; @@ -174,55 +174,55 @@ sub type { sub plugindata { return { - content => [ {images => 1}, { images => 1 }], - 'sensitive-properties' => {}, + content => [{ images => 1 }, { images => 1 }], + 'sensitive-properties' => {}, }; } sub properties { return { - iscsiprovider => { - description => "iscsi provider", - type => 'string', - }, - # this will disable write caching on comstar and istgt. - # it is not implemented for iet. iet blockio always operates with - # writethrough caching when not in readonly mode - nowritecache => { - description => "disable write caching on the target", - type => 'boolean', - }, - comstar_tg => { - description => "target group for comstar views", - type => 'string', - }, - comstar_hg => { - description => "host group for comstar views", - type => 'string', - }, - lio_tpg => { - description => "target portal group for Linux LIO targets", - type => 'string', - }, + iscsiprovider => { + description => "iscsi provider", + type => 'string', + }, + # this will disable write caching on comstar and istgt. + # it is not implemented for iet. 
iet blockio always operates with + # writethrough caching when not in readonly mode + nowritecache => { + description => "disable write caching on the target", + type => 'boolean', + }, + comstar_tg => { + description => "target group for comstar views", + type => 'string', + }, + comstar_hg => { + description => "host group for comstar views", + type => 'string', + }, + lio_tpg => { + description => "target portal group for Linux LIO targets", + type => 'string', + }, }; } sub options { return { - nodes => { optional => 1 }, - disable => { optional => 1 }, - portal => { fixed => 1 }, - target => { fixed => 1 }, - pool => { fixed => 1 }, - blocksize => { fixed => 1 }, - iscsiprovider => { fixed => 1 }, - nowritecache => { optional => 1 }, - sparse => { optional => 1 }, - comstar_hg => { optional => 1 }, - comstar_tg => { optional => 1 }, - lio_tpg => { optional => 1 }, - content => { optional => 1 }, - bwlimit => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + portal => { fixed => 1 }, + target => { fixed => 1 }, + pool => { fixed => 1 }, + blocksize => { fixed => 1 }, + iscsiprovider => { fixed => 1 }, + nowritecache => { optional => 1 }, + sparse => { optional => 1 }, + comstar_hg => { optional => 1 }, + comstar_tg => { optional => 1 }, + lio_tpg => { optional => 1 }, + content => { optional => 1 }, + bwlimit => { optional => 1 }, }; } @@ -232,7 +232,7 @@ sub path { my ($class, $scfg, $volname, $storeid, $snapname) = @_; die "direct access to snapshots not implemented" - if defined($snapname); + if defined($snapname); my ($vtype, $name, $vmid) = $class->parse_volname($volname); @@ -252,8 +252,7 @@ sub create_base { my $snap = '__base__'; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); die "create_base not possible with base image\n" if $isBase; @@ -268,7 +267,7 @@ sub create_base { my $guid = $class->zfs_create_lu($scfg, $newname); $class->zfs_add_lun_mapping_entry($scfg, $newname, $guid); - my $running = undef; #fixme : is create_base always offline ? + my $running = undef; #fixme : is create_base always offline ? 
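For the non-LUN methods handled in zfs_request() above, the command handed to run_command() is an SSH invocation assembled from @ssh_cmd, $id_rsa_path and the configured portal. A rough sketch of the resulting array; the portal address and the example method and parameters are made up:

    my @ssh_opts    = ('-o', 'BatchMode=yes');
    my @ssh_cmd     = ('/usr/bin/ssh', @ssh_opts);
    my $id_rsa_path = '/etc/pve/priv/zfs';
    my $portal      = '192.0.2.10';    # stands in for $scfg->{portal}

    # e.g. $method = 'list' with @params = ('-t', 'volume'):
    my $cmd = [
        @ssh_cmd,
        '-i', "$id_rsa_path/${portal}_id_rsa",
        "root\@$portal",
        'zfs', 'list',
        '-t', 'volume',
    ];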
$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running); @@ -291,18 +290,18 @@ sub clone_image { sub alloc_image { my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_; - + die "unsupported format '$fmt'" if $fmt ne 'raw'; die "illegal name '$name' - should be 'vm-$vmid-*'\n" - if $name && $name !~ m/^vm-$vmid-/; + if $name && $name !~ m/^vm-$vmid-/; my $volname = $name; $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) if !$volname; - + $class->zfs_create_zvol($scfg, $volname, $size); - + my $guid = $class->zfs_create_lu($scfg, $volname); $class->zfs_add_lun_mapping_entry($scfg, $volname, $guid); @@ -370,21 +369,20 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - snapshot => { current => 1, snap => 1}, - clone => { base => 1}, - template => { current => 1}, - copy => { base => 1, current => 1}, + snapshot => { current => 1, snap => 1 }, + clone => { base => 1 }, + template => { current => 1 }, + copy => { base => 1, current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; if ($snapname) { - $key = 'snap'; + $key = 'snap'; } else { - $key = $isBase ? 'base' : 'current'; + $key = $isBase ? 'base' : 'current'; } return 1 if $features->{$feature}->{$key}; diff --git a/src/PVE/Storage/ZFSPoolPlugin.pm b/src/PVE/Storage/ZFSPoolPlugin.pm index 26fb0a4..713d26f 100644 --- a/src/PVE/Storage/ZFSPoolPlugin.pm +++ b/src/PVE/Storage/ZFSPoolPlugin.pm @@ -20,39 +20,40 @@ sub type { sub plugindata { return { - content => [ {images => 1, rootdir => 1}, {images => 1 , rootdir => 1}], - format => [ { raw => 1, subvol => 1 } , 'raw' ], - 'sensitive-properties' => {}, + content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }], + format => [{ raw => 1, subvol => 1 }, 'raw'], + 'sensitive-properties' => {}, }; } sub properties { return { - blocksize => { - description => "block size", - type => 'string', - }, - sparse => { - description => "use sparse volumes", - type => 'boolean', - }, - mountpoint => { - description => "mount point", - type => 'string', format => 'pve-storage-path', - }, + blocksize => { + description => "block size", + type => 'string', + }, + sparse => { + description => "use sparse volumes", + type => 'boolean', + }, + mountpoint => { + description => "mount point", + type => 'string', + format => 'pve-storage-path', + }, }; } sub options { return { - pool => { fixed => 1 }, - blocksize => { optional => 1 }, - sparse => { optional => 1 }, - nodes => { optional => 1 }, - disable => { optional => 1 }, - content => { optional => 1 }, - bwlimit => { optional => 1 }, - mountpoint => { optional => 1 }, + pool => { fixed => 1 }, + blocksize => { optional => 1 }, + sparse => { optional => 1 }, + nodes => { optional => 1 }, + disable => { optional => 1 }, + content => { optional => 1 }, + bwlimit => { optional => 1 }, + mountpoint => { optional => 1 }, }; } @@ -67,35 +68,35 @@ sub zfs_parse_zvol_list { my @lines = split /\n/, $text; foreach my $line (@lines) { - my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line); - next if !($type eq 'volume' || $type eq 'filesystem'); + my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line); + next if !($type eq 'volume' || $type eq 'filesystem'); - my $zvol = {}; - my @parts = split /\//, $dataset; - next if scalar(@parts) < 2; # we need 
pool/name - my $name = pop @parts; - my $parsed_pool = join('/', @parts); - next if $parsed_pool ne $pool; + my $zvol = {}; + my @parts = split /\//, $dataset; + next if scalar(@parts) < 2; # we need pool/name + my $name = pop @parts; + my $parsed_pool = join('/', @parts); + next if $parsed_pool ne $pool; - next unless $name =~ m!^(vm|base|subvol|basevol)-(\d+)-(\S+)$!; - $zvol->{owner} = $2; + next unless $name =~ m!^(vm|base|subvol|basevol)-(\d+)-(\S+)$!; + $zvol->{owner} = $2; - $zvol->{name} = $name; - if ($type eq 'filesystem') { - if ($refquota eq 'none') { - $zvol->{size} = 0; - } else { - $zvol->{size} = $refquota + 0; - } - $zvol->{format} = 'subvol'; - } else { - $zvol->{size} = $size + 0; - $zvol->{format} = 'raw'; - } - if ($origin !~ /^-$/) { - $zvol->{origin} = $origin; - } - push @$list, $zvol; + $zvol->{name} = $name; + if ($type eq 'filesystem') { + if ($refquota eq 'none') { + $zvol->{size} = 0; + } else { + $zvol->{size} = $refquota + 0; + } + $zvol->{format} = 'subvol'; + } else { + $zvol->{size} = $size + 0; + $zvol->{format} = 'raw'; + } + if ($origin !~ /^-$/) { + $zvol->{origin} = $origin; + } + push @$list, $zvol; } return $list; @@ -105,9 +106,9 @@ sub parse_volname { my ($class, $volname) = @_; if ($volname =~ m/^(((base|basevol)-(\d+)-\S+)\/)?((base|basevol|vm|subvol)-(\d+)-\S+)$/) { - my $format = ($6 eq 'subvol' || $6 eq 'basevol') ? 'subvol' : 'raw'; - my $isBase = ($6 eq 'base' || $6 eq 'basevol'); - return ('images', $5, $7, $2, $4, $isBase, $format); + my $format = ($6 eq 'subvol' || $6 eq 'basevol') ? 'subvol' : 'raw'; + my $isBase = ($6 eq 'base' || $6 eq 'basevol'); + return ('images', $5, $7, $2, $4, $isBase, $format); } die "unable to parse zfs volume name '$volname'\n"; @@ -123,17 +124,17 @@ sub on_add_hook { # ignore failure, pool might currently not be imported my $mountpoint; eval { - my $res = $class->zfs_get_properties($scfg, 'mountpoint', $scfg->{pool}, 1); - $mountpoint = PVE::Storage::Plugin::verify_path($res, 1) if defined($res); + my $res = $class->zfs_get_properties($scfg, 'mountpoint', $scfg->{pool}, 1); + $mountpoint = PVE::Storage::Plugin::verify_path($res, 1) if defined($res); }; if (defined($cfg_mountpoint)) { - if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) { - warn "warning for $storeid - mountpoint: $cfg_mountpoint " . - "does not match current mount point: $mountpoint\n"; - } + if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) { + warn "warning for $storeid - mountpoint: $cfg_mountpoint " + . 
"does not match current mount point: $mountpoint\n"; + } } else { - $scfg->{mountpoint} = $mountpoint; + $scfg->{mountpoint} = $mountpoint; } return; @@ -148,14 +149,14 @@ sub path { my $mountpoint = $scfg->{mountpoint} // "/$scfg->{pool}"; if ($vtype eq "images") { - if ($name =~ m/^subvol-/ || $name =~ m/^basevol-/) { - $path = "$mountpoint/$name"; - } else { - $path = "/dev/zvol/$scfg->{pool}/$name"; - } - $path .= "\@$snapname" if defined($snapname); + if ($name =~ m/^subvol-/ || $name =~ m/^basevol-/) { + $path = "$mountpoint/$name"; + } else { + $path = "/dev/zvol/$scfg->{pool}/$name"; + } + $path .= "\@$snapname" if defined($snapname); } else { - die "$vtype is not allowed in ZFSPool!"; + die "$vtype is not allowed in ZFSPool!"; } return ($path, $vmid, $vtype); @@ -167,12 +168,12 @@ sub zfs_request { my $cmd = []; if ($method eq 'zpool_list') { - push @$cmd, 'zpool', 'list'; + push @$cmd, 'zpool', 'list'; } elsif ($method eq 'zpool_import') { - push @$cmd, 'zpool', 'import'; - $timeout = 15 if !$timeout || $timeout < 15; + push @$cmd, 'zpool', 'import'; + $timeout = 15 if !$timeout || $timeout < 15; } else { - push @$cmd, 'zfs', $method; + push @$cmd, 'zfs', $method; } push @$cmd, @params; @@ -180,10 +181,10 @@ sub zfs_request { my $output = sub { $msg .= "$_[0]\n" }; if (PVE::RPCEnvironment->is_worker()) { - $timeout = 60*60 if !$timeout; - $timeout = 60*5 if $timeout < 60*5; + $timeout = 60 * 60 if !$timeout; + $timeout = 60 * 5 if $timeout < 60 * 5; } else { - $timeout = 10 if !$timeout; + $timeout = 10 if !$timeout; } run_command($cmd, errmsg => "zfs error", outfunc => $output, timeout => $timeout); @@ -194,17 +195,17 @@ sub zfs_request { sub zfs_wait_for_zvol_link { my ($class, $scfg, $volname, $timeout) = @_; - my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*5 : 10; + my $default_timeout = PVE::RPCEnvironment->is_worker() ? 
60 * 5 : 10; $timeout = $default_timeout if !defined($timeout); my ($devname, undef, undef) = $class->path($scfg, $volname); for (my $i = 1; $i <= $timeout; $i++) { - last if -b $devname; - die "timeout: no zvol device link for '$volname' found after $timeout sec.\n" - if $i == $timeout; + last if -b $devname; + die "timeout: no zvol device link for '$volname' found after $timeout sec.\n" + if $i == $timeout; - sleep(1); + sleep(1); } } @@ -215,28 +216,28 @@ sub alloc_image { if ($fmt eq 'raw') { - die "illegal name '$volname' - should be 'vm-$vmid-*'\n" - if $volname && $volname !~ m/^vm-$vmid-/; - $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) - if !$volname; + die "illegal name '$volname' - should be 'vm-$vmid-*'\n" + if $volname && $volname !~ m/^vm-$vmid-/; + $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) + if !$volname; - $class->zfs_create_zvol($scfg, $volname, $size); - $class->zfs_wait_for_zvol_link($scfg, $volname); + $class->zfs_create_zvol($scfg, $volname, $size); + $class->zfs_wait_for_zvol_link($scfg, $volname); - } elsif ( $fmt eq 'subvol') { + } elsif ($fmt eq 'subvol') { - die "illegal name '$volname' - should be 'subvol-$vmid-*'\n" - if $volname && $volname !~ m/^subvol-$vmid-/; - $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) - if !$volname; + die "illegal name '$volname' - should be 'subvol-$vmid-*'\n" + if $volname && $volname !~ m/^subvol-$vmid-/; + $volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) + if !$volname; - die "illegal name '$volname' - should be 'subvol-$vmid-*'\n" - if $volname !~ m/^subvol-$vmid-/; + die "illegal name '$volname' - should be 'subvol-$vmid-*'\n" + if $volname !~ m/^subvol-$vmid-/; - $class->zfs_create_subvol($scfg, $volname, $size); + $class->zfs_create_subvol($scfg, $volname, $size); } else { - die "unsupported format '$fmt'"; + die "unsupported format '$fmt'"; } return $volname; @@ -260,25 +261,25 @@ sub list_images { my $res = []; for my $info (values $zfs_list->%*) { - my $volname = $info->{name}; - my $parent = $info->{parent}; - my $owner = $info->{vmid}; + my $volname = $info->{name}; + my $parent = $info->{parent}; + my $owner = $info->{vmid}; - if ($parent && $parent =~ m/^(\S+)\@__base__$/) { - my ($basename) = ($1); - $info->{volid} = "$storeid:$basename/$volname"; - } else { - $info->{volid} = "$storeid:$volname"; - } + if ($parent && $parent =~ m/^(\S+)\@__base__$/) { + my ($basename) = ($1); + $info->{volid} = "$storeid:$basename/$volname"; + } else { + $info->{volid} = "$storeid:$volname"; + } - if ($vollist) { - my $found = grep { $_ eq $info->{volid} } @$vollist; - next if !$found; - } else { - next if defined ($vmid) && ($owner ne $vmid); - } + if ($vollist) { + my $found = grep { $_ eq $info->{volid} } @$vollist; + next if !$found; + } else { + next if defined($vmid) && ($owner ne $vmid); + } - push @$res, $info; + push @$res, $info; } return $res; } @@ -286,8 +287,8 @@ sub list_images { sub zfs_get_properties { my ($class, $scfg, $properties, $dataset, $timeout) = @_; - my $result = $class->zfs_request($scfg, $timeout, 'get', '-o', 'value', - '-Hp', $properties, $dataset); + my $result = + $class->zfs_request($scfg, $timeout, 'get', '-o', 'value', '-Hp', $properties, $dataset); my @values = split /\n/, $result; return wantarray ? 
@values : $values[0]; } @@ -300,12 +301,12 @@ sub zfs_get_pool_stats { my @lines = $class->zfs_get_properties($scfg, 'available,used', $scfg->{pool}); - if($lines[0] =~ /^(\d+)$/) { - $available = $1; + if ($lines[0] =~ /^(\d+)$/) { + $available = $1; } - if($lines[1] =~ /^(\d+)$/) { - $used = $1; + if ($lines[1] =~ /^(\d+)$/) { + $used = $1; } return ($available, $used); @@ -336,8 +337,8 @@ sub zfs_create_subvol { my $dataset = "$scfg->{pool}/$volname"; my $quota = $size ? "${size}k" : "none"; - my $cmd = ['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa', - '-o', "refquota=${quota}", $dataset]; + my $cmd = + ['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa', '-o', "refquota=${quota}", $dataset]; $class->zfs_request($scfg, undef, @$cmd); } @@ -349,19 +350,19 @@ sub zfs_delete_zvol { for (my $i = 0; $i < 6; $i++) { - eval { $class->zfs_request($scfg, undef, 'destroy', '-r', "$scfg->{pool}/$zvol"); }; - if ($err = $@) { - if ($err =~ m/^zfs error:(.*): dataset is busy.*/) { - sleep(1); - } elsif ($err =~ m/^zfs error:.*: dataset does not exist.*$/) { - $err = undef; - last; - } else { - die $err; - } - } else { - last; - } + eval { $class->zfs_request($scfg, undef, 'destroy', '-r', "$scfg->{pool}/$zvol"); }; + if ($err = $@) { + if ($err =~ m/^zfs error:(.*): dataset is busy.*/) { + sleep(1); + } elsif ($err =~ m/^zfs error:.*: dataset does not exist.*$/) { + $err = undef; + last; + } else { + die $err; + } + } else { + last; + } } die $err if $err; @@ -371,16 +372,16 @@ sub zfs_list_zvol { my ($class, $scfg) = @_; my $text = $class->zfs_request( - $scfg, - 10, - 'list', - '-o', - 'name,volsize,origin,type,refquota', - '-t', - 'volume,filesystem', - '-d1', - '-Hp', - $scfg->{pool}, + $scfg, + 10, + 'list', + '-o', + 'name,volsize,origin,type,refquota', + '-t', + 'volume,filesystem', + '-d1', + '-Hp', + $scfg->{pool}, ); # It's still required to have zfs_parse_zvol_list filter by pool, because -d1 lists # $scfg->{pool} too and while unlikely, it could be named to be mistaken for a volume. 
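The `zfs list ... -d1 -Hp` call above emits one tab-separated line per dataset, which zfs_parse_zvol_list() (earlier in this file) splits into name, volsize, origin, type and refquota. A small sketch of that parsing step; the pool name and values are made up:

    # illustrative output line for a zvol directly below pool 'tank'
    my $line = "tank/vm-100-disk-0\t34359738368\t-\tvolume\t-";

    my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line);
    # $dataset => 'tank/vm-100-disk-0', $size => 34359738368 (bytes, due to -p),
    # $origin  => '-' (no clone parent), $type => 'volume'

    my @parts = split /\//, $dataset;    # ('tank', 'vm-100-disk-0')
    my $name  = pop @parts;              # 'vm-100-disk-0'
    # the plugin keeps the entry only if join('/', @parts) equals the configured
    # pool and $name matches ^(vm|base|subvol|basevol)-(\d+)- ; $2 is the owner vmid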
@@ -389,17 +390,17 @@ sub zfs_list_zvol { my $list = {}; foreach my $zvol (@$zvols) { - my $name = $zvol->{name}; - my $parent = $zvol->{origin}; - if($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/){ - $parent = $1; - } + my $name = $zvol->{name}; + my $parent = $zvol->{origin}; + if ($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/) { + $parent = $1; + } - $list->{$name} = { - name => $name, - size => $zvol->{size}, - parent => $parent, - format => $zvol->{format}, + $list->{$name} = { + name => $name, + size => $zvol->{size}, + parent => $parent, + format => $zvol->{format}, vmid => $zvol->{owner}, }; } @@ -420,8 +421,8 @@ sub zfs_get_sorted_snapshot_list { my $snap_names = []; for my $snapshot (@snapshots) { - (my $snap_name = $snapshot) =~ s/^.*@//; - push $snap_names->@*, $snap_name; + (my $snap_name = $snapshot) =~ s/^.*@//; + push $snap_names->@*, $snap_name; } return $snap_names; } @@ -435,9 +436,9 @@ sub status { my $active = 0; eval { - ($free, $used) = $class->zfs_get_pool_stats($scfg); - $active = 1; - $total = $free + $used; + ($free, $used) = $class->zfs_get_pool_stats($scfg); + $active = 1; + $total = $free + $used; }; warn $@ if $@; @@ -447,16 +448,16 @@ sub status { sub volume_size_info { my ($class, $scfg, $storeid, $volname, $timeout) = @_; - my (undef, $vname, undef, $parent, undef, undef, $format) = - $class->parse_volname($volname); + my (undef, $vname, undef, $parent, undef, undef, $format) = $class->parse_volname($volname); my $attr = $format eq 'subvol' ? 'refquota' : 'volsize'; - my ($size, $used) = $class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname"); + my ($size, $used) = + $class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname"); $used = ($used =~ /^(\d+)$/) ? $1 : 0; if ($size =~ /^(\d+)$/) { - return wantarray ? ($1, $format, $used, $parent) : $1; + return wantarray ? 
($1, $format, $used, $parent) : $1; } die "Could not get zfs volume size\n"; @@ -490,10 +491,10 @@ sub volume_snapshot_rollback { # caches, they get mounted in activate volume again # see zfs bug #10931 https://github.com/openzfs/zfs/issues/10931 if ($format eq 'subvol') { - eval { $class->zfs_request($scfg, undef, 'unmount', "$scfg->{pool}/$vname"); }; - if (my $err = $@) { - die $err if $err !~ m/not currently mounted$/; - } + eval { $class->zfs_request($scfg, undef, 'unmount', "$scfg->{pool}/$vname"); }; + if (my $err = $@) { + die $err if $err !~ m/not currently mounted$/; + } } return $msg; @@ -509,20 +510,20 @@ sub volume_rollback_is_possible { my $found; $blockers //= []; # not guaranteed to be set by caller for my $snapshot ($snapshots->@*) { - if ($snapshot eq $snap) { - $found = 1; - } elsif ($found) { - push $blockers->@*, $snapshot; - } + if ($snapshot eq $snap) { + $found = 1; + } elsif ($found) { + push $blockers->@*, $snapshot; + } } my $volid = "${storeid}:${volname}"; die "can't rollback, snapshot '$snap' does not exist on '$volid'\n" - if !$found; + if !$found; die "can't rollback, '$snap' is not most recent snapshot on '$volid'\n" - if scalar($blockers->@*) > 0; + if scalar($blockers->@*) > 0; return 1; } @@ -540,13 +541,13 @@ sub volume_snapshot_info { my $info = {}; for my $line (@lines) { - my ($snapshot, $guid, $creation) = split(/\s+/, $line); - (my $snap_name = $snapshot) =~ s/^.*@//; + my ($snapshot, $guid, $creation) = split(/\s+/, $line); + (my $snap_name = $snapshot) =~ s/^.*@//; - $info->{$snap_name} = { - id => $guid, - timestamp => $creation, - }; + $info->{$snap_name} = { + id => $guid, + timestamp => $creation, + }; } return $info; } @@ -556,12 +557,12 @@ my sub dataset_mounted_heuristic { my $mounts = PVE::ProcFSTools::parse_proc_mounts(); for my $mp (@$mounts) { - my ($what, $dir, $fs) = $mp->@*; - next if $fs ne 'zfs'; - # check for root-dataset or any child-dataset (root-dataset could have 'canmount=off') - # If any child is mounted heuristically assume that `zfs mount -a` was successful - next if $what !~ m!^$dataset(?:/|$)!; - return 1; + my ($what, $dir, $fs) = $mp->@*; + next if $fs ne 'zfs'; + # check for root-dataset or any child-dataset (root-dataset could have 'canmount=off') + # If any child is mounted heuristically assume that `zfs mount -a` was successful + next if $what !~ m!^$dataset(?:/|$)!; + return 1; } return 0; } @@ -576,21 +577,21 @@ sub activate_storage { return 1 if dataset_mounted_heuristic($dataset); # early return my $pool_imported = sub { - my @param = ('-o', 'name', '-H', $pool); - my $res = eval { $class->zfs_request($scfg, undef, 'zpool_list', @param) }; - warn "$@\n" if $@; + my @param = ('-o', 'name', '-H', $pool); + my $res = eval { $class->zfs_request($scfg, undef, 'zpool_list', @param) }; + warn "$@\n" if $@; - return defined($res) && $res =~ m/$pool/; + return defined($res) && $res =~ m/$pool/; }; if (!$pool_imported->()) { - # import can only be done if not yet imported! - my @param = ('-d', '/dev/disk/by-id/', '-o', 'cachefile=none', $pool); - eval { $class->zfs_request($scfg, undef, 'zpool_import', @param) }; - if (my $err = $@) { - # just could've raced with another import, so recheck if it is imported - die "could not activate storage '$storeid', $err\n" if !$pool_imported->(); - } + # import can only be done if not yet imported! 
+ my @param = ('-d', '/dev/disk/by-id/', '-o', 'cachefile=none', $pool); + eval { $class->zfs_request($scfg, undef, 'zpool_import', @param) }; + if (my $err = $@) { + # just could've raced with another import, so recheck if it is imported + die "could not activate storage '$storeid', $err\n" if !$pool_imported->(); + } } eval { $class->zfs_request($scfg, undef, 'mount', '-a') }; die "could not activate storage '$storeid', $@\n" if $@; @@ -610,12 +611,12 @@ sub activate_volume { my (undef, $dataset, undef, undef, undef, undef, $format) = $class->parse_volname($volname); if ($format eq 'raw') { - $class->zfs_wait_for_zvol_link($scfg, $volname); + $class->zfs_wait_for_zvol_link($scfg, $volname); } elsif ($format eq 'subvol') { - my $mounted = $class->zfs_get_properties($scfg, 'mounted', "$scfg->{pool}/$dataset"); - if ($mounted !~ m/^yes$/) { - $class->zfs_request($scfg, undef, 'mount', "$scfg->{pool}/$dataset"); - } + my $mounted = $class->zfs_get_properties($scfg, 'mounted', "$scfg->{pool}/$dataset"); + if ($mounted !~ m/^yes$/) { + $class->zfs_request($scfg, undef, 'mount', "$scfg->{pool}/$dataset"); + } } return 1; @@ -639,11 +640,27 @@ sub clone_image { my $name = $class->find_free_diskname($storeid, $scfg, $vmid, $format); if ($format eq 'subvol') { - my $size = $class->zfs_request($scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename"); - chomp($size); - $class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name", '-o', "refquota=$size"); + my $size = $class->zfs_request( + $scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename", + ); + chomp($size); + $class->zfs_request( + $scfg, + undef, + 'clone', + "$scfg->{pool}/$basename\@$snap", + "$scfg->{pool}/$name", + '-o', + "refquota=$size", + ); } else { - $class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name"); + $class->zfs_request( + $scfg, + undef, + 'clone', + "$scfg->{pool}/$basename\@$snap", + "$scfg->{pool}/$name", + ); } return "$basename/$name"; @@ -660,16 +677,16 @@ sub create_base { die "create_base not possible with base image\n" if $isBase; my $newname = $name; - if ( $format eq 'subvol' ) { - $newname =~ s/^subvol-/basevol-/; + if ($format eq 'subvol') { + $newname =~ s/^subvol-/basevol-/; } else { - $newname =~ s/^vm-/base-/; + $newname =~ s/^vm-/base-/; } my $newvolname = $basename ? "$basename/$newname" : "$newname"; $class->zfs_request($scfg, undef, 'rename', "$scfg->{pool}/$name", "$scfg->{pool}/$newname"); - my $running = undef; #fixme : is create_base always offline ? + my $running = undef; #fixme : is create_base always offline ? $class->volume_snapshot($scfg, $storeid, $newname, $snap, $running); @@ -679,17 +696,16 @@ sub create_base { sub volume_resize { my ($class, $scfg, $storeid, $volname, $size, $running) = @_; - my $new_size = int($size/1024); + my $new_size = int($size / 1024); - my (undef, $vname, undef, undef, undef, undef, $format) = - $class->parse_volname($volname); + my (undef, $vname, undef, undef, undef, undef, $format) = $class->parse_volname($volname); my $attr = $format eq 'subvol' ? 
'refquota' : 'volsize'; # align size to 1M so we always have a valid multiple of the volume block size if ($format eq 'raw') { - my $padding = (1024 - $new_size % 1024) % 1024; - $new_size = $new_size + $padding; + my $padding = (1024 - $new_size % 1024) % 1024; + $new_size = $new_size + $padding; } $class->zfs_request($scfg, undef, 'set', "$attr=${new_size}k", "$scfg->{pool}/$vname"); @@ -709,24 +725,23 @@ sub volume_has_feature { my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_; my $features = { - snapshot => { current => 1, snap => 1}, - clone => { base => 1}, - template => { current => 1}, - copy => { base => 1, current => 1}, - sparseinit => { base => 1, current => 1}, - replicate => { base => 1, current => 1}, - rename => {current => 1}, + snapshot => { current => 1, snap => 1 }, + clone => { base => 1 }, + template => { current => 1 }, + copy => { base => 1, current => 1 }, + sparseinit => { base => 1, current => 1 }, + replicate => { base => 1, current => 1 }, + rename => { current => 1 }, }; - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = - $class->parse_volname($volname); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname); my $key = undef; if ($snapname) { - $key = 'snap'; + $key = 'snap'; } else { - $key = $isBase ? 'base' : 'current'; + $key = $isBase ? 'base' : 'current'; } return 1 if $features->{$feature}->{$key}; @@ -735,19 +750,20 @@ sub volume_has_feature { } sub volume_export { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_; + my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) + = @_; die "unsupported export stream format for $class: $format\n" - if $format ne 'zfs'; + if $format ne 'zfs'; die "$class storage can only export snapshots\n" - if !defined($snapshot); + if !defined($snapshot); my $dataset = ($class->parse_volname($volname))[1]; my $fd = fileno($fh); die "internal error: invalid file handle for volume_export\n" - if !defined($fd); + if !defined($fd); $fd = ">&$fd"; # For zfs we always create a replication stream (-R) which means the remote @@ -755,8 +771,8 @@ sub volume_export { # for all our use cases. my $cmd = ['zfs', 'send', '-Rpv']; if (defined($base_snapshot)) { - my $arg = $with_snapshots ? '-I' : '-i'; - push @$cmd, $arg, $base_snapshot; + my $arg = $with_snapshots ? '-I' : '-i'; + push @$cmd, $arg, $base_snapshot; } push @$cmd, '--', "$scfg->{pool}/$dataset\@$snapshot"; @@ -776,39 +792,53 @@ sub volume_export_formats { } sub volume_import { - my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_; + my ( + $class, + $scfg, + $storeid, + $fh, + $volname, + $format, + $snapshot, + $base_snapshot, + $with_snapshots, + $allow_rename, + ) = @_; die "unsupported import stream format for $class: $format\n" - if $format ne 'zfs'; + if $format ne 'zfs'; my $fd = fileno($fh); die "internal error: invalid file handle for volume_import\n" - if !defined($fd); + if !defined($fd); my (undef, $dataset, $vmid, undef, undef, undef, $volume_format) = - $class->parse_volname($volname); + $class->parse_volname($volname); my $zfspath = "$scfg->{pool}/$dataset"; my $suffix = defined($base_snapshot) ? "\@$base_snapshot" : ''; - my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $zfspath.$suffix], - noerr => 1, quiet => 1); + my $exists = 0 == run_command( + ['zfs', 'get', '-H', 'name', $zfspath . 
$suffix], + noerr => 1, + quiet => 1, + ); if (defined($base_snapshot)) { - die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists; + die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists; } elsif ($exists) { - die "volume '$zfspath' already exists\n" if !$allow_rename; - warn "volume '$zfspath' already exists - importing with a different name\n"; - $dataset = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format); - $zfspath = "$scfg->{pool}/$dataset"; + die "volume '$zfspath' already exists\n" if !$allow_rename; + warn "volume '$zfspath' already exists - importing with a different name\n"; + $dataset = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format); + $zfspath = "$scfg->{pool}/$dataset"; } eval { run_command(['zfs', 'recv', '-F', '--', $zfspath], input => "<&$fd") }; if (my $err = $@) { - if (defined($base_snapshot)) { - eval { run_command(['zfs', 'rollback', '-r', '--', "$zfspath\@$base_snapshot"]) }; - } else { - eval { run_command(['zfs', 'destroy', '-r', '--', $zfspath]) }; - } - die $err; + if (defined($base_snapshot)) { + eval { run_command(['zfs', 'rollback', '-r', '--', "$zfspath\@$base_snapshot"]) }; + } else { + eval { run_command(['zfs', 'destroy', '-r', '--', $zfspath]) }; + } + die $err; } return "$storeid:$dataset"; @@ -817,30 +847,29 @@ sub volume_import { sub volume_import_formats { my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_; - return $class->volume_export_formats($scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots); + return $class->volume_export_formats( + $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots, + ); } sub rename_volume { my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_; my ( - undef, - $source_image, - $source_vmid, - $base_name, - $base_vmid, - undef, - $format + undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format, ) = $class->parse_volname($source_volname); $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format) - if !$target_volname; + if !$target_volname; my $pool = $scfg->{pool}; my $source_zfspath = "${pool}/${source_image}"; my $target_zfspath = "${pool}/${target_volname}"; - my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $target_zfspath], - noerr => 1, quiet => 1); + my $exists = 0 == run_command( + ['zfs', 'get', '-H', 'name', $target_zfspath], + noerr => 1, + quiet => 1, + ); die "target volume '${target_volname}' already exists\n" if $exists; $class->zfs_request($scfg, 5, 'rename', ${source_zfspath}, ${target_zfspath}); diff --git a/src/PVE/test/ceph_conf_parse_write_test.pl b/src/PVE/test/ceph_conf_parse_write_test.pl index 1d5e506..8bee600 100755 --- a/src/PVE/test/ceph_conf_parse_write_test.pl +++ b/src/PVE/test/ceph_conf_parse_write_test.pl @@ -9,7 +9,6 @@ use Test::More; use PVE::CephConfig; - # An array of test cases. 
# Each test case is comprised of the following keys: # description => to identify a single test @@ -17,15 +16,15 @@ use PVE::CephConfig; # raw => the raw content of the file to test my $tests = [ { - description => 'empty file', - expected_cfg => {}, - raw => <<~EOF, + description => 'empty file', + expected_cfg => {}, + raw => <<~EOF, EOF }, { - description => 'file without section', - expected_cfg => {}, - raw => <<~EOF, + description => 'file without section', + expected_cfg => {}, + raw => <<~EOF, While Ceph's format doesn't allow this, we do, because it makes things simpler foo = bar arbitrary text can go here @@ -34,83 +33,83 @@ my $tests = [ EOF }, { - description => 'single section', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, [foo] bar = baz EOF }, { - description => 'single section, no key-value pairs', - expected_cfg => { - foo => {}, - }, - raw => <<~EOF, + description => 'single section, no key-value pairs', + expected_cfg => { + foo => {}, + }, + raw => <<~EOF, [foo] EOF }, { - description => 'single section, whitespace before key', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, whitespace before key', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, [foo] \t bar = baz EOF }, { - description => 'single section, section header with preceding whitespace', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, section header with preceding whitespace', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, \t [foo] bar = baz EOF }, { - description => 'single section, section header with comment', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, section header with comment', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, [foo] # some comment bar = baz EOF }, { - description => 'single section, section header ' . - 'with preceding whitespace and comment', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, section header ' + . 
'with preceding whitespace and comment', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, \t [foo] ; some comment bar = baz EOF }, { - description => 'single section, arbitrary text before section', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, arbitrary text before section', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, Rust is better than Perl This text is ignored by our parser, because it makes things simpler @@ -119,13 +118,13 @@ my $tests = [ EOF }, { - description => 'single section, invalid key-value pairs', - expected_cfg => { - foo => { - bar => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, invalid key-value pairs', + expected_cfg => { + foo => { + bar => 'baz', + }, + }, + raw => <<~EOF, [foo] this here will cause a warning and is ignored bar = baz @@ -133,15 +132,15 @@ my $tests = [ EOF }, { - description => 'single section, multiple key-value pairs', - expected_cfg => { - foo => { - one => 1, - two => 2, - three => 3, - }, - }, - raw => <<~EOF, + description => 'single section, multiple key-value pairs', + expected_cfg => { + foo => { + one => 1, + two => 2, + three => 3, + }, + }, + raw => <<~EOF, [foo] one = 1 two = 2 @@ -150,29 +149,29 @@ my $tests = [ EOF }, { - description => 'multiple sections with whitespace in section headers', - expected_cfg => { - 'foo bar' => {}, - ' quo qux ' => {}, - }, - raw => <<~EOF, + description => 'multiple sections with whitespace in section headers', + expected_cfg => { + 'foo bar' => {}, + ' quo qux ' => {}, + }, + raw => <<~EOF, [foo bar] [ quo qux ] EOF }, { - description => 'single section with whitespace in section header ' - . 'and multiple key-value pair', - expected_cfg => { - 'foo bar' => { - one => 1, - two => 2, - }, - ' quo ' => { - three => 3, - }, - }, - raw => <<~EOF, + description => 'single section with whitespace in section header ' + . 'and multiple key-value pair', + expected_cfg => { + 'foo bar' => { + one => 1, + two => 2, + }, + ' quo ' => { + three => 3, + }, + }, + raw => <<~EOF, [foo bar] one = 1 two = 2 @@ -182,17 +181,17 @@ my $tests = [ EOF }, { - description => 'single section, numeric key-value pairs', - expected_cfg => { - foo => { - '0' => 0, - '1' => 1, - '2' => 0, - '3' => 1, - '3.14' => 1.414, - }, - }, - raw => <<~EOF, + description => 'single section, numeric key-value pairs', + expected_cfg => { + foo => { + '0' => 0, + '1' => 1, + '2' => 0, + '3' => 1, + '3.14' => 1.414, + }, + }, + raw => <<~EOF, [foo] 0 = 0 1 = 1 @@ -202,40 +201,40 @@ my $tests = [ EOF }, { - description => 'single section, keys with single-quoted values', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, keys with single-quoted values', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar = 'baz' quo = 'qux' EOF }, { - description => 'single section, keys with double-quoted values', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, keys with double-quoted values', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar = "baz" quo = "qux" EOF }, { - description => 'single section, keys with quoted values, ' - . 'comment literals within quotes', - expected_cfg => { - foo => {}, - }, - raw => <<~EOF, + description => 'single section, keys with quoted values, ' + . 
'comment literals within quotes', + expected_cfg => { + foo => {}, + }, + raw => <<~EOF, [foo] one = "1;1" two = "2#2" @@ -244,17 +243,17 @@ my $tests = [ EOF }, { - description => 'single section, keys with quoted values, ' - . 'escaped comment literals within quotes', - expected_cfg => { - foo => { - one => '1;1', - two => '2#2', - three => '3;3', - four => '4#4', - }, - }, - raw => <<~EOF, + description => 'single section, keys with quoted values, ' + . 'escaped comment literals within quotes', + expected_cfg => { + foo => { + one => '1;1', + two => '2#2', + three => '3;3', + four => '4#4', + }, + }, + raw => <<~EOF, [foo] one = "1\\;1" two = "2\\#2" @@ -263,30 +262,29 @@ my $tests = [ EOF }, { - description => 'single section, keys with quoted values, ' - . 'comments after values', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, keys with quoted values, ' . 'comments after values', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar = "baz" ; some comment quo = 'qux' # another comment EOF }, { - description => 'single section, keys with quoted values, ' - . 'continued lines after quotes', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, keys with quoted values, ' + . 'continued lines after quotes', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar = "baz"\\ @@ -295,15 +293,15 @@ my $tests = [ EOF }, { - description => 'single section, keys with quoted values, ' - . 'continued lines with comments after quotes', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, keys with quoted values, ' + . 'continued lines with comments after quotes', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar = "baz"\\ # believe it or not, this is valid syntax @@ -316,14 +314,14 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with whitespace', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with whitespace', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] \t bar \t =\t \tbaz\t @@ -331,30 +329,30 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs without whitespace', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs without whitespace', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar=baz quo=qux EOF }, { - description => 'single section, key-value pairs with repeated whitespace ' - . 'in key names', - expected_cfg => { - foo => { - 'one_space' => 1, - 'two_spaces' => 2, - 'three_spaces' => 3, - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with repeated whitespace ' + . 'in key names', + expected_cfg => { + foo => { + 'one_space' => 1, + 'two_spaces' => 2, + 'three_spaces' => 3, + }, + }, + raw => <<~EOF, [foo] one space = 1 two spaces = 2 @@ -362,16 +360,16 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with whitespace and ' - . 
'complex whitespace in key names', - expected_cfg => { - foo => { - 'one_two' => 'foo', - 'three_four' => 'bar', - 'five_six' => 'baz', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with whitespace and ' + . 'complex whitespace in key names', + expected_cfg => { + foo => { + 'one_two' => 'foo', + 'three_four' => 'bar', + 'five_six' => 'baz', + }, + }, + raw => <<~EOF, [foo] \t one two \t =\t \tfoo\t @@ -380,17 +378,17 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with repeated whitespace ' - . 'and underlines in key names', - expected_cfg => { - foo => { - 'one_ul' => 2, - 'two_ul' => 0, - 'two__ul' => 0, - 'odd___name' => 4, - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with repeated whitespace ' + . 'and underlines in key names', + expected_cfg => { + foo => { + 'one_ul' => 2, + 'two_ul' => 0, + 'two__ul' => 0, + 'odd___name' => 4, + }, + }, + raw => <<~EOF, [foo] # these are equivalent one ul = 0 @@ -411,14 +409,14 @@ my $tests = [ EOF }, { - description => 'single section with line continuations, multiple key-value pairs', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section with line continuations, multiple key-value pairs', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [\\ f\\ o\\ @@ -430,14 +428,14 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with continued lines in keys', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with continued lines in keys', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] bar\\ = baz @@ -453,25 +451,25 @@ my $tests = [ EOF }, { - description => 'multiple sections with escaped comment literals, ' - . 'multiple key-value pairs', - expected_cfg => { - 'f;oo' => { - one => 1, - two => 2, - }, - 'b#ar' => { - three => 3, - four => 4, - }, - '###' => { - five => 5, - }, - ';;;' => { - six => 6, - }, - }, - raw => <<~EOF, + description => 'multiple sections with escaped comment literals, ' + . 'multiple key-value pairs', + expected_cfg => { + 'f;oo' => { + one => 1, + two => 2, + }, + 'b#ar' => { + three => 3, + four => 4, + }, + '###' => { + five => 5, + }, + ';;;' => { + six => 6, + }, + }, + raw => <<~EOF, [f\\;oo] one = 1 two = 2 @@ -488,14 +486,14 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with comments', - expected_cfg => { - foo => { - bar => 'baz', - quo => 'qux', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with comments', + expected_cfg => { + foo => { + bar => 'baz', + quo => 'qux', + }, + }, + raw => <<~EOF, [foo] ; preceding comment bar = baz # some comment @@ -506,14 +504,14 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with continued lines', - expected_cfg => { - foo => { - bar => 'baz continued baz', - quo => "qux continued \tqux", - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with continued lines', + expected_cfg => { + foo => { + bar => 'baz continued baz', + quo => "qux continued \tqux", + }, + }, + raw => <<~EOF, [foo] bar = baz \\ continued baz @@ -525,16 +523,15 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with ' . 
- 'continued lines and comments', - expected_cfg => { - foo => { - bar => 'baz continued baz', - quo => 'qux continued qux', - key => 'value', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with ' . 'continued lines and comments', + expected_cfg => { + foo => { + bar => 'baz continued baz', + quo => 'qux continued qux', + key => 'value', + }, + }, + raw => <<~EOF, [foo] bar = baz \\ continued baz # comments are allowed here @@ -548,30 +545,30 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with ' . - 'escaped commment literals in values', - expected_cfg => { - foo => { - bar => 'baz#escaped', - quo => 'qux;escaped', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with ' + . 'escaped commment literals in values', + expected_cfg => { + foo => { + bar => 'baz#escaped', + quo => 'qux;escaped', + }, + }, + raw => <<~EOF, [foo] bar = baz\\#escaped quo = qux\\;escaped EOF }, { - description => 'single section, key-value pairs with ' . - 'continued lines and escaped commment literals in values', - expected_cfg => { - foo => { - bar => 'baz#escaped', - quo => 'qux;escaped continued# escaped done', - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with ' + . 'continued lines and escaped commment literals in values', + expected_cfg => { + foo => { + bar => 'baz#escaped', + quo => 'qux;escaped continued# escaped done', + }, + }, + raw => <<~EOF, [foo] bar = baz\\#escaped @@ -581,21 +578,21 @@ my $tests = [ EOF }, { - description => 'single section, key-value pairs with escaped comment ' - . 'literals in key names', - expected_cfg => { - foo => { - 'b#a#r' => 'baz', - ';q;uo' => 'qux', - '#' => 1, - '##' => 2, - '###' => 3, - ';' => 1, - ';;' => 2, - ';;;' => 3, - }, - }, - raw => <<~EOF, + description => 'single section, key-value pairs with escaped comment ' + . 'literals in key names', + expected_cfg => { + foo => { + 'b#a#r' => 'baz', + ';q;uo' => 'qux', + '#' => 1, + '##' => 2, + '###' => 3, + ';' => 1, + ';;' => 2, + ';;;' => 3, + }, + }, + raw => <<~EOF, [foo] b\\#a\\#r = baz \\;q\\;uo = qux @@ -610,18 +607,18 @@ my $tests = [ EOF }, { - description => 'multiple sections, multiple key-value pairs', - expected_cfg => { - foo => { - one => 1, - two => 2, - }, - bar => { - three => 3, - four => 4, - }, - }, - raw => <<~EOF, + description => 'multiple sections, multiple key-value pairs', + expected_cfg => { + foo => { + one => 1, + two => 2, + }, + bar => { + three => 3, + four => 4, + }, + }, + raw => <<~EOF, [foo] one = 1 two = 2 @@ -631,44 +628,44 @@ my $tests = [ EOF }, { - description => 'multiple sections, multiple key-value pairs, ' - . 'comments inline and inbetween, escaped comment literals, ' - . 'continued lines, arbitrary whitespace', - # NOTE: We don't use '/etc/pve/priv/$cluster.$name.keyring' as value for - # 'keyring' below, because `ceph-conf` will actually substitute those. - # Because we don't care for that (not the parser's or writer's job) we - # just omit the dollar signs. 
- expected_cfg => { - global => { - auth_client_required => 'cephx', - auth_cluster_required => 'cephx', - auth_service_required => 'cephx', - cluster_network => '172.16.65.0/24', - fsid => '0e2f72eb-ffff-ffff-ffff-f480790a5b07', - mon_allow_pool_delete => 'true', - mon_host => '172.16.65.12 172.16.65.13 172.16.65.11', - ms_bind_ipv4 => 'true', - osd_pool_default_min_size => '2', - osd_pool_default_size => '3', - public_network => '172.16.65.0/24', - }, - client => { - keyring => '/etc/pve/priv/cluster.name.keyring', - }, - 'mon.ceph-01' => { - public_addr => '172.16.65.11', - }, - 'mon.ceph-02' => { - public_addr => '172.16.65.12', - }, - 'mon.ceph-03' => { - public_addr => '172.16.65.13', - }, - 'some arbitrary section' => { - some_key => 'foo;bar;baz', - }, - }, - raw => <<~EOF, + description => 'multiple sections, multiple key-value pairs, ' + . 'comments inline and inbetween, escaped comment literals, ' + . 'continued lines, arbitrary whitespace', + # NOTE: We don't use '/etc/pve/priv/$cluster.$name.keyring' as value for + # 'keyring' below, because `ceph-conf` will actually substitute those. + # Because we don't care for that (not the parser's or writer's job) we + # just omit the dollar signs. + expected_cfg => { + global => { + auth_client_required => 'cephx', + auth_cluster_required => 'cephx', + auth_service_required => 'cephx', + cluster_network => '172.16.65.0/24', + fsid => '0e2f72eb-ffff-ffff-ffff-f480790a5b07', + mon_allow_pool_delete => 'true', + mon_host => '172.16.65.12 172.16.65.13 172.16.65.11', + ms_bind_ipv4 => 'true', + osd_pool_default_min_size => '2', + osd_pool_default_size => '3', + public_network => '172.16.65.0/24', + }, + client => { + keyring => '/etc/pve/priv/cluster.name.keyring', + }, + 'mon.ceph-01' => { + public_addr => '172.16.65.11', + }, + 'mon.ceph-02' => { + public_addr => '172.16.65.12', + }, + 'mon.ceph-03' => { + public_addr => '172.16.65.13', + }, + 'some arbitrary section' => { + some_key => 'foo;bar;baz', + }, + }, + raw => <<~EOF, [global] auth_client_required = cephx auth_cluster_required = \\ @@ -724,17 +721,17 @@ sub test_parse_ceph_config { my $parse_result = eval { PVE::CephConfig::parse_ceph_config(undef, $case->{raw}) }; if ($@) { - fail($desc); - diag('Failed to parse config:'); - diag($@); - return; + fail($desc); + diag('Failed to parse config:'); + diag($@); + return; } if (!is_deeply($parse_result, $case->{expected_cfg}, $desc)) { - diag("=== Expected ==="); - diag(explain($case->{expected_cfg})); - diag("=== Got ==="); - diag(explain($parse_result)); + diag("=== Expected ==="); + diag(explain($case->{expected_cfg})); + diag("=== Got ==="); + diag(explain($parse_result)); } } @@ -745,47 +742,46 @@ sub test_write_ceph_config { my $write_result = eval { PVE::CephConfig::write_ceph_config(undef, $case->{expected_cfg}) }; if ($@) { - fail($desc); - diag('Failed to write config:'); - diag($@); - return; + fail($desc); + diag('Failed to write config:'); + diag($@); + return; } my $parse_result = eval { PVE::CephConfig::parse_ceph_config(undef, $write_result) }; if ($@) { - fail($desc); - diag('Failed to parse previously written config:'); - diag($@); - return; + fail($desc); + diag('Failed to parse previously written config:'); + diag($@); + return; } if (!is_deeply($parse_result, $case->{expected_cfg}, $desc)) { - diag("=== Expected ==="); - diag(explain($case->{expected_cfg})); - diag("=== Got ==="); - diag(explain($parse_result)); - diag("=== Write Output ==="); - diag($write_result); + diag("=== Expected ==="); + 
diag(explain($case->{expected_cfg})); + diag("=== Got ==="); + diag(explain($parse_result)); + diag("=== Write Output ==="); + diag($write_result); } } sub main { my $test_subs = [ - \&test_parse_ceph_config, - \&test_write_ceph_config, + \&test_parse_ceph_config, \&test_write_ceph_config, ]; plan(tests => scalar($tests->@*) * scalar($test_subs->@*)); for my $case ($tests->@*) { - for my $test_sub ($test_subs->@*) { - eval { - # suppress warnings here to make output less noisy for certain tests - local $SIG{__WARN__} = sub {}; - $test_sub->($case); - }; - warn "$@\n" if $@; - }; + for my $test_sub ($test_subs->@*) { + eval { + # suppress warnings here to make output less noisy for certain tests + local $SIG{__WARN__} = sub { }; + $test_sub->($case); + }; + warn "$@\n" if $@; + } } done_testing(); diff --git a/src/test/archive_info_test.pm b/src/test/archive_info_test.pm index 53e37be..c5c3992 100644 --- a/src/test/archive_info_test.pm +++ b/src/test/archive_info_test.pm @@ -22,119 +22,119 @@ my $NOTES_EXT = PVE::Storage::Plugin::NOTES_EXT; my $tests = [ # backup archives { - description => 'Backup archive, lxc, tgz, future millenium', - archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz", - expected => { - 'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz", - 'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00".$LOG_EXT, - 'notesfilename'=> "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz".$NOTES_EXT, - 'type' => 'lxc', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'vmid' => $vmid, - 'ctime' => 60*60*24 * (365*1100 + 267), - 'is_std_name' => 1, - }, + description => 'Backup archive, lxc, tgz, future millenium', + archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz", + expected => { + 'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz", + 'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00" . $LOG_EXT, + 'notesfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz" . $NOTES_EXT, + 'type' => 'lxc', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'vmid' => $vmid, + 'ctime' => 60 * 60 * 24 * (365 * 1100 + 267), + 'is_std_name' => 1, + }, }, { - description => 'Backup archive, lxc, tgz, very old', - archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz", - expected => { - 'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz", - 'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30".$LOG_EXT, - 'notesfilename'=> "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz".$NOTES_EXT, - 'type' => 'lxc', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'vmid' => $vmid, - 'ctime' => 60*60*2 + 30, - 'is_std_name' => 1, - }, + description => 'Backup archive, lxc, tgz, very old', + archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz", + expected => { + 'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz", + 'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30" . $LOG_EXT, + 'notesfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz" . 
$NOTES_EXT, + 'type' => 'lxc', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'vmid' => $vmid, + 'ctime' => 60 * 60 * 2 + 30, + 'is_std_name' => 1, + }, }, { - description => 'Backup archive, lxc, tgz', - archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz", - expected => { - 'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz", - 'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30".$LOG_EXT, - 'notesfilename'=> "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT, - 'type' => 'lxc', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'vmid' => $vmid, - 'ctime' => 1585604370, - 'is_std_name' => 1, - }, + description => 'Backup archive, lxc, tgz', + archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz", + expected => { + 'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz", + 'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30" . $LOG_EXT, + 'notesfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT, + 'type' => 'lxc', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'vmid' => $vmid, + 'ctime' => 1585604370, + 'is_std_name' => 1, + }, }, { - description => 'Backup archive, openvz, tgz', - archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz", - expected => { - 'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz", - 'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30".$LOG_EXT, - 'notesfilename'=> "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT, - 'type' => 'openvz', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'vmid' => $vmid, - 'ctime' => 1585604370, - 'is_std_name' => 1, - }, + description => 'Backup archive, openvz, tgz', + archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz", + expected => { + 'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz", + 'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30" . $LOG_EXT, + 'notesfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT, + 'type' => 'openvz', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'vmid' => $vmid, + 'ctime' => 1585604370, + 'is_std_name' => 1, + }, }, { - description => 'Backup archive, custom dump directory, qemu, tgz', - archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz", - expected => { - 'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz", - 'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30".$LOG_EXT, - 'notesfilename'=> "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT, - 'type' => 'qemu', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'vmid' => $vmid, - 'ctime' => 1585604370, - 'is_std_name' => 1, - }, + description => 'Backup archive, custom dump directory, qemu, tgz', + archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz", + expected => { + 'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz", + 'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30" . $LOG_EXT, + 'notesfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz" . 
$NOTES_EXT, + 'type' => 'qemu', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'vmid' => $vmid, + 'ctime' => 1585604370, + 'is_std_name' => 1, + }, }, { - description => 'Backup archive, none, tgz', - archive => "backup/vzdump-qemu-$vmid-whatever-the-name_is_here.tgz", - expected => { - 'filename' => "vzdump-qemu-$vmid-whatever-the-name_is_here.tgz", - 'type' => 'qemu', - 'format' => 'tar', - 'decompressor' => ['tar', '-z'], - 'compression' => 'gz', - 'is_std_name' => 0, - }, + description => 'Backup archive, none, tgz', + archive => "backup/vzdump-qemu-$vmid-whatever-the-name_is_here.tgz", + expected => { + 'filename' => "vzdump-qemu-$vmid-whatever-the-name_is_here.tgz", + 'type' => 'qemu', + 'format' => 'tar', + 'decompressor' => ['tar', '-z'], + 'compression' => 'gz', + 'is_std_name' => 0, + }, }, ]; # add new compression fromats to test my $decompressor = { tar => { - gz => ['tar', '-z'], - lzo => ['tar', '--lzop'], - zst => ['tar', '--zstd'], - bz2 => ['tar', '--bzip2'], + gz => ['tar', '-z'], + lzo => ['tar', '--lzop'], + zst => ['tar', '--zstd'], + bz2 => ['tar', '--bzip2'], }, vma => { - gz => ['zcat'], - lzo => ['lzop', '-d', '-c'], - zst => ['zstd', '-q', '-d', '-c'], - bz2 => ['bzcat', '-q'], + gz => ['zcat'], + lzo => ['lzop', '-d', '-c'], + zst => ['zstd', '-q', '-d', '-c'], + bz2 => ['bzcat', '-q'], }, }; my $bkp_suffix = { - qemu => [ 'vma', $decompressor->{vma}, ], - lxc => [ 'tar', $decompressor->{tar}, ], - openvz => [ 'tar', $decompressor->{tar}, ], + qemu => ['vma', $decompressor->{vma}], + lxc => ['tar', $decompressor->{tar}], + openvz => ['tar', $decompressor->{tar}], }; # create more test cases for backup files matches @@ -143,48 +143,48 @@ for my $virt (sort keys %$bkp_suffix) { my $archive_name = "vzdump-$virt-$vmid-2020_03_30-21_12_40"; for my $suffix (sort keys %$decomp) { - push @$tests, { - description => "Backup archive, $virt, $format.$suffix", - archive => "backup/$archive_name.$format.$suffix", - expected => { - 'filename' => "$archive_name.$format.$suffix", - 'logfilename' => $archive_name.$LOG_EXT, - 'notesfilename'=> "$archive_name.$format.$suffix".$NOTES_EXT, - 'type' => "$virt", - 'format' => "$format", - 'decompressor' => $decomp->{$suffix}, - 'compression' => "$suffix", - 'vmid' => $vmid, - 'ctime' => 1585602760, - 'is_std_name' => 1, - }, - }; + push @$tests, + { + description => "Backup archive, $virt, $format.$suffix", + archive => "backup/$archive_name.$format.$suffix", + expected => { + 'filename' => "$archive_name.$format.$suffix", + 'logfilename' => $archive_name . $LOG_EXT, + 'notesfilename' => "$archive_name.$format.$suffix" . 
$NOTES_EXT, + 'type' => "$virt", + 'format' => "$format", + 'decompressor' => $decomp->{$suffix}, + 'compression' => "$suffix", + 'vmid' => $vmid, + 'ctime' => 1585602760, + 'is_std_name' => 1, + }, + }; } } - # add compression formats to test failed matches my $non_bkp_suffix = { - 'openvz' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ], - 'lxc' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ], - 'qemu' => [ 'vma.xz', 'vms.gz', 'vmx.zst', '', ], - 'none' => [ 'tar.gz', ], + 'openvz' => ['zip', 'tgz.lzo', 'zip.gz', ''], + 'lxc' => ['zip', 'tgz.lzo', 'zip.gz', ''], + 'qemu' => ['vma.xz', 'vms.gz', 'vmx.zst', ''], + 'none' => ['tar.gz'], }; # create tests for failed matches for my $virt (sort keys %$non_bkp_suffix) { my $suffix = $non_bkp_suffix->{$virt}; for my $s (@$suffix) { - my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s"; - push @$tests, { - description => "Failed match: Backup archive, $virt, $s", - archive => $archive, - expected => "ERROR: couldn't determine archive info from '$archive'\n", - }; + my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s"; + push @$tests, + { + description => "Failed match: Backup archive, $virt, $s", + archive => $archive, + expected => "ERROR: couldn't determine archive info from '$archive'\n", + }; } } - plan tests => scalar @$tests; for my $tt (@$tests) { diff --git a/src/test/disklist_test.pm b/src/test/disklist_test.pm index 97f11fc..a0dc01f 100644 --- a/src/test/disklist_test.pm +++ b/src/test/disklist_test.pm @@ -23,57 +23,57 @@ sub mocked_run_command { my $outputlines = []; if (my $ref = ref($cmd)) { - if ($cmd->[0] =~ m/udevadm/i) { - # simulate udevadm output - my $dev = $cmd->[3]; - $dev =~ s|/sys/block/||; - @$outputlines = split(/\n/, read_test_file("${dev}_udevadm")); + if ($cmd->[0] =~ m/udevadm/i) { + # simulate udevadm output + my $dev = $cmd->[3]; + $dev =~ s|/sys/block/||; + @$outputlines = split(/\n/, read_test_file("${dev}_udevadm")); - } elsif ($cmd->[0] =~ m/smartctl/i) { - # simulate smartctl output - my $dev; - my $type; - if (@$cmd > 3) { - $dev = $cmd->[5]; - $type = 'smart'; - } else { - $dev = $cmd->[2]; - $type = 'health'; - } - $dev =~ s|/dev/||; - @$outputlines = split(/\n/, read_test_file("${dev}_${type}")); - } elsif ($cmd->[0] =~ m/sgdisk/i) { - # simulate sgdisk - die "implement me: @$cmd\n"; - } elsif ($cmd->[0] =~ m/zpool/i) { - # simulate zpool output - @$outputlines = split(/\n/, read_test_file('zpool')); + } elsif ($cmd->[0] =~ m/smartctl/i) { + # simulate smartctl output + my $dev; + my $type; + if (@$cmd > 3) { + $dev = $cmd->[5]; + $type = 'smart'; + } else { + $dev = $cmd->[2]; + $type = 'health'; + } + $dev =~ s|/dev/||; + @$outputlines = split(/\n/, read_test_file("${dev}_${type}")); + } elsif ($cmd->[0] =~ m/sgdisk/i) { + # simulate sgdisk + die "implement me: @$cmd\n"; + } elsif ($cmd->[0] =~ m/zpool/i) { + # simulate zpool output + @$outputlines = split(/\n/, read_test_file('zpool')); - } elsif ($cmd->[0] =~ m/pvs/i) { - # simulate lvs output - @$outputlines = split(/\n/, read_test_file('pvs')); - } elsif ($cmd->[0] =~ m/lvs/i) { - @$outputlines = split(/\n/, read_test_file('lvs')); - } elsif ($cmd->[0] =~ m/lsblk/i) { - my $content = read_test_file('lsblk'); - if ($content eq '') { - $content = '{}'; - } - @$outputlines = split(/\n/, $content); - } else { - die "unexpected run_command call: '@$cmd', aborting\n"; - } + } elsif ($cmd->[0] =~ m/pvs/i) { + # simulate lvs output + @$outputlines = split(/\n/, read_test_file('pvs')); + } elsif ($cmd->[0] =~ m/lvs/i) { + @$outputlines = split(/\n/, 
read_test_file('lvs')); + } elsif ($cmd->[0] =~ m/lsblk/i) { + my $content = read_test_file('lsblk'); + if ($content eq '') { + $content = '{}'; + } + @$outputlines = split(/\n/, $content); + } else { + die "unexpected run_command call: '@$cmd', aborting\n"; + } } else { - print "unexpected run_command call: '@$cmd', aborting\n"; - die; + print "unexpected run_command call: '@$cmd', aborting\n"; + die; } my $outfunc; if ($param{outfunc}) { - $outfunc = $param{outfunc}; - map { &$outfunc(($_)) } @$outputlines; + $outfunc = $param{outfunc}; + map { &$outfunc(($_)) } @$outputlines; - return 0; + return 0; } } @@ -107,16 +107,16 @@ sub mocked_dir_glob_foreach { my $lines = []; # read lines in from file - if ($dir =~ m{^/sys/block$} ) { - @$lines = split(/\n/, read_test_file('disklist')); + if ($dir =~ m{^/sys/block$}) { + @$lines = split(/\n/, read_test_file('disklist')); } elsif ($dir =~ m{^/sys/block/([^/]+)}) { - @$lines = split(/\n/, read_test_file('partlist')); + @$lines = split(/\n/, read_test_file('partlist')); } foreach my $line (@$lines) { - if ($line =~ m/$regex/) { - &$sub($line); - } + if ($line =~ m/$regex/) { + &$sub($line); + } } } @@ -125,8 +125,8 @@ sub mocked_parse_proc_mounts { my $mounts = []; - foreach my $line(split(/\n/, $text)) { - push @$mounts, [split(/\s+/, $line)]; + foreach my $line (split(/\n/, $text)) { + push @$mounts, [split(/\s+/, $line)]; } return $mounts; @@ -135,83 +135,85 @@ sub mocked_parse_proc_mounts { sub read_test_file { my ($filename) = @_; - if (!-f "disk_tests/$testcasedir/$filename") { - print "file '$testcasedir/$filename' not found\n"; - return ''; + if (!-f "disk_tests/$testcasedir/$filename") { + print "file '$testcasedir/$filename' not found\n"; + return ''; } - open (my $fh, '<', "disk_tests/$testcasedir/$filename") - or die "Cannot open disk_tests/$testcasedir/$filename: $!"; + open(my $fh, '<', "disk_tests/$testcasedir/$filename") + or die "Cannot open disk_tests/$testcasedir/$filename: $!"; my $output = <$fh> // ''; chomp $output if $output; while (my $line = <$fh>) { - chomp $line; - $output .= "\n$line"; + chomp $line; + $output .= "\n$line"; } return $output; } - sub test_disk_list { my ($testdir) = @_; subtest "Test '$testdir'" => sub { - my $testcount = 0; - $testcasedir = $testdir; + my $testcount = 0; + $testcasedir = $testdir; - my $disks; - my $expected_disk_list; - eval { - $disks = PVE::Diskmanage::get_disks(); - }; - warn $@ if $@; - $expected_disk_list = decode_json(read_test_file('disklist_expected.json')); + my $disks; + my $expected_disk_list; + eval { $disks = PVE::Diskmanage::get_disks(); }; + warn $@ if $@; + $expected_disk_list = decode_json(read_test_file('disklist_expected.json')); - print Dumper($disks) if $print; - $testcount++; - is_deeply($disks, $expected_disk_list, 'disk list should be the same'); + print Dumper($disks) if $print; + $testcount++; + is_deeply($disks, $expected_disk_list, 'disk list should be the same'); - foreach my $disk (sort keys %$disks) { - my $smart; - my $expected_smart; - eval { - $smart = PVE::Diskmanage::get_smart_data("/dev/$disk"); - print Dumper($smart) if $print; - $expected_smart = decode_json(read_test_file("${disk}_smart_expected.json")); - }; + foreach my $disk (sort keys %$disks) { + my $smart; + my $expected_smart; + eval { + $smart = PVE::Diskmanage::get_smart_data("/dev/$disk"); + print Dumper($smart) if $print; + $expected_smart = decode_json(read_test_file("${disk}_smart_expected.json")); + }; - if ($smart && $expected_smart) { - $testcount++; - is_deeply($smart, 
$expected_smart, "smart data for '$disk' should be the same"); - } elsif ($smart && -f "disk_tests/$testcasedir/${disk}_smart_expected.json") { - $testcount++; - ok(0, "could not parse expected smart for '$disk'\n"); - } - my $disk_tmp = {}; + if ($smart && $expected_smart) { + $testcount++; + is_deeply($smart, $expected_smart, "smart data for '$disk' should be the same"); + } elsif ($smart && -f "disk_tests/$testcasedir/${disk}_smart_expected.json") { + $testcount++; + ok(0, "could not parse expected smart for '$disk'\n"); + } + my $disk_tmp = {}; - # test single disk parameter - $disk_tmp = PVE::Diskmanage::get_disks($disk); - warn $@ if $@; - $testcount++; - print Dumper $disk_tmp if $print; - is_deeply($disk_tmp->{$disk}, $expected_disk_list->{$disk}, "disk $disk should be the same"); + # test single disk parameter + $disk_tmp = PVE::Diskmanage::get_disks($disk); + warn $@ if $@; + $testcount++; + print Dumper $disk_tmp if $print; + is_deeply( + $disk_tmp->{$disk}, + $expected_disk_list->{$disk}, + "disk $disk should be the same", + ); + # test wrong parameter + eval { PVE::Diskmanage::get_disks({ test => 1 }); }; + my $err = $@; + $testcount++; + is_deeply( + $err, + "disks is not a string or array reference\n", + "error message should be the same", + ); - # test wrong parameter - eval { - PVE::Diskmanage::get_disks( { test => 1 } ); - }; - my $err = $@; - $testcount++; - is_deeply($err, "disks is not a string or array reference\n", "error message should be the same"); + } + # test multi disk parameter + $disks = PVE::Diskmanage::get_disks([keys %$disks]); + $testcount++; + is_deeply($disks, $expected_disk_list, 'disk list should be the same'); - } - # test multi disk parameter - $disks = PVE::Diskmanage::get_disks( [ keys %$disks ] ); - $testcount++; - is_deeply($disks, $expected_disk_list, 'disk list should be the same'); - - done_testing($testcount); + done_testing($testcount); }; } @@ -235,24 +237,26 @@ $diskmanage_module->mock('is_iscsi' => \&mocked_is_iscsi); print("\tMocked is_iscsi\n"); $diskmanage_module->mock('assert_blockdev' => sub { return 1; }); print("\tMocked assert_blockdev\n"); -$diskmanage_module->mock('dir_is_empty' => sub { - # all partitions have a holder dir - my $val = shift; - if ($val =~ m|^/sys/block/.+/.+/|) { - return 0; - } - return 1; - }); +$diskmanage_module->mock( + 'dir_is_empty' => sub { + # all partitions have a holder dir + my $val = shift; + if ($val =~ m|^/sys/block/.+/.+/|) { + return 0; + } + return 1; + }, +); print("\tMocked dir_is_empty\n"); $diskmanage_module->mock('check_bin' => sub { return 1; }); print("\tMocked check_bin\n"); -my $tools_module= Test::MockModule->new('PVE::ProcFSTools', no_auto => 1); +my $tools_module = Test::MockModule->new('PVE::ProcFSTools', no_auto => 1); $tools_module->mock('parse_proc_mounts' => \&mocked_parse_proc_mounts); print("\tMocked parse_proc_mounts\n"); print("Done Setting up Mocking\n\n"); print("Beginning Tests:\n\n"); -opendir (my $dh, 'disk_tests') +opendir(my $dh, 'disk_tests') or die "Cannot open disk_tests: $!"; while (readdir $dh) { diff --git a/src/test/filesystem_path_test.pm b/src/test/filesystem_path_test.pm index c1b6d90..af52380 100644 --- a/src/test/filesystem_path_test.pm +++ b/src/test/filesystem_path_test.pm @@ -16,54 +16,44 @@ my $path = '/some/path'; # expected => the array of return values; or the die message my $tests = [ { - volname => '1234/vm-1234-disk-0.raw', - snapname => undef, - expected => [ - "$path/images/1234/vm-1234-disk-0.raw", - '1234', - 'images' - ], + volname => 
'1234/vm-1234-disk-0.raw', + snapname => undef, + expected => [ + "$path/images/1234/vm-1234-disk-0.raw", '1234', 'images', + ], }, { - volname => '1234/vm-1234-disk-0.raw', - snapname => 'my_snap', - expected => "can't snapshot this image format\n" + volname => '1234/vm-1234-disk-0.raw', + snapname => 'my_snap', + expected => "can't snapshot this image format\n", }, { - volname => '1234/vm-1234-disk-0.qcow2', - snapname => undef, - expected => [ - "$path/images/1234/vm-1234-disk-0.qcow2", - '1234', - 'images' - ], + volname => '1234/vm-1234-disk-0.qcow2', + snapname => undef, + expected => [ + "$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images', + ], }, { - volname => '1234/vm-1234-disk-0.qcow2', - snapname => 'my_snap', - expected => [ - "$path/images/1234/vm-1234-disk-0.qcow2", - '1234', - 'images' - ], + volname => '1234/vm-1234-disk-0.qcow2', + snapname => 'my_snap', + expected => [ + "$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images', + ], }, { - volname => 'iso/my-awesome-proxmox.iso', - snapname => undef, - expected => [ - "$path/template/iso/my-awesome-proxmox.iso", - undef, - 'iso' - ], + volname => 'iso/my-awesome-proxmox.iso', + snapname => undef, + expected => [ + "$path/template/iso/my-awesome-proxmox.iso", undef, 'iso', + ], }, { - volname => "backup/vzdump-qemu-1234-2020_03_30-21_12_40.vma", - snapname => undef, - expected => [ - "$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma", - 1234, - 'backup' - ], + volname => "backup/vzdump-qemu-1234-2020_03_30-21_12_40.vma", + snapname => undef, + expected => [ + "$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma", 1234, 'backup', + ], }, ]; @@ -76,13 +66,11 @@ foreach my $tt (@$tests) { my $scfg = { path => $path }; my $got; - eval { - $got = [ PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname) ]; - }; + eval { $got = [PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname)]; }; $got = $@ if $@; is_deeply($got, $expected, "wantarray: filesystem_path for $volname") - || diag(explain($got)); + || diag(explain($got)); } diff --git a/src/test/get_subdir_test.pm b/src/test/get_subdir_test.pm index b9d61d5..5fb5445 100644 --- a/src/test/get_subdir_test.pm +++ b/src/test/get_subdir_test.pm @@ -17,21 +17,26 @@ my $vtype_subdirs = PVE::Storage::Plugin::get_vtype_subdirs(); # [2] => expected return from get_subdir my $tests = [ # failed matches - [ $scfg_with_path, 'none', "unknown vtype 'none'\n" ], - [ {}, 'iso', "storage definition has no path\n" ], + [$scfg_with_path, 'none', "unknown vtype 'none'\n"], + [{}, 'iso', "storage definition has no path\n"], ]; # creates additional positive tests foreach my $type (keys %$vtype_subdirs) { my $path = "$scfg_with_path->{path}/$vtype_subdirs->{$type}"; - push @$tests, [ $scfg_with_path, $type, $path ]; + push @$tests, [$scfg_with_path, $type, $path]; } # creates additional tests for overrides foreach my $type (keys %$vtype_subdirs) { my $override = "${type}_override"; my $scfg_with_override = { path => '/some/path', 'content-dirs' => { $type => $override } }; - push @$tests, [ $scfg_with_override, $type, "$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}" ]; + push @$tests, + [ + $scfg_with_override, + $type, + "$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}", + ]; } plan tests => scalar @$tests; @@ -43,7 +48,7 @@ foreach my $tt (@$tests) { eval { $got = PVE::Storage::Plugin->get_subdir($scfg, $type) }; $got = $@ if $@; - is ($got, $expected, "get_subdir for $type") || diag(explain($got)); + is($got, 
$expected, "get_subdir for $type") || diag(explain($got)); } done_testing(); diff --git a/src/test/list_volumes_test.pm b/src/test/list_volumes_test.pm index 7b6df6a..e528fc3 100644 --- a/src/test/list_volumes_test.pm +++ b/src/test/list_volumes_test.pm @@ -27,52 +27,52 @@ use constant DEFAULT_CTIME => 1234567890; my $mocked_vmlist = { 'version' => 1, 'ids' => { - '16110' => { - 'node' => 'x42', - 'type' => 'qemu', - 'version' => 4, - }, - '16112' => { - 'node' => 'x42', - 'type' => 'lxc', - 'version' => 7, - }, - '16114' => { - 'node' => 'x42', - 'type' => 'qemu', - 'version' => 2, - }, - '16113' => { - 'node' => 'x42', - 'type' => 'qemu', - 'version' => 5, - }, - '16115' => { - 'node' => 'x42', - 'type' => 'qemu', - 'version' => 1, - }, - '9004' => { - 'node' => 'x42', - 'type' => 'qemu', - 'version' => 6, - } - } + '16110' => { + 'node' => 'x42', + 'type' => 'qemu', + 'version' => 4, + }, + '16112' => { + 'node' => 'x42', + 'type' => 'lxc', + 'version' => 7, + }, + '16114' => { + 'node' => 'x42', + 'type' => 'qemu', + 'version' => 2, + }, + '16113' => { + 'node' => 'x42', + 'type' => 'qemu', + 'version' => 5, + }, + '16115' => { + 'node' => 'x42', + 'type' => 'qemu', + 'version' => 1, + }, + '9004' => { + 'node' => 'x42', + 'type' => 'qemu', + 'version' => 6, + }, + }, }; my $storage_dir = File::Temp->newdir(); my $scfg = { - 'type' => 'dir', + 'type' => 'dir', 'maxfiles' => 0, - 'path' => $storage_dir, - 'shared' => 0, - 'content' => { - 'iso' => 1, - 'rootdir' => 1, - 'vztmpl' => 1, - 'images' => 1, - 'snippets' => 1, - 'backup' => 1, + 'path' => $storage_dir, + 'shared' => 0, + 'content' => { + 'iso' => 1, + 'rootdir' => 1, + 'vztmpl' => 1, + 'images' => 1, + 'snippets' => 1, + 'backup' => 1, }, }; @@ -84,389 +84,387 @@ my $scfg = { # (content, ctime, format, parent, size, used, vimd, volid) my @tests = ( { - description => 'VMID: 16110, VM, qcow2, backup, snippets', - vmid => '16110', - files => [ - "$storage_dir/images/16110/vm-16110-disk-0.qcow2", - "$storage_dir/images/16110/vm-16110-disk-1.raw", - "$storage_dir/images/16110/vm-16110-disk-2.vmdk", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst", - "$storage_dir/snippets/userconfig.yaml", - "$storage_dir/snippets/hookscript.pl", - ], - expected => [ - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16110', - 'volid' => 'local:16110/vm-16110-disk-0.qcow2', - }, - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'raw', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16110', - 'volid' => 'local:16110/vm-16110-disk-1.raw', - }, - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'vmdk', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16110', - 'volid' => 'local:16110/vm-16110-disk-2.vmdk', - }, - { - 'content' => 'backup', - 'ctime' => 1585602700, - 'format' => 'vma.gz', - 'size' => DEFAULT_SIZE, - 'subtype' => 'qemu', - 'vmid' => '16110', - 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz', - }, - { - 'content' => 'backup', - 'ctime' => 1585602765, - 'format' => 'vma.lzo', - 'size' => DEFAULT_SIZE, - 'subtype' => 'qemu', - 'vmid' => '16110', - 'volid' => 
'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo', - }, - { - 'content' => 'backup', - 'ctime' => 1585602835, - 'format' => 'vma', - 'size' => DEFAULT_SIZE, - 'subtype' => 'qemu', - 'vmid' => '16110', - 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma', - }, - { - 'content' => 'backup', - 'ctime' => 1585602835, - 'format' => 'vma.zst', - 'size' => DEFAULT_SIZE, - 'subtype' => 'qemu', - 'vmid' => '16110', - 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst', - }, - { - 'content' => 'snippets', - 'ctime' => DEFAULT_CTIME, - 'format' => 'snippet', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:snippets/hookscript.pl', - }, - { - 'content' => 'snippets', - 'ctime' => DEFAULT_CTIME, - 'format' => 'snippet', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:snippets/userconfig.yaml', - }, - ], + description => 'VMID: 16110, VM, qcow2, backup, snippets', + vmid => '16110', + files => [ + "$storage_dir/images/16110/vm-16110-disk-0.qcow2", + "$storage_dir/images/16110/vm-16110-disk-1.raw", + "$storage_dir/images/16110/vm-16110-disk-2.vmdk", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst", + "$storage_dir/snippets/userconfig.yaml", + "$storage_dir/snippets/hookscript.pl", + ], + expected => [ + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16110', + 'volid' => 'local:16110/vm-16110-disk-0.qcow2', + }, + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'raw', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16110', + 'volid' => 'local:16110/vm-16110-disk-1.raw', + }, + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'vmdk', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16110', + 'volid' => 'local:16110/vm-16110-disk-2.vmdk', + }, + { + 'content' => 'backup', + 'ctime' => 1585602700, + 'format' => 'vma.gz', + 'size' => DEFAULT_SIZE, + 'subtype' => 'qemu', + 'vmid' => '16110', + 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz', + }, + { + 'content' => 'backup', + 'ctime' => 1585602765, + 'format' => 'vma.lzo', + 'size' => DEFAULT_SIZE, + 'subtype' => 'qemu', + 'vmid' => '16110', + 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo', + }, + { + 'content' => 'backup', + 'ctime' => 1585602835, + 'format' => 'vma', + 'size' => DEFAULT_SIZE, + 'subtype' => 'qemu', + 'vmid' => '16110', + 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma', + }, + { + 'content' => 'backup', + 'ctime' => 1585602835, + 'format' => 'vma.zst', + 'size' => DEFAULT_SIZE, + 'subtype' => 'qemu', + 'vmid' => '16110', + 'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst', + }, + { + 'content' => 'snippets', + 'ctime' => DEFAULT_CTIME, + 'format' => 'snippet', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:snippets/hookscript.pl', + }, + { + 'content' => 'snippets', + 'ctime' => DEFAULT_CTIME, + 'format' => 'snippet', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:snippets/userconfig.yaml', + }, + ], }, { - description => 'VMID: 16112, lxc, raw, backup', - vmid => '16112', - files => [ - "$storage_dir/images/16112/vm-16112-disk-0.raw", - 
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo", - "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz", - "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst", - "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_59_30.tgz", - "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2", - ], - expected => [ - { - 'content' => 'rootdir', - 'ctime' => DEFAULT_CTIME, - 'format' => 'raw', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16112', - 'volid' => 'local:16112/vm-16112-disk-0.raw', - }, - { - 'content' => 'backup', - 'ctime' => 1585604370, - 'format' => 'tar.lzo', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '16112', - 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo', - }, - { - 'content' => 'backup', - 'ctime' => 1585604970, - 'format' => 'tar.gz', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '16112', - 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz', - }, - { - 'content' => 'backup', - 'ctime' => 1585604970, - 'format' => 'tar.zst', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '16112', - 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst', - }, - { - 'content' => 'backup', - 'ctime' => 1585605570, - 'format' => 'tgz', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '16112', - 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_59_30.tgz', - }, - { - 'content' => 'backup', - 'ctime' => 1585604370, - 'format' => 'tar.bz2', - 'size' => DEFAULT_SIZE, - 'subtype' => 'openvz', - 'vmid' => '16112', - 'volid' => 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2', - }, - ], + description => 'VMID: 16112, lxc, raw, backup', + vmid => '16112', + files => [ + "$storage_dir/images/16112/vm-16112-disk-0.raw", + "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo", + "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz", + "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst", + "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_59_30.tgz", + "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2", + ], + expected => [ + { + 'content' => 'rootdir', + 'ctime' => DEFAULT_CTIME, + 'format' => 'raw', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16112', + 'volid' => 'local:16112/vm-16112-disk-0.raw', + }, + { + 'content' => 'backup', + 'ctime' => 1585604370, + 'format' => 'tar.lzo', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '16112', + 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo', + }, + { + 'content' => 'backup', + 'ctime' => 1585604970, + 'format' => 'tar.gz', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '16112', + 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz', + }, + { + 'content' => 'backup', + 'ctime' => 1585604970, + 'format' => 'tar.zst', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '16112', + 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst', + }, + { + 'content' => 'backup', + 'ctime' => 1585605570, + 'format' => 'tgz', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '16112', + 'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_59_30.tgz', + }, + { + 'content' => 'backup', + 'ctime' => 1585604370, + 'format' => 'tar.bz2', + 'size' => DEFAULT_SIZE, + 'subtype' => 'openvz', + 'vmid' => '16112', + 'volid' => 
'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2', + }, + ], }, { - description => 'VMID: 16114, VM, qcow2, linked clone', - vmid => '16114', - files => [ - "$storage_dir/images/16114/vm-16114-disk-0.qcow2", - "$storage_dir/images/16114/vm-16114-disk-1.qcow2", - ], - parent => [ - "../9004/base-9004-disk-0.qcow2", - "../9004/base-9004-disk-1.qcow2", - ], - expected => [ - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => '../9004/base-9004-disk-0.qcow2', - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16114', - 'volid' => 'local:9004/base-9004-disk-0.qcow2/16114/vm-16114-disk-0.qcow2', - }, - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => '../9004/base-9004-disk-1.qcow2', - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '16114', - 'volid' => 'local:9004/base-9004-disk-1.qcow2/16114/vm-16114-disk-1.qcow2', - }, - ], + description => 'VMID: 16114, VM, qcow2, linked clone', + vmid => '16114', + files => [ + "$storage_dir/images/16114/vm-16114-disk-0.qcow2", + "$storage_dir/images/16114/vm-16114-disk-1.qcow2", + ], + parent => [ + "../9004/base-9004-disk-0.qcow2", "../9004/base-9004-disk-1.qcow2", + ], + expected => [ + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => '../9004/base-9004-disk-0.qcow2', + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16114', + 'volid' => 'local:9004/base-9004-disk-0.qcow2/16114/vm-16114-disk-0.qcow2', + }, + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => '../9004/base-9004-disk-1.qcow2', + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '16114', + 'volid' => 'local:9004/base-9004-disk-1.qcow2/16114/vm-16114-disk-1.qcow2', + }, + ], }, { - description => 'VMID: 9004, VM, template, qcow2', - vmid => '9004', - files => [ - "$storage_dir/images/9004/base-9004-disk-0.qcow2", - "$storage_dir/images/9004/base-9004-disk-1.qcow2", - ], - expected => [ - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '9004', - 'volid' => 'local:9004/base-9004-disk-0.qcow2', - }, - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => undef, - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '9004', - 'volid' => 'local:9004/base-9004-disk-1.qcow2', - }, - ], + description => 'VMID: 9004, VM, template, qcow2', + vmid => '9004', + files => [ + "$storage_dir/images/9004/base-9004-disk-0.qcow2", + "$storage_dir/images/9004/base-9004-disk-1.qcow2", + ], + expected => [ + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '9004', + 'volid' => 'local:9004/base-9004-disk-0.qcow2', + }, + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => undef, + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '9004', + 'volid' => 'local:9004/base-9004-disk-1.qcow2', + }, + ], }, { - description => 'VMID: none, templates, snippets, backup', - vmid => undef, - files => [ - "$storage_dir/dump/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz", - "$storage_dir/dump/vzdump-lxc-19254-2019_01_21-19_29_19.tar", - "$storage_dir/template/iso/archlinux-2020.02.01-x86_64.iso", - "$storage_dir/template/iso/debian-8.11.1-amd64-DVD-1.iso", - 
"$storage_dir/template/iso/debian-9.12.0-amd64-netinst.iso", - "$storage_dir/template/iso/proxmox-ve_6.1-1.iso", - "$storage_dir/template/cache/archlinux-base_20190924-1_amd64.tar.gz", - "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz", - "$storage_dir/template/cache/debian-11.0-standard_11.0-1_amd64.tar.bz2", - "$storage_dir/template/cache/alpine-3.10-default_20190626_amd64.tar.xz", - "$storage_dir/snippets/userconfig.yaml", - "$storage_dir/snippets/hookscript.pl", - "$storage_dir/private/1234/", # fileparse needs / at the end - "$storage_dir/private/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end - ], - expected => [ - { - 'content' => 'vztmpl', - 'ctime' => DEFAULT_CTIME, - 'format' => 'txz', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:vztmpl/alpine-3.10-default_20190626_amd64.tar.xz', - }, - { - 'content' => 'vztmpl', - 'ctime' => DEFAULT_CTIME, - 'format' => 'tgz', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:vztmpl/archlinux-base_20190924-1_amd64.tar.gz', - }, - { - 'content' => 'vztmpl', - 'ctime' => DEFAULT_CTIME, - 'format' => 'tgz', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', - }, - { - 'content' => 'vztmpl', - 'ctime' => DEFAULT_CTIME, - 'format' => 'tbz2', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:vztmpl/debian-11.0-standard_11.0-1_amd64.tar.bz2', - }, - { - 'content' => 'iso', - 'ctime' => DEFAULT_CTIME, - 'format' => 'iso', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:iso/archlinux-2020.02.01-x86_64.iso', - }, - { - 'content' => 'iso', - 'ctime' => DEFAULT_CTIME, - 'format' => 'iso', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:iso/debian-8.11.1-amd64-DVD-1.iso', - }, - { - 'content' => 'iso', - 'ctime' => DEFAULT_CTIME, - 'format' => 'iso', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:iso/debian-9.12.0-amd64-netinst.iso', - }, - { - 'content' => 'iso', - 'ctime' => DEFAULT_CTIME, - 'format' => 'iso', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:iso/proxmox-ve_6.1-1.iso', - }, - { - 'content' => 'backup', - 'ctime' => 1580759863, - 'format' => 'tar.gz', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '19253', - 'volid' => 'local:backup/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz', - }, - { - 'content' => 'backup', - 'ctime' => 1548098959, - 'format' => 'tar', - 'size' => DEFAULT_SIZE, - 'subtype' => 'lxc', - 'vmid' => '19254', - 'volid' => 'local:backup/vzdump-lxc-19254-2019_01_21-19_29_19.tar', - }, - { - 'content' => 'snippets', - 'ctime' => DEFAULT_CTIME, - 'format' => 'snippet', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:snippets/hookscript.pl', - }, - { - 'content' => 'snippets', - 'ctime' => DEFAULT_CTIME, - 'format' => 'snippet', - 'size' => DEFAULT_SIZE, - 'volid' => 'local:snippets/userconfig.yaml', - }, - ], + description => 'VMID: none, templates, snippets, backup', + vmid => undef, + files => [ + "$storage_dir/dump/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz", + "$storage_dir/dump/vzdump-lxc-19254-2019_01_21-19_29_19.tar", + "$storage_dir/template/iso/archlinux-2020.02.01-x86_64.iso", + "$storage_dir/template/iso/debian-8.11.1-amd64-DVD-1.iso", + "$storage_dir/template/iso/debian-9.12.0-amd64-netinst.iso", + "$storage_dir/template/iso/proxmox-ve_6.1-1.iso", + "$storage_dir/template/cache/archlinux-base_20190924-1_amd64.tar.gz", + "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz", + "$storage_dir/template/cache/debian-11.0-standard_11.0-1_amd64.tar.bz2", + "$storage_dir/template/cache/alpine-3.10-default_20190626_amd64.tar.xz", + 
"$storage_dir/snippets/userconfig.yaml", + "$storage_dir/snippets/hookscript.pl", + "$storage_dir/private/1234/", # fileparse needs / at the end + "$storage_dir/private/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end + ], + expected => [ + { + 'content' => 'vztmpl', + 'ctime' => DEFAULT_CTIME, + 'format' => 'txz', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:vztmpl/alpine-3.10-default_20190626_amd64.tar.xz', + }, + { + 'content' => 'vztmpl', + 'ctime' => DEFAULT_CTIME, + 'format' => 'tgz', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:vztmpl/archlinux-base_20190924-1_amd64.tar.gz', + }, + { + 'content' => 'vztmpl', + 'ctime' => DEFAULT_CTIME, + 'format' => 'tgz', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', + }, + { + 'content' => 'vztmpl', + 'ctime' => DEFAULT_CTIME, + 'format' => 'tbz2', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:vztmpl/debian-11.0-standard_11.0-1_amd64.tar.bz2', + }, + { + 'content' => 'iso', + 'ctime' => DEFAULT_CTIME, + 'format' => 'iso', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:iso/archlinux-2020.02.01-x86_64.iso', + }, + { + 'content' => 'iso', + 'ctime' => DEFAULT_CTIME, + 'format' => 'iso', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:iso/debian-8.11.1-amd64-DVD-1.iso', + }, + { + 'content' => 'iso', + 'ctime' => DEFAULT_CTIME, + 'format' => 'iso', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:iso/debian-9.12.0-amd64-netinst.iso', + }, + { + 'content' => 'iso', + 'ctime' => DEFAULT_CTIME, + 'format' => 'iso', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:iso/proxmox-ve_6.1-1.iso', + }, + { + 'content' => 'backup', + 'ctime' => 1580759863, + 'format' => 'tar.gz', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '19253', + 'volid' => 'local:backup/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz', + }, + { + 'content' => 'backup', + 'ctime' => 1548098959, + 'format' => 'tar', + 'size' => DEFAULT_SIZE, + 'subtype' => 'lxc', + 'vmid' => '19254', + 'volid' => 'local:backup/vzdump-lxc-19254-2019_01_21-19_29_19.tar', + }, + { + 'content' => 'snippets', + 'ctime' => DEFAULT_CTIME, + 'format' => 'snippet', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:snippets/hookscript.pl', + }, + { + 'content' => 'snippets', + 'ctime' => DEFAULT_CTIME, + 'format' => 'snippet', + 'size' => DEFAULT_SIZE, + 'volid' => 'local:snippets/userconfig.yaml', + }, + ], }, { - description => 'VMID: none, parent, non-matching', - # string instead of vmid in folder - #"$storage_dir/images/ssss/base-4321-disk-0.qcow2/1234/vm-1234-disk-0.qcow2", - vmid => undef, - files => [ - "$storage_dir/images/1234/vm-1234-disk-0.qcow2", - ], - parent => [ - "../ssss/base-4321-disk-0.qcow2", - ], - expected => [ - { - 'content' => 'images', - 'ctime' => DEFAULT_CTIME, - 'format' => 'qcow2', - 'parent' => '../ssss/base-4321-disk-0.qcow2', - 'size' => DEFAULT_SIZE, - 'used' => DEFAULT_USED, - 'vmid' => '1234', - 'volid' => 'local:1234/vm-1234-disk-0.qcow2', - } - ], + description => 'VMID: none, parent, non-matching', + # string instead of vmid in folder + #"$storage_dir/images/ssss/base-4321-disk-0.qcow2/1234/vm-1234-disk-0.qcow2", + vmid => undef, + files => [ + "$storage_dir/images/1234/vm-1234-disk-0.qcow2", + ], + parent => [ + "../ssss/base-4321-disk-0.qcow2", + ], + expected => [ + { + 'content' => 'images', + 'ctime' => DEFAULT_CTIME, + 'format' => 'qcow2', + 'parent' => '../ssss/base-4321-disk-0.qcow2', + 'size' => DEFAULT_SIZE, + 'used' => DEFAULT_USED, + 'vmid' => '1234', + 'volid' => 'local:1234/vm-1234-disk-0.qcow2', + }, 
+ ], }, { - description => 'VMID: none, non-matching', - # failed matches - vmid => undef, - files => [ - "$storage_dir/images/ssss/base-4321-disk-0.raw", - "$storage_dir/images/ssss/vm-1234-disk-0.qcow2", - "$storage_dir/template/iso/yet-again-a-installation-disk.dvd", - "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz", - "$storage_dir/private/subvol-19254-disk-0/19254", - "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz", - "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz", - "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz", - ], - expected => [], # returns empty list + description => 'VMID: none, non-matching', + # failed matches + vmid => undef, + files => [ + "$storage_dir/images/ssss/base-4321-disk-0.raw", + "$storage_dir/images/ssss/vm-1234-disk-0.qcow2", + "$storage_dir/template/iso/yet-again-a-installation-disk.dvd", + "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz", + "$storage_dir/private/subvol-19254-disk-0/19254", + "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz", + "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz", + "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz", + ], + expected => [], # returns empty list }, ); - # provide static vmlist for tests my $mock_cluster = Test::MockModule->new('PVE::Cluster', no_auto => 1); $mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; }); @@ -474,26 +472,31 @@ $mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; }); # populate is File::stat's method to fill all information from CORE::stat into # an blessed array. my $mock_stat = Test::MockModule->new('File::stat', no_auto => 1); -$mock_stat->redefine(populate => sub { - my (@st) = @_; - $st[7] = DEFAULT_SIZE; - $st[10] = DEFAULT_CTIME; +$mock_stat->redefine( + populate => sub { + my (@st) = @_; + $st[7] = DEFAULT_SIZE; + $st[10] = DEFAULT_CTIME; - my $result = $mock_stat->original('populate')->(@st); + my $result = $mock_stat->original('populate')->(@st); - return $result; -}); + return $result; + }, +); # override info provided by qemu-img in file_size_info my $mock_fsi = Test::MockModule->new('PVE::Storage::Plugin', no_auto => 1); -$mock_fsi->redefine(file_size_info => sub { - my ($size, $format, $used, $parent, $ctime) = $mock_fsi->original('file_size_info')->(@_); +$mock_fsi->redefine( + file_size_info => sub { + my ($size, $format, $used, $parent, $ctime) = + $mock_fsi->original('file_size_info')->(@_); - $size = DEFAULT_SIZE; - $used = DEFAULT_USED; + $size = DEFAULT_SIZE; + $used = DEFAULT_USED; - return wantarray ? ($size, $format, $used, $parent, $ctime) : $size; -}); + return wantarray ? 
($size, $format, $used, $parent, $ctime) : $size; + }, +); my $plan = scalar @tests; plan tests => $plan + 1; @@ -507,54 +510,56 @@ plan tests => $plan + 1; PVE::Storage::Plugin->list_volumes('sid', $scfg_with_type, undef, ['images']); - is_deeply ($tested_vmlist, $original_vmlist, - 'PVE::Cluster::vmlist remains unmodified') - || diag ("Expected vmlist to remain\n", explain($original_vmlist), - "but it turned to\n", explain($tested_vmlist)); + is_deeply($tested_vmlist, $original_vmlist, 'PVE::Cluster::vmlist remains unmodified') + || diag( + "Expected vmlist to remain\n", + explain($original_vmlist), + "but it turned to\n", + explain($tested_vmlist), + ); } - { my $sid = 'local'; - my $types = [ 'rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets' ]; - my @suffixes = ( 'qcow2', 'raw', 'vmdk', 'vhdx' ); + my $types = ['rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets']; + my @suffixes = ('qcow2', 'raw', 'vmdk', 'vhdx'); # run through test cases foreach my $tt (@tests) { - my $vmid = $tt->{vmid}; - my $files = $tt->{files}; - my $expected = $tt->{expected}; - my $description = $tt->{description}; - my $parent = $tt->{parent}; + my $vmid = $tt->{vmid}; + my $files = $tt->{files}; + my $expected = $tt->{expected}; + my $description = $tt->{description}; + my $parent = $tt->{parent}; - # prepare environment - my $num = 0; #parent disks - for my $file (@$files) { - my ($name, $dir, $suffix) = fileparse($file, @suffixes); + # prepare environment + my $num = 0; #parent disks + for my $file (@$files) { + my ($name, $dir, $suffix) = fileparse($file, @suffixes); - make_path($dir, { verbose => 1, mode => 0755 }); + make_path($dir, { verbose => 1, mode => 0755 }); - if ($name) { - # using qemu-img to also be able to represent the backing device - my @cmd = ( '/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE ); - push @cmd, ( '-f', $suffix ) if $suffix; - push @cmd, ( '-u', '-b', @$parent[$num] ) if $parent; - push @cmd, ( '-F', $suffix ) if $parent && $suffix; - $num++; + if ($name) { + # using qemu-img to also be able to represent the backing device + my @cmd = ('/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE); + push @cmd, ('-f', $suffix) if $suffix; + push @cmd, ('-u', '-b', @$parent[$num]) if $parent; + push @cmd, ('-F', $suffix) if $parent && $suffix; + $num++; - run_command([@cmd]); - } - } + run_command([@cmd]); + } + } - my $got; - eval { $got = PVE::Storage::Plugin->list_volumes($sid, $scfg, $vmid, $types) }; - $got = $@ if $@; + my $got; + eval { $got = PVE::Storage::Plugin->list_volumes($sid, $scfg, $vmid, $types) }; + $got = $@ if $@; - is_deeply($got, $expected, $description) || diag(explain($got)); + is_deeply($got, $expected, $description) || diag(explain($got)); - # clean up after each test case, otherwise - # we get wrong results from leftover files - remove_tree($storage_dir, { verbose => 1 }); + # clean up after each test case, otherwise + # we get wrong results from leftover files + remove_tree($storage_dir, { verbose => 1 }); } } diff --git a/src/test/parse_volname_test.pm b/src/test/parse_volname_test.pm index 175500d..9e96842 100644 --- a/src/test/parse_volname_test.pm +++ b/src/test/parse_volname_test.pm @@ -19,251 +19,285 @@ my $tests = [ # VM images # { - description => 'VM disk image, linked, qcow2, vm- as base-', - volname => "$vmid/vm-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2", - expected => [ 'images', "vm-$vmid-disk-0.qcow2", "$vmid", "vm-$vmid-disk-0.qcow2", "$vmid", undef, 'qcow2', ], + description => 'VM disk image, linked, qcow2, vm- as 
base-', + volname => "$vmid/vm-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2", + expected => [ + 'images', + "vm-$vmid-disk-0.qcow2", + "$vmid", + "vm-$vmid-disk-0.qcow2", + "$vmid", + undef, + 'qcow2', + ], }, # # iso # { - description => 'ISO image, iso', - volname => 'iso/some-installation-disk.iso', - expected => ['iso', 'some-installation-disk.iso', undef, undef, undef, undef, 'raw'], + description => 'ISO image, iso', + volname => 'iso/some-installation-disk.iso', + expected => ['iso', 'some-installation-disk.iso', undef, undef, undef, undef, 'raw'], }, { - description => 'ISO image, img', - volname => 'iso/some-other-installation-disk.img', - expected => ['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'], + description => 'ISO image, img', + volname => 'iso/some-other-installation-disk.img', + expected => + ['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'], }, # # container templates # { - description => 'Container template tar.gz', - volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', - expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.gz', undef, undef, undef, undef, 'raw'], + description => 'Container template tar.gz', + volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', + expected => [ + 'vztmpl', + 'debian-10.0-standard_10.0-1_amd64.tar.gz', + undef, + undef, + undef, + undef, + 'raw', + ], }, { - description => 'Container template tar.xz', - volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz', - expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.xz', undef, undef, undef, undef, 'raw'], + description => 'Container template tar.xz', + volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz', + expected => [ + 'vztmpl', + 'debian-10.0-standard_10.0-1_amd64.tar.xz', + undef, + undef, + undef, + undef, + 'raw', + ], }, { - description => 'Container template tar.bz2', - volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2', - expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.bz2', undef, undef, undef, undef, 'raw'], + description => 'Container template tar.bz2', + volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2', + expected => [ + 'vztmpl', + 'debian-10.0-standard_10.0-1_amd64.tar.bz2', + undef, + undef, + undef, + undef, + 'raw', + ], }, # # container rootdir # { - description => 'Container rootdir, sub directory', - volname => "rootdir/$vmid", - expected => ['rootdir', "$vmid", "$vmid"], + description => 'Container rootdir, sub directory', + volname => "rootdir/$vmid", + expected => ['rootdir', "$vmid", "$vmid"], }, { - description => 'Container rootdir, subvol', - volname => "$vmid/subvol-$vmid-disk-0.subvol", - expected => [ 'images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol' ], + description => 'Container rootdir, subvol', + volname => "$vmid/subvol-$vmid-disk-0.subvol", + expected => + ['images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol'], }, { - description => 'Backup archive, no virtualization type', - volname => "backup/vzdump-none-$vmid-2020_03_30-21_39_30.tar", - expected => ['backup', "vzdump-none-$vmid-2020_03_30-21_39_30.tar", undef, undef, undef, undef, 'raw'], + description => 'Backup archive, no virtualization type', + volname => "backup/vzdump-none-$vmid-2020_03_30-21_39_30.tar", + expected => [ + 'backup', + "vzdump-none-$vmid-2020_03_30-21_39_30.tar", + undef, + undef, + undef, + undef, + 'raw', + ], }, # # Snippets # { - description => 'Snippets, yaml', - volname 
=> 'snippets/userconfig.yaml', - expected => ['snippets', 'userconfig.yaml', undef, undef, undef, undef, 'raw'], + description => 'Snippets, yaml', + volname => 'snippets/userconfig.yaml', + expected => ['snippets', 'userconfig.yaml', undef, undef, undef, undef, 'raw'], }, { - description => 'Snippets, perl', - volname => 'snippets/hookscript.pl', - expected => ['snippets', 'hookscript.pl', undef, undef, undef, undef, 'raw'], + description => 'Snippets, perl', + volname => 'snippets/hookscript.pl', + expected => ['snippets', 'hookscript.pl', undef, undef, undef, undef, 'raw'], }, # # Import # { - description => "Import, ova", - volname => 'import/import.ova', - expected => ['import', 'import.ova', undef, undef, undef ,undef, 'ova'], + description => "Import, ova", + volname => 'import/import.ova', + expected => ['import', 'import.ova', undef, undef, undef, undef, 'ova'], }, { - description => "Import, ovf", - volname => 'import/import.ovf', - expected => ['import', 'import.ovf', undef, undef, undef ,undef, 'ovf'], + description => "Import, ovf", + volname => 'import/import.ovf', + expected => ['import', 'import.ovf', undef, undef, undef, undef, 'ovf'], }, { - description => "Import, innner file of ova", - volname => 'import/import.ova/disk.qcow2', - expected => ['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'], + description => "Import, innner file of ova", + volname => 'import/import.ova/disk.qcow2', + expected => + ['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'], }, { - description => "Import, innner file of ova", - volname => 'import/import.ova/disk.vmdk', - expected => ['import', 'import.ova/disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'], + description => "Import, innner file of ova", + volname => 'import/import.ova/disk.vmdk', + expected => ['import', 'import.ova/disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'], }, { - description => "Import, innner file of ova with whitespace in name", - volname => 'import/import.ova/OS disk.vmdk', - expected => ['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'], + description => "Import, innner file of ova with whitespace in name", + volname => 'import/import.ova/OS disk.vmdk', + expected => + ['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'], }, { - description => "Import, innner file of ova", - volname => 'import/import.ova/disk.raw', - expected => ['import', 'import.ova/disk.raw', undef, undef, undef, undef, 'ova+raw'], + description => "Import, innner file of ova", + volname => 'import/import.ova/disk.raw', + expected => ['import', 'import.ova/disk.raw', undef, undef, undef, undef, 'ova+raw'], }, # # failed matches # { - description => "Failed match: VM disk image, base, raw", - volname => "ssss/base-$vmid-disk-0.raw", - expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.raw'\n", + description => "Failed match: VM disk image, base, raw", + volname => "ssss/base-$vmid-disk-0.raw", + expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.raw'\n", }, { - description => 'Failed match: ISO image, dvd', - volname => 'iso/yet-again-a-installation-disk.dvd', - expected => "unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n", + description => 'Failed match: ISO image, dvd', + volname => 'iso/yet-again-a-installation-disk.dvd', + expected => + "unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n", }, { - description => 'Failed match: 
Container template, zip.gz', - volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz', - expected => "unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n", + description => 'Failed match: Container template, zip.gz', + volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz', + expected => + "unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n", }, { - description => 'Failed match: Container rootdir, subvol', - volname => "rootdir/subvol-$vmid-disk-0", - expected => "unable to parse directory volume name 'rootdir/subvol-$vmid-disk-0'\n", + description => 'Failed match: Container rootdir, subvol', + volname => "rootdir/subvol-$vmid-disk-0", + expected => "unable to parse directory volume name 'rootdir/subvol-$vmid-disk-0'\n", }, { - description => 'Failed match: VM disk image, linked, vhdx', - volname => "$vmid/base-$vmid-disk-0.vhdx/$vmid/vm-$vmid-disk-0.vhdx", - expected => "unable to parse volume filename 'base-$vmid-disk-0.vhdx'\n", + description => 'Failed match: VM disk image, linked, vhdx', + volname => "$vmid/base-$vmid-disk-0.vhdx/$vmid/vm-$vmid-disk-0.vhdx", + expected => "unable to parse volume filename 'base-$vmid-disk-0.vhdx'\n", }, { - description => 'Failed match: VM disk image, linked, qcow2, first vmid', - volname => "ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2", - expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n", + description => 'Failed match: VM disk image, linked, qcow2, first vmid', + volname => "ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2", + expected => + "unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n", }, { - description => 'Failed match: VM disk image, linked, qcow2, second vmid', - volname => "$vmid/base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2", - expected => "unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n", + description => 'Failed match: VM disk image, linked, qcow2, second vmid', + volname => "$vmid/base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2", + expected => + "unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n", }, { - description => "Failed match: import dir but no ova/ovf/disk image", - volname => "import/test.foo", - expected => "unable to parse directory volume name 'import/test.foo'\n", + description => "Failed match: import dir but no ova/ovf/disk image", + volname => "import/test.foo", + expected => "unable to parse directory volume name 'import/test.foo'\n", }, ]; # create more test cases for VM disk images matches -my $disk_suffix = [ 'raw', 'qcow2', 'vmdk' ]; +my $disk_suffix = ['raw', 'qcow2', 'vmdk']; foreach my $s (@$disk_suffix) { my @arr = ( - { - description => "VM disk image, $s", - volname => "$vmid/vm-$vmid-disk-1.$s", - expected => [ - 'images', - "vm-$vmid-disk-1.$s", - "$vmid", - undef, - undef, - undef, - "$s", - ], - }, - { - description => "VM disk image, linked, $s", - volname => "$vmid/base-$vmid-disk-0.$s/$vmid/vm-$vmid-disk-0.$s", - expected => [ - 'images', - "vm-$vmid-disk-0.$s", - "$vmid", - "base-$vmid-disk-0.$s", - "$vmid", - undef, - "$s", - ], - }, - { - description => "VM disk image, base, $s", - volname => "$vmid/base-$vmid-disk-0.$s", - expected => [ - 'images', - "base-$vmid-disk-0.$s", - "$vmid", - undef, - undef, - 'base-', - "$s" - ], - }, + { + description => "VM disk image, $s", + volname => 
"$vmid/vm-$vmid-disk-1.$s", + expected => [ + 'images', "vm-$vmid-disk-1.$s", "$vmid", undef, undef, undef, "$s", + ], + }, + { + description => "VM disk image, linked, $s", + volname => "$vmid/base-$vmid-disk-0.$s/$vmid/vm-$vmid-disk-0.$s", + expected => [ + 'images', + "vm-$vmid-disk-0.$s", + "$vmid", + "base-$vmid-disk-0.$s", + "$vmid", + undef, + "$s", + ], + }, + { + description => "VM disk image, base, $s", + volname => "$vmid/base-$vmid-disk-0.$s", + expected => [ + 'images', "base-$vmid-disk-0.$s", "$vmid", undef, undef, 'base-', "$s", + ], + }, ); push @$tests, @arr; } - # create more test cases for backup files matches my $bkp_suffix = { - qemu => [ 'vma', 'vma.gz', 'vma.lzo', 'vma.zst' ], - lxc => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2' ], - openvz => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst' ], + qemu => ['vma', 'vma.gz', 'vma.lzo', 'vma.zst'], + lxc => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2'], + openvz => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst'], }; foreach my $virt (keys %$bkp_suffix) { my $suffix = $bkp_suffix->{$virt}; foreach my $s (@$suffix) { - my @arr = ( - { - description => "Backup archive, $virt, $s", - volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", - expected => [ - 'backup', - "vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", - "$vmid", - undef, - undef, - undef, - 'raw' - ], - }, - ); + my @arr = ( + { + description => "Backup archive, $virt, $s", + volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", + expected => [ + 'backup', + "vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", + "$vmid", + undef, + undef, + undef, + 'raw', + ], + }, + ); - push @$tests, @arr; + push @$tests, @arr; } } - # create more test cases for failed backup files matches my $non_bkp_suffix = { - qemu => [ 'vms.gz', 'vma.xz' ], - lxc => [ 'zip.gz', 'tgz.lzo' ], + qemu => ['vms.gz', 'vma.xz'], + lxc => ['zip.gz', 'tgz.lzo'], }; foreach my $virt (keys %$non_bkp_suffix) { my $suffix = $non_bkp_suffix->{$virt}; foreach my $s (@$suffix) { - my @arr = ( - { - description => "Failed match: Backup archive, $virt, $s", - volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", - expected => "unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n", - }, - ); + my @arr = ( + { + description => "Failed match: Backup archive, $virt, $s", + volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s", + expected => + "unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n", + }, + ); - push @$tests, @arr; + push @$tests, @arr; } } - # # run through test case array # @@ -278,12 +312,12 @@ foreach my $t (@$tests) { my $expected = $t->{expected}; my $got; - eval { $got = [ PVE::Storage::Plugin->parse_volname($volname) ] }; + eval { $got = [PVE::Storage::Plugin->parse_volname($volname)] }; $got = $@ if $@; is_deeply($got, $expected, $description); - $seen_vtype->{@$expected[0]} = 1 if ref $expected eq 'ARRAY'; + $seen_vtype->{ @$expected[0] } = 1 if ref $expected eq 'ARRAY'; } # to check if all $vtype_subdirs are defined in path_to_volume_id diff --git a/src/test/path_to_volume_id_test.pm b/src/test/path_to_volume_id_test.pm index 23c5a23..9a5ecbb 100644 --- a/src/test/path_to_volume_id_test.pm +++ b/src/test/path_to_volume_id_test.pm @@ -17,24 +17,24 @@ use File::Temp; my $storage_dir = File::Temp->newdir(); my $scfg = { 'digest' => 'd29306346b8b25b90a4a96165f1e8f52d1af1eda', - 'ids' => { - 'local' => { - 'shared' => 0, - 'path' => "$storage_dir", - 'type' => 'dir', - 
'maxfiles' => 0, - 'content' => { - 'snippets' => 1, - 'rootdir' => 1, - 'images' => 1, - 'iso' => 1, - 'backup' => 1, - 'vztmpl' => 1, - }, - }, + 'ids' => { + 'local' => { + 'shared' => 0, + 'path' => "$storage_dir", + 'type' => 'dir', + 'maxfiles' => 0, + 'content' => { + 'snippets' => 1, + 'rootdir' => 1, + 'images' => 1, + 'iso' => 1, + 'backup' => 1, + 'vztmpl' => 1, + }, + }, }, 'order' => { - 'local' => 1, + 'local' => 1, }, }; @@ -44,219 +44,199 @@ my $scfg = { # expected => the result that path_to_volume_id should return my @tests = ( { - description => 'Image, qcow2', - volname => "$storage_dir/images/16110/vm-16110-disk-0.qcow2", - expected => [ - 'images', - 'local:16110/vm-16110-disk-0.qcow2', - ], + description => 'Image, qcow2', + volname => "$storage_dir/images/16110/vm-16110-disk-0.qcow2", + expected => [ + 'images', 'local:16110/vm-16110-disk-0.qcow2', + ], }, { - description => 'Image, raw', - volname => "$storage_dir/images/16112/vm-16112-disk-0.raw", - expected => [ - 'images', - 'local:16112/vm-16112-disk-0.raw', - ], + description => 'Image, raw', + volname => "$storage_dir/images/16112/vm-16112-disk-0.raw", + expected => [ + 'images', 'local:16112/vm-16112-disk-0.raw', + ], }, { - description => 'Image template, qcow2', - volname => "$storage_dir/images/9004/base-9004-disk-0.qcow2", - expected => [ - 'images', - 'local:9004/base-9004-disk-0.qcow2', - ], + description => 'Image template, qcow2', + volname => "$storage_dir/images/9004/base-9004-disk-0.qcow2", + expected => [ + 'images', 'local:9004/base-9004-disk-0.qcow2', + ], }, { - description => 'Backup, vma.gz', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz", - expected => [ - 'backup', - 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz', - ], + description => 'Backup, vma.gz', + volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz", + expected => [ + 'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz', + ], }, { - description => 'Backup, vma.lzo', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo", - expected => [ - 'backup', - 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo', - ], + description => 'Backup, vma.lzo', + volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo", + expected => [ + 'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo', + ], }, { - description => 'Backup, vma', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma", - expected => [ - 'backup', - 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma', - ], + description => 'Backup, vma', + volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma", + expected => [ + 'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma', + ], }, { - description => 'Backup, tar.lzo', - volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo", - expected => [ - 'backup', - 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo', - ], + description => 'Backup, tar.lzo', + volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo", + expected => [ + 'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo', + ], }, { - description => 'Backup, vma.zst', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst", - expected => [ - 'backup', - 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst' - ], + description => 'Backup, vma.zst', + volname => 
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst", + expected => [ + 'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst', + ], }, { - description => 'Backup, tar.zst', - volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst", - expected => [ - 'backup', - 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst' - ], + description => 'Backup, tar.zst', + volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst", + expected => [ + 'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst', + ], }, { - description => 'Backup, tar.bz2', - volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2", - expected => [ - 'backup', - 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2', - ], + description => 'Backup, tar.bz2', + volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2", + expected => [ + 'backup', 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2', + ], }, { - description => 'ISO file', - volname => "$storage_dir/template/iso/yet-again-a-installation-disk.iso", - expected => [ - 'iso', - 'local:iso/yet-again-a-installation-disk.iso', - ], + description => 'ISO file', + volname => "$storage_dir/template/iso/yet-again-a-installation-disk.iso", + expected => [ + 'iso', 'local:iso/yet-again-a-installation-disk.iso', + ], }, { - description => 'CT template, tar.gz', - volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz", - expected => [ - 'vztmpl', - 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', - ], + description => 'CT template, tar.gz', + volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz", + expected => [ + 'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz', + ], }, { - description => 'CT template, wrong ending, tar bz2', - volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.bz2", - expected => [ - 'vztmpl', - 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2', - ], + description => 'CT template, wrong ending, tar bz2', + volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.bz2", + expected => [ + 'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2', + ], }, { - description => 'Rootdir', - volname => "$storage_dir/private/1234/", # fileparse needs / at the end - expected => [ - 'rootdir', - 'local:rootdir/1234', - ], + description => 'Rootdir', + volname => "$storage_dir/private/1234/", # fileparse needs / at the end + expected => [ + 'rootdir', 'local:rootdir/1234', + ], }, { - description => 'Rootdir, folder subvol', - volname => "$storage_dir/images/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end - expected => [ - 'images', - 'local:1234/subvol-1234-disk-0.subvol' - ], + description => 'Rootdir, folder subvol', + volname => "$storage_dir/images/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end + expected => [ + 'images', 'local:1234/subvol-1234-disk-0.subvol', + ], }, { - description => 'Snippets, yaml', - volname => "$storage_dir/snippets/userconfig.yaml", - expected => [ - 'snippets', - 'local:snippets/userconfig.yaml', - ], + description => 'Snippets, yaml', + volname => "$storage_dir/snippets/userconfig.yaml", + expected => [ + 'snippets', 'local:snippets/userconfig.yaml', + ], }, { - description => 'Snippets, hookscript', - volname => "$storage_dir/snippets/hookscript.pl", - expected => [ - 'snippets', - 'local:snippets/hookscript.pl', - ], + 
description => 'Snippets, hookscript', + volname => "$storage_dir/snippets/hookscript.pl", + expected => [ + 'snippets', 'local:snippets/hookscript.pl', + ], }, { - description => 'CT template, tar.xz', - volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.xz", - expected => [ - 'vztmpl', - 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz', - ], + description => 'CT template, tar.xz', + volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.xz", + expected => [ + 'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz', + ], }, { - description => 'Import, ova', - volname => "$storage_dir/import/import.ova", - expected => [ - 'import', - 'local:import/import.ova', - ], + description => 'Import, ova', + volname => "$storage_dir/import/import.ova", + expected => [ + 'import', 'local:import/import.ova', + ], }, { - description => 'Import, ovf', - volname => "$storage_dir/import/import.ovf", - expected => [ - 'import', - 'local:import/import.ovf', - ], + description => 'Import, ovf', + volname => "$storage_dir/import/import.ovf", + expected => [ + 'import', 'local:import/import.ovf', + ], }, # no matches, path or files with failures { - description => 'Base template, string as vmid in folder name', - volname => "$storage_dir/images/ssss/base-4321-disk-0.raw", - expected => [''], + description => 'Base template, string as vmid in folder name', + volname => "$storage_dir/images/ssss/base-4321-disk-0.raw", + expected => [''], }, { - description => 'ISO file, wrong ending', - volname => "$storage_dir/template/iso/yet-again-a-installation-disk.dvd", - expected => [''], + description => 'ISO file, wrong ending', + volname => "$storage_dir/template/iso/yet-again-a-installation-disk.dvd", + expected => [''], }, { - description => 'CT template, wrong ending, zip.gz', - volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz", - expected => [''], + description => 'CT template, wrong ending, zip.gz', + volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz", + expected => [''], }, { - description => 'Rootdir as subvol, wrong path', - volname => "$storage_dir/private/subvol-19254-disk-0/", - expected => [''], + description => 'Rootdir as subvol, wrong path', + volname => "$storage_dir/private/subvol-19254-disk-0/", + expected => [''], }, { - description => 'Backup, wrong format, openvz, zip.gz', - volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz", - expected => [''], + description => 'Backup, wrong format, openvz, zip.gz', + volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz", + expected => [''], }, { - description => 'Backup, wrong format, openvz, tgz.lzo', - volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo", - expected => [''], + description => 'Backup, wrong format, openvz, tgz.lzo', + volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo", + expected => [''], }, { - description => 'Backup, wrong ending, qemu, vma.xz', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz", - expected => [''], + description => 'Backup, wrong ending, qemu, vma.xz', + volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz", + expected => [''], }, { - description => 'Backup, wrong format, qemu, vms.gz', - volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz", - expected => [''], + description => 'Backup, wrong format, qemu, vms.gz', + 
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz", + expected => [''], }, { - description => 'Image, string as vmid in folder name', - volname => "$storage_dir/images/ssss/vm-1234-disk-0.qcow2", - expected => [''], + description => 'Image, string as vmid in folder name', + volname => "$storage_dir/images/ssss/vm-1234-disk-0.qcow2", + expected => [''], }, { - description => 'Import, non ova/ovf/disk image in import dir', - volname => "$storage_dir/import/test.foo", - expected => [''], + description => 'Import, non ova/ovf/disk image in import dir', + volname => "$storage_dir/import/test.foo", + expected => [''], }, ); @@ -275,19 +255,19 @@ foreach my $tt (@tests) { make_path($dir, { verbose => 1, mode => 0755 }); if ($name) { - open(my $fh, ">>", "$file") || die "Error open file: $!"; - close($fh); + open(my $fh, ">>", "$file") || die "Error open file: $!"; + close($fh); } # run tests my $got; - eval { $got = [ PVE::Storage::path_to_volume_id($scfg, $file) ] }; + eval { $got = [PVE::Storage::path_to_volume_id($scfg, $file)] }; $got = $@ if $@; is_deeply($got, $expected, $description) || diag(explain($got)); - $seen_vtype->{@$expected[0]} = 1 - if ( @$expected[0] ne '' && scalar @$expected > 1); + $seen_vtype->{ @$expected[0] } = 1 + if (@$expected[0] ne '' && scalar @$expected > 1); } # to check if all $vtype_subdirs are defined in path_to_volume_id diff --git a/src/test/prune_backups_test.pm b/src/test/prune_backups_test.pm index b57d280..5cde3a8 100644 --- a/src/test/prune_backups_test.pm +++ b/src/test/prune_backups_test.pm @@ -18,183 +18,193 @@ my $mocked_backups_lists = {}; my $basetime = 1577881101; # 2020_01_01-12_18_21 UTC foreach my $vmid (@vmids) { - push @{$mocked_backups_lists->{default}}, ( - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst", - 'ctime' => $basetime - 585*24*60*60 - 60*60, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst", - 'ctime' => $basetime - 24*60*60 - 60*60, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst", - 'ctime' => $basetime - 24*60*60 - 60*60 + 30, - 'vmid' => $vmid, - 'protected' => 1, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst", - 'ctime' => $basetime - 24*60*60 - 60*60 + 60, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst", - 'ctime' => $basetime - 60*60, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst", - 'ctime' => $basetime, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst", - 'ctime' => $basetime, - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst", - 'ctime' => 1234, - 'vmid' => $vmid, - }, - ); + push @{ $mocked_backups_lists->{default} }, + ( + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst", + 'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst", + 'ctime' => $basetime - 24 * 60 * 60 - 60 * 60, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst", + 'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30, + 'vmid' => $vmid, + 'protected' => 1, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst", + 'ctime' => $basetime - 24 
* 60 * 60 - 60 * 60 + 60, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst", + 'ctime' => $basetime - 60 * 60, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst", + 'ctime' => $basetime, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst", + 'ctime' => $basetime, + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst", + 'ctime' => 1234, + 'vmid' => $vmid, + }, + ); } -push @{$mocked_backups_lists->{year1970}}, ( - { - 'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst", - 'ctime' => 83, - 'vmid' => 321, - }, - { - 'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst", - 'ctime' => 60*60*24 * (365*100 + 25) + 60, - 'vmid' => 321, - }, -); -push @{$mocked_backups_lists->{novmid}}, ( - { - 'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz", - 'ctime' => 1234, - }, -); -push @{$mocked_backups_lists->{threeway}}, ( - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst", - 'ctime' => $basetime - 7*24*60*60, - 'vmid' => 7654, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst", - 'ctime' => $basetime - 24*60*60, - 'vmid' => 7654, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst", - 'ctime' => $basetime, - 'vmid' => 7654, - }, -); -push @{$mocked_backups_lists->{weekboundary}}, ( - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+2)*24*60*60, - 'vmid' => 7654, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+3)*24*60*60, - 'vmid' => 7654, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+6)*24*60*60, - 'vmid' => 7654, - }, -); +push @{ $mocked_backups_lists->{year1970} }, + ( + { + 'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst", + 'ctime' => 83, + 'vmid' => 321, + }, + { + 'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst", + 'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60, + 'vmid' => 321, + }, + ); +push @{ $mocked_backups_lists->{novmid} }, + ( + { + 'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz", + 'ctime' => 1234, + }, + ); +push @{ $mocked_backups_lists->{threeway} }, + ( + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst", + 'ctime' => $basetime - 7 * 24 * 60 * 60, + 'vmid' => 7654, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst", + 'ctime' => $basetime - 24 * 60 * 60, + 'vmid' => 7654, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst", + 'ctime' => $basetime, + 'vmid' => 7654, + }, + ); +push @{ $mocked_backups_lists->{weekboundary} }, + ( + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60, + 'vmid' => 7654, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60, + 'vmid' => 7654, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60, + 'vmid' => 7654, + }, + ); my $current_list; my $mock_plugin = Test::MockModule->new('PVE::Storage::Plugin'); 
-$mock_plugin->redefine(list_volumes => sub { - my ($class, $storeid, $scfg, $vmid, $content_types) = @_; +$mock_plugin->redefine( + list_volumes => sub { + my ($class, $storeid, $scfg, $vmid, $content_types) = @_; - my $list = $mocked_backups_lists->{$current_list}; + my $list = $mocked_backups_lists->{$current_list}; - return $list if !defined($vmid); + return $list if !defined($vmid); - return [ grep { $_->{vmid} eq $vmid } @{$list} ]; -}); + return [grep { $_->{vmid} eq $vmid } @{$list}]; + }, +); sub generate_expected { my ($vmids, $type, $marks) = @_; my @expected; foreach my $vmid (@{$vmids}) { - push @expected, ( - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime - 585*24*60*60 - 60*60, - 'mark' => $marks->[0], - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime - 24*60*60 - 60*60, - 'mark' => $marks->[1], - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime - 24*60*60 - 60*60 + 30, - 'mark' => 'protected', - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime - 24*60*60 - 60*60 + 60, - 'mark' => $marks->[2], - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime - 60*60, - 'mark' => $marks->[3], - 'vmid' => $vmid, - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst", - 'type' => 'qemu', - 'ctime' => $basetime, - 'mark' => $marks->[4], - 'vmid' => $vmid, - }, - ) if !defined($type) || $type eq 'qemu'; - push @expected, ( - { - 'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst", - 'type' => 'lxc', - 'ctime' => $basetime, - 'mark' => $marks->[5], - 'vmid' => $vmid, - }, - ) if !defined($type) || $type eq 'lxc'; - push @expected, ( - { - 'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst", - 'type' => 'unknown', - 'ctime' => 1234, - 'mark' => 'renamed', - 'vmid' => $vmid, - }, - ) if !defined($type); + push @expected, + ( + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60, + 'mark' => $marks->[0], + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime - 24 * 60 * 60 - 60 * 60, + 'mark' => $marks->[1], + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30, + 'mark' => 'protected', + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 60, + 'mark' => $marks->[2], + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime - 60 * 60, + 'mark' => $marks->[3], + 'vmid' => $vmid, + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst", + 'type' => 'qemu', + 'ctime' => $basetime, + 'mark' => $marks->[4], + 'vmid' => $vmid, + }, + ) if !defined($type) || $type eq 'qemu'; + push @expected, + ( + { + 'volid' => 
"$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst", + 'type' => 'lxc', + 'ctime' => $basetime, + 'mark' => $marks->[5], + 'vmid' => $vmid, + }, + ) if !defined($type) || $type eq 'lxc'; + push @expected, + ( + { + 'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst", + 'type' => 'unknown', + 'ctime' => 1234, + 'mark' => 'renamed', + 'vmid' => $vmid, + }, + ) if !defined($type); } - return [ sort { $a->{volid} cmp $b->{volid} } @expected ]; + return [sort { $a->{volid} cmp $b->{volid} } @expected]; } # an array of test cases, each test is comprised of the following keys: @@ -208,268 +218,312 @@ sub generate_expected { # most of them are created further below my $tests = [ { - description => 'last=3, multiple IDs', - keep => { - 'keep-last' => 3, - }, - expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']), + description => 'last=3, multiple IDs', + keep => { + 'keep-last' => 3, + }, + expected => + generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']), }, { - description => 'weekly=2, one ID', - vmid => $vmids[0], - keep => { - 'keep-weekly' => 2, - }, - expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']), + description => 'weekly=2, one ID', + vmid => $vmids[0], + keep => { + 'keep-weekly' => 2, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['keep', 'remove', 'remove', 'remove', 'keep', 'keep'], + ), }, { - description => 'daily=weekly=monthly=1, multiple IDs', - keep => { - 'keep-hourly' => 0, - 'keep-daily' => 1, - 'keep-weekly' => 1, - 'keep-monthly' => 1, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'daily=weekly=monthly=1, multiple IDs', + keep => { + 'keep-hourly' => 0, + 'keep-daily' => 1, + 'keep-weekly' => 1, + 'keep-monthly' => 1, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), }, { - description => 'hourly=4, one ID', - vmid => $vmids[0], - keep => { - 'keep-hourly' => 4, - 'keep-daily' => 0, - }, - expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']), + description => 'hourly=4, one ID', + vmid => $vmids[0], + keep => { + 'keep-hourly' => 4, + 'keep-daily' => 0, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['keep', 'remove', 'keep', 'keep', 'keep', 'keep'], + ), }, { - description => 'yearly=2, multiple IDs', - keep => { - 'keep-hourly' => 0, - 'keep-daily' => 0, - 'keep-weekly' => 0, - 'keep-monthly' => 0, - 'keep-yearly' => 2, - }, - expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'yearly=2, multiple IDs', + keep => { + 'keep-hourly' => 0, + 'keep-daily' => 0, + 'keep-weekly' => 0, + 'keep-monthly' => 0, + 'keep-yearly' => 2, + }, + expected => generate_expected( + \@vmids, + undef, + ['remove', 'remove', 'keep', 'remove', 'keep', 'keep'], + ), }, { - description => 'last=2,hourly=2 one ID', - vmid => $vmids[0], - keep => { - 'keep-last' => 2, - 'keep-hourly' => 2, - }, - expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']), + description => 'last=2,hourly=2 one ID', + vmid => $vmids[0], + keep => { + 'keep-last' => 2, + 'keep-hourly' => 2, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['keep', 'remove', 'keep', 'keep', 'keep', 'keep'], + ), }, { - description => 
'last=1,monthly=2, multiple IDs', - keep => { - 'keep-last' => 1, - 'keep-monthly' => 2, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'last=1,monthly=2, multiple IDs', + keep => { + 'keep-last' => 1, + 'keep-monthly' => 2, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), }, { - description => 'monthly=3, one ID', - vmid => $vmids[0], - keep => { - 'keep-monthly' => 3, - }, - expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'monthly=3, one ID', + vmid => $vmids[0], + keep => { + 'keep-monthly' => 3, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['keep', 'remove', 'keep', 'remove', 'keep', 'keep'], + ), }, { - description => 'last=daily=weekly=1, multiple IDs', - keep => { - 'keep-last' => 1, - 'keep-daily' => 1, - 'keep-weekly' => 1, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'last=daily=weekly=1, multiple IDs', + keep => { + 'keep-last' => 1, + 'keep-daily' => 1, + 'keep-weekly' => 1, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), }, { - description => 'last=daily=weekly=1, others zero, multiple IDs', - keep => { - 'keep-hourly' => 0, - 'keep-last' => 1, - 'keep-daily' => 1, - 'keep-weekly' => 1, - 'keep-monthly' => 0, - 'keep-yearly' => 0, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'last=daily=weekly=1, others zero, multiple IDs', + keep => { + 'keep-hourly' => 0, + 'keep-last' => 1, + 'keep-daily' => 1, + 'keep-weekly' => 1, + 'keep-monthly' => 0, + 'keep-yearly' => 0, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']), }, { - description => 'daily=2, one ID', - vmid => $vmids[0], - keep => { - 'keep-daily' => 2, - }, - expected => generate_expected([$vmids[0]], undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']), + description => 'daily=2, one ID', + vmid => $vmids[0], + keep => { + 'keep-daily' => 2, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['remove', 'remove', 'keep', 'remove', 'keep', 'keep'], + ), }, { - description => 'weekly=monthly=1, multiple IDs', - keep => { - 'keep-weekly' => 1, - 'keep-monthly' => 1, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']), + description => 'weekly=monthly=1, multiple IDs', + keep => { + 'keep-weekly' => 1, + 'keep-monthly' => 1, + }, + expected => generate_expected( + \@vmids, + undef, + ['keep', 'remove', 'remove', 'remove', 'keep', 'keep'], + ), }, { - description => 'weekly=yearly=1, one ID', - vmid => $vmids[0], - keep => { - 'keep-weekly' => 1, - 'keep-yearly' => 1, - }, - expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']), + description => 'weekly=yearly=1, one ID', + vmid => $vmids[0], + keep => { + 'keep-weekly' => 1, + 'keep-yearly' => 1, + }, + expected => generate_expected( + [$vmids[0]], + undef, + ['keep', 'remove', 'remove', 'remove', 'keep', 'keep'], + ), }, { - description => 'weekly=yearly=1, one ID, type qemu', - vmid => $vmids[0], - type => 'qemu', - keep => { - 'keep-weekly' => 1, - 'keep-yearly' => 1, - }, - expected => generate_expected([$vmids[0]], 'qemu', ['keep', 
'remove', 'remove', 'remove', 'keep', '']), + description => 'weekly=yearly=1, one ID, type qemu', + vmid => $vmids[0], + type => 'qemu', + keep => { + 'keep-weekly' => 1, + 'keep-yearly' => 1, + }, + expected => generate_expected( + [$vmids[0]], + 'qemu', + ['keep', 'remove', 'remove', 'remove', 'keep', ''], + ), }, { - description => 'week=yearly=1, one ID, type lxc', - vmid => $vmids[0], - type => 'lxc', - keep => { - 'keep-last' => 1, - }, - expected => generate_expected([$vmids[0]], 'lxc', ['', '', '', '', '', 'keep']), + description => 'week=yearly=1, one ID, type lxc', + vmid => $vmids[0], + type => 'lxc', + keep => { + 'keep-last' => 1, + }, + expected => generate_expected([$vmids[0]], 'lxc', ['', '', '', '', '', 'keep']), }, { - description => 'yearly=1, year before 2000', - keep => { - 'keep-yearly' => 1, - }, - list => 'year1970', - expected => [ - { - 'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst", - 'ctime' => 83, - 'mark' => 'remove', - 'type' => 'lxc', - 'vmid' => 321, - }, - { - 'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst", - 'ctime' => 60*60*24 * (365*100 + 25) + 60, - 'mark' => 'keep', - 'type' => 'lxc', - 'vmid' => 321, - }, - ], + description => 'yearly=1, year before 2000', + keep => { + 'keep-yearly' => 1, + }, + list => 'year1970', + expected => [ + { + 'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst", + 'ctime' => 83, + 'mark' => 'remove', + 'type' => 'lxc', + 'vmid' => 321, + }, + { + 'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst", + 'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60, + 'mark' => 'keep', + 'type' => 'lxc', + 'vmid' => 321, + }, + ], }, { - description => 'last=1, ne ID, year before 2000', - keep => { - 'keep-last' => 1, - }, - list => 'novmid', - expected => [ - { - 'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz", - 'ctime' => 1234, - 'mark' => 'renamed', - 'type' => 'lxc', - }, - ], + description => 'last=1, ne ID, year before 2000', + keep => { + 'keep-last' => 1, + }, + list => 'novmid', + expected => [ + { + 'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz", + 'ctime' => 1234, + 'mark' => 'renamed', + 'type' => 'lxc', + }, + ], }, { - description => 'all missing, multiple IDs', - keep => {}, - expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), + description => 'all missing, multiple IDs', + keep => {}, + expected => + generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), }, { - description => 'all zero, multiple IDs', - keep => { - 'keep-last' => 0, - 'keep-hourly' => 0, - 'keep-daily' => 0, - 'keep-weekly' => 0, - 'keep-monthyl' => 0, - 'keep-yearly' => 0, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), + description => 'all zero, multiple IDs', + keep => { + 'keep-last' => 0, + 'keep-hourly' => 0, + 'keep-daily' => 0, + 'keep-weekly' => 0, + 'keep-monthyl' => 0, + 'keep-yearly' => 0, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), }, { - description => 'some zero, some missing, multiple IDs', - keep => { - 'keep-last' => 0, - 'keep-hourly' => 0, - 'keep-daily' => 0, - 'keep-monthyl' => 0, - 'keep-yearly' => 0, - }, - expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), + description => 'some zero, some missing, multiple IDs', + keep => { + 'keep-last' => 0, + 'keep-hourly' => 0, + 'keep-daily' => 0, + 
'keep-monthyl' => 0, + 'keep-yearly' => 0, + }, + expected => + generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']), }, { - description => 'daily=weekly=monthly=1', - keep => { - 'keep-daily' => 1, - 'keep-weekly' => 1, - 'keep-monthly' => 1, - }, - list => 'threeway', - expected => [ - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst", - 'ctime' => $basetime - 7*24*60*60, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'keep', - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst", - 'ctime' => $basetime - 24*60*60, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'remove', # month is already covered by the backup kept by keep-weekly! - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst", - 'ctime' => $basetime, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'keep', - }, - ], + description => 'daily=weekly=monthly=1', + keep => { + 'keep-daily' => 1, + 'keep-weekly' => 1, + 'keep-monthly' => 1, + }, + list => 'threeway', + expected => [ + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst", + 'ctime' => $basetime - 7 * 24 * 60 * 60, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'keep', + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst", + 'ctime' => $basetime - 24 * 60 * 60, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'remove', # month is already covered by the backup kept by keep-weekly! + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst", + 'ctime' => $basetime, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'keep', + }, + ], }, { - description => 'daily=weekly=1,weekboundary', - keep => { - 'keep-daily' => 1, - 'keep-weekly' => 1, - }, - list => 'weekboundary', - expected => [ - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+2)*24*60*60, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'remove', - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+3)*24*60*60, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'keep', - }, - { - 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst", - 'ctime' => $basetime + (366-31+6)*24*60*60, - 'type' => 'qemu', - 'vmid' => 7654, - 'mark' => 'keep', - }, - ], + description => 'daily=weekly=1,weekboundary', + keep => { + 'keep-daily' => 1, + 'keep-weekly' => 1, + }, + list => 'weekboundary', + expected => [ + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'remove', + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'keep', + }, + { + 'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst", + 'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60, + 'type' => 'qemu', + 'vmid' => 7654, + 'mark' => 'keep', + }, + ], }, ]; @@ -478,9 +532,11 @@ plan tests => scalar @$tests; for my $tt (@$tests) { my $got = eval { - $current_list = $tt->{list} // 'default'; - my $res = PVE::Storage::Plugin->prune_backups($tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1); - return [ sort { $a->{volid} cmp $b->{volid} } @{$res} ]; + $current_list = $tt->{list} // 'default'; + my $res = 
PVE::Storage::Plugin->prune_backups( + $tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1, + ); + return [sort { $a->{volid} cmp $b->{volid} } @{$res}]; }; $got = $@ if $@; diff --git a/src/test/rbd_namespace.pl b/src/test/rbd_namespace.pl index 6b115ce..ecec9f2 100755 --- a/src/test/rbd_namespace.pl +++ b/src/test/rbd_namespace.pl @@ -26,7 +26,7 @@ use JSON; use PVE::Tools qw(run_command); my $pool = "testpool"; -my $use_existing= undef; +my $use_existing = undef; my $namespace = "testspace"; my $showhelp = ''; my $vmid = 999999; @@ -46,7 +46,7 @@ Known options are: -h, --help Print this help message "; -GetOptions ( +GetOptions( "pool=s" => \$pool, "use-existing" => \$use_existing, "namespace=s" => \$namespace, @@ -54,7 +54,7 @@ GetOptions ( "h|help" => \$showhelp, "cleanup" => \$cleanup, "d|debug" => \$DEBUG, -) or die ($helpstring); +) or die($helpstring); if ($showhelp) { warn $helpstring; @@ -69,6 +69,7 @@ my $vmid_linked_clone = int($vmid) - 2; sub jp { print to_json($_[0], { utf8 => 8, pretty => 1, canonical => 1 }) . "\n"; } + sub dbgvar { jp(@_) if $DEBUG; } @@ -77,26 +78,24 @@ sub run_cmd { my ($cmd, $json, $ignore_errors) = @_; my $raw = ''; - my $parser = sub {$raw .= shift;}; + my $parser = sub { $raw .= shift; }; - eval { - run_command($cmd, outfunc => $parser); - }; + eval { run_command($cmd, outfunc => $parser); }; if (my $err = $@) { - die $err if !$ignore_errors; + die $err if !$ignore_errors; } if ($json) { - my $result; - if ($raw eq '') { - $result = []; - } elsif ($raw =~ m/^(\[.*\])$/s) { # untaint - $result = JSON::decode_json($1); - } else { - die "got unexpected data from command: '$cmd' -> '$raw'\n"; - } - return $result; - } + my $result; + if ($raw eq '') { + $result = []; + } elsif ($raw =~ m/^(\[.*\])$/s) { # untaint + $result = JSON::decode_json($1); + } else { + die "got unexpected data from command: '$cmd' -> '$raw'\n"; + } + return $result; + } return $raw; } @@ -105,17 +104,15 @@ sub run_test_cmd { my $raw = ''; my $out = sub { - my $line = shift; - $raw .= "${line}\n"; + my $line = shift; + $raw .= "${line}\n"; }; - eval { - run_command($cmd, outfunc => $out); - }; + eval { run_command($cmd, outfunc => $out); }; if (my $err = $@) { - print $raw; - print $err; - return 0; + print $raw; + print $err; + return 0; } print $raw; return 1; @@ -126,23 +123,23 @@ sub prepare { my $pools = run_cmd("ceph osd pool ls --format json", 1); - my %poolnames = map {$_ => 1} @$pools; + my %poolnames = map { $_ => 1 } @$pools; die "Pool '$pool' does not exist!\n" - if !exists($poolnames{$pool}) && $use_existing; + if !exists($poolnames{$pool}) && $use_existing; run_cmd(['pveceph', 'pool', 'create', ${pool}, '--add_storages', 1]) - if !$use_existing; + if !$use_existing; my $namespaces = run_cmd(['rbd', '-p', ${pool}, 'namespace', 'ls', '--format', 'json'], 1); dbgvar($namespace); my $ns_found = 0; for my $i (@$namespaces) { - $ns_found = 1 if $i->{name} eq $namespace; + $ns_found = 1 if $i->{name} eq $namespace; } if (!$ns_found) { - print "Create namespace '${namespace}' in pool '${pool}'\n"; - run_cmd(['rbd', 'namespace', 'create', "${pool}/${namespace}"]); + print "Create namespace '${namespace}' in pool '${pool}'\n"; + run_cmd(['rbd', 'namespace', 'create', "${pool}/${namespace}"]); } my $storages = run_cmd(['pvesh', 'get', 'storage', '--output-format', 'json'], 1); @@ -152,41 +149,67 @@ sub prepare { print "Create storage definition\n"; for my $stor (@$storages) { - $pool_found = 1 if $stor->{storage} eq $pool; - $rbd_found = 1 if $stor->{storage} eq 
$storage_name; + $pool_found = 1 if $stor->{storage} eq $pool; + $rbd_found = 1 if $stor->{storage} eq $storage_name; - if ($rbd_found) { - run_cmd(['pvesm', 'set', ${storage_name}, '--krbd', '0']); - die "Enable the storage '$stor->{storage}'!" if $stor->{disable}; - } + if ($rbd_found) { + run_cmd(['pvesm', 'set', ${storage_name}, '--krbd', '0']); + die "Enable the storage '$stor->{storage}'!" if $stor->{disable}; + } } if (!$pool_found) { - die "No storage for pool '${pool}' found! Must have same name as pool!\n" - if $use_existing; + die "No storage for pool '${pool}' found! Must have same name as pool!\n" + if $use_existing; - run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']); + run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']); } # create PVE storages (librbd / krbd) - run_cmd(['pvesm', 'add', 'rbd', ${storage_name}, '--krbd', '0', '--pool', ${pool}, '--namespace', ${namespace}, '--content', 'images,rootdir']) - if !$rbd_found; - + run_cmd( + [ + 'pvesm', + 'add', + 'rbd', + ${storage_name}, + '--krbd', + '0', + '--pool', + ${pool}, + '--namespace', + ${namespace}, + '--content', + 'images,rootdir', + ], + ) if !$rbd_found; # create test VM print "Create test VM ${vmid}\n"; - my $vms = run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'], 1); + my $vms = + run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'], + 1); for my $vm (@$vms) { - # TODO: introduce a force flag to make this behaviour configurable + # TODO: introduce a force flag to make this behaviour configurable - if ($vm->{vmid} eq $vmid) { - print "Test VM '${vmid}' already exists. It will be removed and recreated!\n"; - run_cmd(['qm', 'stop', ${vmid}], 0, 1); - run_cmd(['qm', 'destroy', ${vmid}]); - } + if ($vm->{vmid} eq $vmid) { + print "Test VM '${vmid}' already exists. 
It will be removed and recreated!\n"; + run_cmd(['qm', 'stop', ${vmid}], 0, 1); + run_cmd(['qm', 'destroy', ${vmid}]); + } } - run_cmd(['qm', 'create', ${vmid}, '--bios', 'ovmf', '--efidisk0', "${storage_name}:1", '--scsi0', "${storage_name}:2"]); + run_cmd( + [ + 'qm', + 'create', + ${vmid}, + '--bios', + 'ovmf', + '--efidisk0', + "${storage_name}:1", + '--scsi0', + "${storage_name}:2", + ], + ); } - sub cleanup { print "Cleaning up test environment!\n"; print "Removing VMs\n"; @@ -195,7 +218,21 @@ sub cleanup { run_cmd(['qm', 'stop', ${vmid_clone}], 0, 1); run_cmd(['qm', 'destroy', ${vmid_linked_clone}], 0, 1); run_cmd(['qm', 'destroy', ${vmid_clone}], 0, 1); - run_cmd(['for', 'i', 'in', "/dev/rbd/${pool}/${namespace}/*;", 'do', '/usr/bin/rbd', 'unmap', '\$i;', 'done'], 0, 1); + run_cmd( + [ + 'for', + 'i', + 'in', + "/dev/rbd/${pool}/${namespace}/*;", + 'do', + '/usr/bin/rbd', + 'unmap', + '\$i;', + 'done', + ], + 0, + 1, + ); run_cmd(['qm', 'unlock', ${vmid}], 0, 1); run_cmd(['qm', 'destroy', ${vmid}], 0, 1); @@ -206,10 +243,10 @@ sub cleanup { run_cmd(['rbd', 'namespace', 'remove', "${pool}/${namespace}"], 0, 1); if (!$use_existing) { - print "Removing Storage definition for ${pool}\n"; - run_cmd(['pvesm', 'remove', ${pool}], 0, 1); - print "Removing test pool\n"; - run_cmd(['pveceph', 'pool', 'destroy', $pool]); + print "Removing Storage definition for ${pool}\n"; + run_cmd(['pvesm', 'remove', ${pool}], 0, 1); + print "Removing test pool\n"; + run_cmd(['pveceph', 'pool', 'destroy', $pool]); } } @@ -229,102 +266,96 @@ my $tests = [ # ], # }, { - name => 'first VM start', - steps => [ - ['qm', 'start', $vmid], - ], + name => 'first VM start', + steps => [ + ['qm', 'start', $vmid], + ], }, { - name => 'snapshot/rollback', - steps => [ - ['qm', 'snapshot', $vmid, 'test'], - ['qm', 'rollback', $vmid, 'test'], - ], - cleanup => [ - ['qm', 'unlock', $vmid], - ], + name => 'snapshot/rollback', + steps => [ + ['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'], + ], + cleanup => [ + ['qm', 'unlock', $vmid], + ], }, { - name => 'remove snapshot', - steps => [ - ['qm', 'delsnapshot', $vmid, 'test'], - ], + name => 'remove snapshot', + steps => [ + ['qm', 'delsnapshot', $vmid, 'test'], + ], }, { - name => 'moving disk between namespaces', - steps => [ - ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1], - ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1], - ], + name => 'moving disk between namespaces', + steps => [ + ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1], + ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1], + ], }, { - name => 'switch to krbd', - preparations => [ - ['qm', 'stop', $vmid], - ['pvesm', 'set', $storage_name, '--krbd', 1] - ], + name => 'switch to krbd', + preparations => [ + ['qm', 'stop', $vmid], ['pvesm', 'set', $storage_name, '--krbd', 1], + ], }, { - name => 'start VM with krbd', - steps => [ - ['qm', 'start', $vmid], - ], + name => 'start VM with krbd', + steps => [ + ['qm', 'start', $vmid], + ], }, { - name => 'snapshot/rollback with krbd', - steps => [ - ['qm', 'snapshot', $vmid, 'test'], - ['qm', 'rollback', $vmid, 'test'], - ], - cleanup => [ - ['qm', 'unlock', $vmid], - ], + name => 'snapshot/rollback with krbd', + steps => [ + ['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'], + ], + cleanup => [ + ['qm', 'unlock', $vmid], + ], }, { - name => 'remove snapshot with krbd', - steps => [ - ['qm', 'delsnapshot', $vmid, 'test'], - ], + name => 'remove snapshot with krbd', + steps 
=> [ + ['qm', 'delsnapshot', $vmid, 'test'], + ], }, { - name => 'moving disk between namespaces with krbd', - steps => [ - ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1], - ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1], - ], + name => 'moving disk between namespaces with krbd', + steps => [ + ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1], + ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1], + ], }, { - name => 'clone VM with krbd', - steps => [ - ['qm', 'clone', $vmid, $vmid_clone], - ], + name => 'clone VM with krbd', + steps => [ + ['qm', 'clone', $vmid, $vmid_clone], + ], }, { - name => 'switch to non krbd', - preparations => [ - ['qm', 'stop', $vmid], - ['qm', 'stop', $vmid_clone], - ['pvesm', 'set', $storage_name, '--krbd', 0] - ], + name => 'switch to non krbd', + preparations => [ + ['qm', 'stop', $vmid], + ['qm', 'stop', $vmid_clone], + ['pvesm', 'set', $storage_name, '--krbd', 0], + ], }, { - name => 'templates and linked clone', - steps => [ - ['qm', 'template', $vmid], - ['qm', 'clone', $vmid, $vmid_linked_clone], - ['qm', 'start', $vmid_linked_clone], - ['qm', 'stop', $vmid_linked_clone], - ], + name => 'templates and linked clone', + steps => [ + ['qm', 'template', $vmid], + ['qm', 'clone', $vmid, $vmid_linked_clone], + ['qm', 'start', $vmid_linked_clone], + ['qm', 'stop', $vmid_linked_clone], + ], }, { - name => 'start linked clone with krbd', - preparations => [ - ['pvesm', 'set', $storage_name, '--krbd', 1] - ], - steps => [ - ['qm', 'start', $vmid_linked_clone], - ['qm', 'stop', $vmid_linked_clone], - ], + name => 'start linked clone with krbd', + preparations => [['pvesm', 'set', $storage_name, '--krbd', 1]], + steps => [ + ['qm', 'start', $vmid_linked_clone], ['qm', 'stop', $vmid_linked_clone], + ], }, ]; @@ -332,8 +363,8 @@ sub run_prep_cleanup { my ($cmds) = @_; for (@$cmds) { - print join(' ', @$_). "\n"; - run_cmd($_); + print join(' ', @$_) . 
"\n"; + run_cmd($_); } } @@ -341,7 +372,7 @@ sub run_steps { my ($steps) = @_; for (@$steps) { - ok(run_test_cmd($_), join(' ', @$_)); + ok(run_test_cmd($_), join(' ', @$_)); } } @@ -350,23 +381,23 @@ sub run_tests { my $num_tests = 0; for (@$tests) { - $num_tests += scalar(@{$_->{steps}}) if defined $_->{steps}; + $num_tests += scalar(@{ $_->{steps} }) if defined $_->{steps}; } print("Tests: $num_tests\n"); plan tests => $num_tests; for my $test (@$tests) { - print "Section: $test->{name}\n"; - run_prep_cleanup($test->{preparations}) if defined $test->{preparations}; - run_steps($test->{steps}) if defined $test->{steps}; - run_prep_cleanup($test->{cleanup}) if defined $test->{cleanup}; + print "Section: $test->{name}\n"; + run_prep_cleanup($test->{preparations}) if defined $test->{preparations}; + run_steps($test->{steps}) if defined $test->{steps}; + run_prep_cleanup($test->{cleanup}) if defined $test->{cleanup}; } done_testing(); if (Test::More->builder->is_passing()) { - cleanup(); + cleanup(); } } diff --git a/src/test/run_bwlimit_tests.pl b/src/test/run_bwlimit_tests.pl index 6ae379c..973a8e5 100755 --- a/src/test/run_bwlimit_tests.pl +++ b/src/test/run_bwlimit_tests.pl @@ -51,23 +51,23 @@ EOF my $permissions = { 'user1@test' => {}, - 'user2@test' => { '/' => ['Sys.Modify'], }, - 'user3@test' => { '/storage' => ['Datastore.Allocate'], }, - 'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'], }, + 'user2@test' => { '/' => ['Sys.Modify'] }, + 'user3@test' => { '/storage' => ['Datastore.Allocate'] }, + 'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'] }, }; my $pve_cluster_module; $pve_cluster_module = Test::MockModule->new('PVE::Cluster'); $pve_cluster_module->mock( - cfs_update => sub {}, + cfs_update => sub { }, get_config => sub { - my ($file) = @_; - if ($file eq 'datacenter.cfg') { - return $datacenter_cfg; - } elsif ($file eq 'storage.cfg') { - return $storage_cfg; - } - die "TODO: mock get_config($file)\n"; + my ($file) = @_; + if ($file eq 'datacenter.cfg') { + return $datacenter_cfg; + } elsif ($file eq 'storage.cfg') { + return $storage_cfg; + } + die "TODO: mock get_config($file)\n"; }, ); @@ -75,136 +75,360 @@ my $rpcenv_module; $rpcenv_module = Test::MockModule->new('PVE::RPCEnvironment'); $rpcenv_module->mock( check => sub { - my ($env, $user, $path, $perms, $noerr) = @_; - return 1 if $user eq 'root@pam'; - my $userperms = $permissions->{$user} - or die "no permissions defined for user $user\n"; - if (defined(my $pathperms = $userperms->{$path})) { - foreach my $pp (@$pathperms) { - foreach my $reqp (@$perms) { - return 1 if $pp eq $reqp; - } - } - } - die "permission denied\n" if !$noerr; - return 0; + my ($env, $user, $path, $perms, $noerr) = @_; + return 1 if $user eq 'root@pam'; + my $userperms = $permissions->{$user} + or die "no permissions defined for user $user\n"; + if (defined(my $pathperms = $userperms->{$path})) { + foreach my $pp (@$pathperms) { + foreach my $reqp (@$perms) { + return 1 if $pp eq $reqp; + } + } + } + die "permission denied\n" if !$noerr; + return 0; }, ); my $rpcenv = PVE::RPCEnvironment->init('pub'); my @tests = ( - [ user => 'root@pam' ], - [ ['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default' ], - [ ['move', ['nolimit'], undef], 80, 'root / specific default limit, requesting default (move)' ], - [ ['restore', ['nolimit'], undef], 60, 'root / specific default limit, requesting default (restore)' ], - [ ['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit' ], - 
[ ['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)' ], - [ ['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)' ], - [ ['unknown', ['nolimit'], 0], 0, 'root / generic default limit' ], - [ ['move', ['nolimit'], 0], 0, 'root / specific default limit (move)' ], - [ ['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)' ], - [ ['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit' ], - [ ['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)' ], - [ ['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)' ], - [ ['migrate', undef, 100], 100, 'root / undef storage (migrate)' ], - [ ['migrate', [], 100], 100, 'root / no storage (migrate)' ], - [ ['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)' ], - [ ['migrate', [undef, undef], 200], 200, 'root / list of undef storages with override (migrate)' ], + [user => 'root@pam'], + [['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default'], + [ + ['move', ['nolimit'], undef], + 80, + 'root / specific default limit, requesting default (move)', + ], + [ + ['restore', ['nolimit'], undef], + 60, + 'root / specific default limit, requesting default (restore)', + ], + [['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit'], + [['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)'], + [['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)'], + [['unknown', ['nolimit'], 0], 0, 'root / generic default limit'], + [['move', ['nolimit'], 0], 0, 'root / specific default limit (move)'], + [['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)'], + [['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit'], + [['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)'], + [['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)'], + [['migrate', undef, 100], 100, 'root / undef storage (migrate)'], + [['migrate', [], 100], 100, 'root / no storage (migrate)'], + [['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)'], + [ + ['migrate', [undef, undef], 200], + 200, + 'root / list of undef storages with override (migrate)', + ], - [ user => 'user1@test' ], - [ ['unknown', ['nolimit'], undef], 100, 'generic default limit' ], - [ ['move', ['nolimit'], undef], 80, 'specific default limit (move)' ], - [ ['restore', ['nolimit'], undef], 60, 'specific default limit (restore)' ], - [ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit' ], - [ ['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)' ], - [ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)' ], - [ ['unknown', ['d200m400r300'], undef], 200, 'storage default limit above datacenter limits' ], - [ ['move', ['d200m400r300'], undef], 400, 'specific storage limit above datacenter limits (move)' ], - [ ['restore', ['d200m400r300'], undef], 300, 'specific storage limit above datacenter limits (restore)' ], - [ ['unknown', ['d50'], undef], 50, 'storage default limit' ], - [ ['move', ['d50'], undef], 50, 'storage default limit (move)' ], - [ ['restore', ['d50'], undef], 50, 'storage default limit (restore)' ], + [user => 'user1@test'], + [['unknown', ['nolimit'], undef], 100, 'generic default limit'], + [['move', ['nolimit'], undef], 80, 'specific default limit (move)'], + [['restore', ['nolimit'], undef], 60, 'specific default limit (restore)'], + [['unknown', 
['d50m40r30'], undef], 50, 'storage default limit'], + [['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)'], + [['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)'], + [ + ['unknown', ['d200m400r300'], undef], + 200, + 'storage default limit above datacenter limits', + ], + [ + ['move', ['d200m400r300'], undef], + 400, + 'specific storage limit above datacenter limits (move)', + ], + [ + ['restore', ['d200m400r300'], undef], + 300, + 'specific storage limit above datacenter limits (restore)', + ], + [['unknown', ['d50'], undef], 50, 'storage default limit'], + [['move', ['d50'], undef], 50, 'storage default limit (move)'], + [['restore', ['d50'], undef], 50, 'storage default limit (restore)'], - [ user => 'user2@test' ], - [ ['unknown', ['nolimit'], 0], 0, 'generic default limit with Sys.Modify, passing unlimited' ], - [ ['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify' ], - [ ['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)' ], - [ ['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)' ], - [ ['restore', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (restore)' ], - [ ['move', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (move)' ], - [ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify' ], - [ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)' ], - [ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)' ], + [user => 'user2@test'], + [ + ['unknown', ['nolimit'], 0], + 0, + 'generic default limit with Sys.Modify, passing unlimited', + ], + [['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify'], + [['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)'], + [['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)'], + [ + ['restore', ['nolimit'], 0], + 0, + 'specific default limit with Sys.Modify, passing unlimited (restore)', + ], + [ + ['move', ['nolimit'], 0], + 0, + 'specific default limit with Sys.Modify, passing unlimited (move)', + ], + [['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify'], + [['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)'], + [['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)'], - [ user => 'user3@test' ], - [ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /' ], - [ ['unknown', ['nolimit'], 80], 80, 'generic default limit with privileges on /, passing an override value' ], - [ ['unknown', ['nolimit'], 0], 0, 'generic default limit with privileges on /, passing unlimited' ], - [ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)' ], - [ ['move', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (move)' ], - [ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on / (restore)' ], - [ ['restore', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (restore)' ], - [ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on /, passing unlimited' ], - [ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /' ], - [ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on, passing 
unlimited /' ], - [ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)' ], - [ ['move', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on, passing unlimited / (move)' ], - [ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on / (restore)' ], - [ ['restore', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on /, passing unlimited (restore)' ], + [user => 'user3@test'], + [['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /'], + [ + ['unknown', ['nolimit'], 80], + 80, + 'generic default limit with privileges on /, passing an override value', + ], + [ + ['unknown', ['nolimit'], 0], + 0, + 'generic default limit with privileges on /, passing unlimited', + ], + [['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)'], + [ + ['move', ['nolimit'], 0], + 0, + 'specific default limit with privileges on /, passing unlimited (move)', + ], + [ + ['restore', ['nolimit'], undef], + 60, + 'specific default limit with privileges on / (restore)', + ], + [ + ['restore', ['nolimit'], 0], + 0, + 'specific default limit with privileges on /, passing unlimited (restore)', + ], + [ + ['unknown', ['d50m40r30'], 0], + 0, + 'storage default limit with privileges on /, passing unlimited', + ], + [['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /'], + [ + ['unknown', ['d50m40r30'], 0], + 0, + 'storage default limit with privileges on, passing unlimited /', + ], + [['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)'], + [ + ['move', ['d50m40r30'], 0], + 0, + 'specific storage limit with privileges on, passing unlimited / (move)', + ], + [ + ['restore', ['d50m40r30'], undef], + 30, + 'specific storage limit with privileges on / (restore)', + ], + [ + ['restore', ['d50m40r30'], 0], + 0, + 'specific storage limit with privileges on /, passing unlimited (restore)', + ], - [ user => 'user4@test' ], - [ ['unknown', ['nolimit'], 10], 10, 'generic default limit with privileges on a different storage, passing lower override' ], - [ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on a different storage' ], - [ ['unknown', ['nolimit'], 0], 100, 'generic default limit with privileges on a different storage, passing unlimited' ], - [ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on a different storage (move)' ], - [ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on a different storage (restore)' ], - [ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on a different storage' ], - [ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on a different storage (move)' ], - [ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on a different storage (restore)' ], - [ ['unknown', ['d20m40r30'], undef], 20, 'storage default limit with privileges on that storage' ], - [ ['unknown', ['d20m40r30'], 0], 0, 'storage default limit with privileges on that storage, passing unlimited' ], - [ ['move', ['d20m40r30'], undef], 40, 'specific storage limit with privileges on that storage (move)' ], - [ ['move', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (move)' ], - [ ['move', ['d20m40r30'], 10], 10, 'specific storage limit with privileges on that storage, passing low override (move)' ], - [ ['move', ['d20m40r30'], 
300], 300, 'specific storage limit with privileges on that storage, passing high override (move)' ], - [ ['restore', ['d20m40r30'], undef], 30, 'specific storage limit with privileges on that storage (restore)' ], - [ ['restore', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (restore)' ], - [ ['unknown', ['d50m40r30', 'd20m40r30'], 0], 50, 'multiple storages default limit with privileges on one of them, passing unlimited' ], - [ ['move', ['d50m40r30', 'd20m40r30'], 0], 40, 'multiple storages specific limit with privileges on one of them, passing unlimited (move)' ], - [ ['restore', ['d50m40r30', 'd20m40r30'], 0], 30, 'multiple storages specific limit with privileges on one of them, passing unlimited (restore)' ], - [ ['unknown', ['d50m40r30', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them' ], - [ ['unknown', ['d10', 'd20m40r30'], undef], 10, 'multiple storages default limit with privileges on one of them (storage limited)' ], - [ ['move', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (move)' ], - [ ['restore', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore)' ], - [ ['restore', ['d10', 'd20m40r30'], 5], 5, 'multiple storages specific limit (storage limited) (restore), passing lower override' ], - [ ['restore', ['d200', 'd200m400r300'], 65], 65, 'multiple storages specific limit (storage limited) (restore), passing lower override' ], - [ ['restore', ['d200', 'd200m400r300'], 400], 200, 'multiple storages specific limit (storage limited) (restore), passing higher override' ], - [ ['restore', ['d200', 'd200m400r300'], 0], 200, 'multiple storages specific limit (storage limited) (restore), passing unlimited' ], - [ ['restore', ['d200', 'd200m400r300'], 1], 1, 'multiple storages specific limit (storage limited) (restore), passing 1' ], - [ ['restore', ['d10', 'd20m40r30'], 500], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override' ], - [ ['unknown', ['nolimit', 'd20m40r30'], 0], 100, 'multiple storages default limit with privileges on one of them, passing unlimited (default limited)' ], - [ ['move', ['nolimit', 'd20m40r30'], 0], 80, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)' ], - [ ['restore', ['nolimit', 'd20m40r30'], 0], 60, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)' ], - [ ['unknown', ['nolimit', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them (default limited)' ], - [ ['move', ['nolimit', 'd20m40r30'], undef], 40, 'multiple storages specific limit with privileges on one of them (default limited) (move)' ], - [ ['restore', ['nolimit', 'd20m40r30'], undef], 30, 'multiple storages specific limit with privileges on one of them (default limited) (restore)' ], - [ ['restore', ['d20m40r30', 'm50'], 200], 60, 'multiple storages specific limit with privileges on one of them (global default limited) (restore)' ], - [ ['move', ['nolimit', undef ], 40] , 40, 'multiple storages one undefined, passing 40 (move)' ], - [ ['move', undef, 100] , 80, 'undef storage, passing 100 (move)' ], - [ ['move', [undef], 100] , 80, '[undef] storage, passing 100 (move)' ], - [ ['move', [undef], undef] , 80, '[undef] storage, no override 
(move)' ], + [user => 'user4@test'], + [ + ['unknown', ['nolimit'], 10], + 10, + 'generic default limit with privileges on a different storage, passing lower override', + ], + [ + ['unknown', ['nolimit'], undef], + 100, + 'generic default limit with privileges on a different storage', + ], + [ + ['unknown', ['nolimit'], 0], + 100, + 'generic default limit with privileges on a different storage, passing unlimited', + ], + [ + ['move', ['nolimit'], undef], + 80, + 'specific default limit with privileges on a different storage (move)', + ], + [ + ['restore', ['nolimit'], undef], + 60, + 'specific default limit with privileges on a different storage (restore)', + ], + [ + ['unknown', ['d50m40r30'], undef], + 50, + 'storage default limit with privileges on a different storage', + ], + [ + ['move', ['d50m40r30'], undef], + 40, + 'specific storage limit with privileges on a different storage (move)', + ], + [ + ['restore', ['d50m40r30'], undef], + 30, + 'specific storage limit with privileges on a different storage (restore)', + ], + [ + ['unknown', ['d20m40r30'], undef], + 20, + 'storage default limit with privileges on that storage', + ], + [ + ['unknown', ['d20m40r30'], 0], + 0, + 'storage default limit with privileges on that storage, passing unlimited', + ], + [ + ['move', ['d20m40r30'], undef], + 40, + 'specific storage limit with privileges on that storage (move)', + ], + [ + ['move', ['d20m40r30'], 0], + 0, + 'specific storage limit with privileges on that storage, passing unlimited (move)', + ], + [ + ['move', ['d20m40r30'], 10], + 10, + 'specific storage limit with privileges on that storage, passing low override (move)', + ], + [ + ['move', ['d20m40r30'], 300], + 300, + 'specific storage limit with privileges on that storage, passing high override (move)', + ], + [ + ['restore', ['d20m40r30'], undef], + 30, + 'specific storage limit with privileges on that storage (restore)', + ], + [ + ['restore', ['d20m40r30'], 0], + 0, + 'specific storage limit with privileges on that storage, passing unlimited (restore)', + ], + [ + ['unknown', ['d50m40r30', 'd20m40r30'], 0], + 50, + 'multiple storages default limit with privileges on one of them, passing unlimited', + ], + [ + ['move', ['d50m40r30', 'd20m40r30'], 0], + 40, + 'multiple storages specific limit with privileges on one of them, passing unlimited (move)', + ], + [ + ['restore', ['d50m40r30', 'd20m40r30'], 0], + 30, + 'multiple storages specific limit with privileges on one of them, passing unlimited (restore)', + ], + [ + ['unknown', ['d50m40r30', 'd20m40r30'], undef], + 20, + 'multiple storages default limit with privileges on one of them', + ], + [ + ['unknown', ['d10', 'd20m40r30'], undef], + 10, + 'multiple storages default limit with privileges on one of them (storage limited)', + ], + [ + ['move', ['d10', 'd20m40r30'], undef], + 10, + 'multiple storages specific limit with privileges on one of them (storage limited) (move)', + ], + [ + ['restore', ['d10', 'd20m40r30'], undef], + 10, + 'multiple storages specific limit with privileges on one of them (storage limited) (restore)', + ], + [ + ['restore', ['d10', 'd20m40r30'], 5], + 5, + 'multiple storages specific limit (storage limited) (restore), passing lower override', + ], + [ + ['restore', ['d200', 'd200m400r300'], 65], + 65, + 'multiple storages specific limit (storage limited) (restore), passing lower override', + ], + [ + ['restore', ['d200', 'd200m400r300'], 400], + 200, + 'multiple storages specific limit (storage limited) (restore), passing higher override', + ], + [ + 
['restore', ['d200', 'd200m400r300'], 0], + 200, + 'multiple storages specific limit (storage limited) (restore), passing unlimited', + ], + [ + ['restore', ['d200', 'd200m400r300'], 1], + 1, + 'multiple storages specific limit (storage limited) (restore), passing 1', + ], + [ + ['restore', ['d10', 'd20m40r30'], 500], + 10, + 'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override', + ], + [ + ['unknown', ['nolimit', 'd20m40r30'], 0], + 100, + 'multiple storages default limit with privileges on one of them, passing unlimited (default limited)', + ], + [ + ['move', ['nolimit', 'd20m40r30'], 0], + 80, + 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)', + ], + [ + ['restore', ['nolimit', 'd20m40r30'], 0], + 60, + 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)', + ], + [ + ['unknown', ['nolimit', 'd20m40r30'], undef], + 20, + 'multiple storages default limit with privileges on one of them (default limited)', + ], + [ + ['move', ['nolimit', 'd20m40r30'], undef], + 40, + 'multiple storages specific limit with privileges on one of them (default limited) (move)', + ], + [ + ['restore', ['nolimit', 'd20m40r30'], undef], + 30, + 'multiple storages specific limit with privileges on one of them (default limited) (restore)', + ], + [ + ['restore', ['d20m40r30', 'm50'], 200], + 60, + 'multiple storages specific limit with privileges on one of them (global default limited) (restore)', + ], + [ + ['move', ['nolimit', undef], 40], + 40, + 'multiple storages one undefined, passing 40 (move)', + ], + [['move', undef, 100], 80, 'undef storage, passing 100 (move)'], + [['move', [undef], 100], 80, '[undef] storage, passing 100 (move)'], + [['move', [undef], undef], 80, '[undef] storage, no override (move)'], ); foreach my $t (@tests) { my ($args, $expected, $description) = @$t; if (!ref($args)) { - if ($args eq 'user') { - $rpcenv->set_user($expected); - } else { - die "not a test specification\n"; - } - next; + if ($args eq 'user') { + $rpcenv->set_user($expected); + } else { + die "not a test specification\n"; + } + next; } is(PVE::Storage::get_bandwidth_limit(@$args), $expected, $description); } diff --git a/src/test/run_disk_tests.pl b/src/test/run_disk_tests.pl index c1a698e..5a6af07 100755 --- a/src/test/run_disk_tests.pl +++ b/src/test/run_disk_tests.pl @@ -5,8 +5,8 @@ use warnings; use TAP::Harness; -my $harness = TAP::Harness->new( { verbosity => -2 }); -my $res = $harness->runtests( "disklist_test.pm" ); +my $harness = TAP::Harness->new({ verbosity => -2 }); +my $res = $harness->runtests("disklist_test.pm"); exit -1 if !$res || $res->{failed} || $res->{parse_errors}; diff --git a/src/test/run_ovf_tests.pl b/src/test/run_ovf_tests.pl index b8fa4b1..43b2ee2 100755 --- a/src/test/run_ovf_tests.pl +++ b/src/test/run_ovf_tests.pl @@ -10,11 +10,12 @@ use Test::More; use Data::Dumper; -my $test_manifests = join ('/', $Bin, 'ovf_manifests'); +my $test_manifests = join('/', $Bin, 'ovf_manifests'); print "parsing ovfs\n"; -my $win2008 = eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") }; +my $win2008 = + eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") }; if (my $err = $@) { fail('parse win2008'); warn("error: $err\n"); @@ -28,7 +29,8 @@ if (my $err = $@) { } else { ok('parse win10'); } -my $win10noNs = eval { 
PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") }; +my $win10noNs = + eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") }; if (my $err = $@) { fail("parse win10 no default rasd NS"); warn("error: $err\n"); @@ -38,26 +40,59 @@ if (my $err = $@) { print "testing disks\n"; -is($win2008->{disks}->[0]->{disk_address}, 'scsi0', 'multidisk vm has the correct first disk controller'); -is($win2008->{disks}->[0]->{backing_file}, "$test_manifests/disk1.vmdk", 'multidisk vm has the correct first disk backing device'); +is( + $win2008->{disks}->[0]->{disk_address}, + 'scsi0', + 'multidisk vm has the correct first disk controller', +); +is( + $win2008->{disks}->[0]->{backing_file}, + "$test_manifests/disk1.vmdk", + 'multidisk vm has the correct first disk backing device', +); is($win2008->{disks}->[0]->{virtual_size}, 2048, 'multidisk vm has the correct first disk size'); -is($win2008->{disks}->[1]->{disk_address}, 'scsi1', 'multidisk vm has the correct second disk controller'); -is($win2008->{disks}->[1]->{backing_file}, "$test_manifests/disk2.vmdk", 'multidisk vm has the correct second disk backing device'); +is( + $win2008->{disks}->[1]->{disk_address}, + 'scsi1', + 'multidisk vm has the correct second disk controller', +); +is( + $win2008->{disks}->[1]->{backing_file}, + "$test_manifests/disk2.vmdk", + 'multidisk vm has the correct second disk backing device', +); is($win2008->{disks}->[1]->{virtual_size}, 2048, 'multidisk vm has the correct second disk size'); is($win10->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm has the correct disk controller'); -is($win10->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm has the correct disk backing device'); +is( + $win10->{disks}->[0]->{backing_file}, + "$test_manifests/Win10-Liz-disk1.vmdk", + 'single disk vm has the correct disk backing device', +); is($win10->{disks}->[0]->{virtual_size}, 2048, 'single disk vm has the correct size'); -is($win10noNs->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm (no default rasd NS) has the correct disk controller'); -is($win10noNs->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm (no default rasd NS) has the correct disk backing device'); -is($win10noNs->{disks}->[0]->{virtual_size}, 2048, 'single disk vm (no default rasd NS) has the correct size'); +is( + $win10noNs->{disks}->[0]->{disk_address}, + 'scsi0', + 'single disk vm (no default rasd NS) has the correct disk controller', +); +is( + $win10noNs->{disks}->[0]->{backing_file}, + "$test_manifests/Win10-Liz-disk1.vmdk", + 'single disk vm (no default rasd NS) has the correct disk backing device', +); +is( + $win10noNs->{disks}->[0]->{virtual_size}, + 2048, + 'single disk vm (no default rasd NS) has the correct size', +); print "testing nics\n"; is($win2008->{net}->{net0}->{model}, 'e1000', 'win2008 has correct nic model'); is($win10->{net}->{net0}->{model}, 'e1000e', 'win10 has correct nic model'); -is($win10noNs->{net}->{net0}->{model}, 'e1000e', 'win10 (no default rasd NS) has correct nic model'); +is($win10noNs->{net}->{net0}->{model}, 'e1000e', + 'win10 (no default rasd NS) has correct nic model'); print "\ntesting vm.conf extraction\n"; diff --git a/src/test/run_plugin_tests.pl b/src/test/run_plugin_tests.pl index d33429a..8bce9d3 100755 --- a/src/test/run_plugin_tests.pl +++ b/src/test/run_plugin_tests.pl @@ -8,7 +8,7 @@ $ENV{TZ} = 'UTC'; use TAP::Harness; -my $harness = TAP::Harness->new( { verbosity => 
-1 }); +my $harness = TAP::Harness->new({ verbosity => -1 }); my $res = $harness->runtests( "archive_info_test.pm", "parse_volname_test.pm", diff --git a/src/test/run_test_zfspoolplugin.pl b/src/test/run_test_zfspoolplugin.pl index 095ccb3..9bd70a8 100755 --- a/src/test/run_test_zfspoolplugin.pl +++ b/src/test/run_test_zfspoolplugin.pl @@ -55,175 +55,181 @@ my $test19 = sub { my @res; my $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$vmdisk"); - if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmdisk") { - $count++; - $fail = 1; - warn "Test 19 a: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmdisk'\ get \'$res[0]\'"; - } - if ($res[1] ne "102") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 a: owner is not correct: expected \'102\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 a: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, "$storagename:$vmdisk"); + if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmdisk") { + $count++; + $fail = 1; + warn + "Test 19 a: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmdisk'\ get \'$res[0]\'"; + } + if ($res[1] ne "102") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 a: owner is not correct: expected \'102\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 a: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 a: $@"; + if ($@) { + $count++; + warn "Test 19 a: $@"; } @res = undef; $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$vmbase"); - if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmbase") { - $count++; - $fail = 1; - warn "Test 19 b: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmbase'\ get \'$res[0]\'"; - } - if ($res[1] ne "100") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 b: owner is not correct: expected \'100\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 b: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, "$storagename:$vmbase"); + if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmbase") { + $count++; + $fail = 1; + warn + "Test 19 b: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmbase'\ get \'$res[0]\'"; + } + if ($res[1] ne "100") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 b: owner is not correct: expected \'100\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 b: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 b: $@"; + if ($@) { + $count++; + warn "Test 19 b: $@"; } @res = undef; $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$vmbase\/$vmlinked"); - if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmlinked") { - $count++; - $fail = 1; - warn "Test 19 c: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmlinked'\ get \'$res[0]\'"; - } - if ($res[1] ne "101") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 c: owner is not correct: expected \'101\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 c: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, 
"$storagename:$vmbase\/$vmlinked"); + if ($res[0] ne "\/dev\/zvol\/regressiontest\/$vmlinked") { + $count++; + $fail = 1; + warn + "Test 19 c: path is not correct: expected \'\/dev\/zvol\/regressiontest\/$vmlinked'\ get \'$res[0]\'"; + } + if ($res[1] ne "101") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 c: owner is not correct: expected \'101\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 c: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 c: $@"; + if ($@) { + $count++; + warn "Test 19 c: $@"; } @res = undef; $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$ctdisk"); - if ($res[0] ne "\/$mountpoint\/$ctdisk") { - $count++; - $fail = 1; - warn "Test 19 d: path is not correct: expected \'\/$mountpoint\/$ctdisk'\ get \'$res[0]\'"; - } - if ($res[1] ne "202") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 d: owner is not correct: expected \'202\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 d: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, "$storagename:$ctdisk"); + if ($res[0] ne "\/$mountpoint\/$ctdisk") { + $count++; + $fail = 1; + warn + "Test 19 d: path is not correct: expected \'\/$mountpoint\/$ctdisk'\ get \'$res[0]\'"; + } + if ($res[1] ne "202") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 d: owner is not correct: expected \'202\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 d: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 d: $@"; + if ($@) { + $count++; + warn "Test 19 d: $@"; } @res = undef; $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$ctbase"); - if ($res[0] ne "\/$mountpoint\/$ctbase") { - $count++; - $fail = 1; - warn "Test 19 e: path is not correct: expected \'\/$mountpoint\/$ctbase'\ get \'$res[0]\'"; - } - if ($res[1] ne "200") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 e: owner is not correct: expected \'200\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 e: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, "$storagename:$ctbase"); + if ($res[0] ne "\/$mountpoint\/$ctbase") { + $count++; + $fail = 1; + warn + "Test 19 e: path is not correct: expected \'\/$mountpoint\/$ctbase'\ get \'$res[0]\'"; + } + if ($res[1] ne "200") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 e: owner is not correct: expected \'200\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 e: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 e: $@"; + if ($@) { + $count++; + warn "Test 19 e: $@"; } @res = undef; $fail = 0; eval { - @res = PVE::Storage::path($cfg, "$storagename:$ctbase\/$ctlinked"); - if ($res[0] ne "\/$mountpoint\/$ctlinked") { - $count++; - $fail = 1; - warn "Test 19 f: path is not correct: expected \'\/$mountpoint\/$ctlinked'\ get \'$res[0]\'"; - } - if ($res[1] ne "201") { - if (!$fail) { - $count++; - $fail = 1; - } - warn "Test 19 f: owner is not correct: expected \'201\' get \'$res[1]\'"; - } - if ($res[2] ne "images") { - if (!$fail) { - $count++; - $fail = 
1; - } - warn "Test 19 f: owner is not correct: expected \'images\' get \'$res[2]\'"; - } + @res = PVE::Storage::path($cfg, "$storagename:$ctbase\/$ctlinked"); + if ($res[0] ne "\/$mountpoint\/$ctlinked") { + $count++; + $fail = 1; + warn + "Test 19 f: path is not correct: expected \'\/$mountpoint\/$ctlinked'\ get \'$res[0]\'"; + } + if ($res[1] ne "201") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 f: owner is not correct: expected \'201\' get \'$res[1]\'"; + } + if ($res[2] ne "images") { + if (!$fail) { + $count++; + $fail = 1; + } + warn "Test 19 f: owner is not correct: expected \'images\' get \'$res[2]\'"; + } }; - if ( $@ ) { - $count++; - warn "Test 19 f: $@"; + if ($@) { + $count++; + warn "Test 19 f: $@"; } }; $tests->{19} = $test19; @@ -234,45 +240,45 @@ my $test18 = sub { my $res; eval { - $res = PVE::Storage::scan_zfs($cfg, $storagename); + $res = PVE::Storage::scan_zfs($cfg, $storagename); - my $exists = 0; - foreach my $subvol (@$res){ - if ($subvol->{pool} eq 'regressiontest') { - $exists++; - } - } - if (!$exists) { - $count++; - warn "Test 18 a: not pool"; - } + my $exists = 0; + foreach my $subvol (@$res) { + if ($subvol->{pool} eq 'regressiontest') { + $exists++; + } + } + if (!$exists) { + $count++; + warn "Test 18 a: not pool"; + } }; - if ( $@ ) { - $count++; - warn "Test 18 a: $@"; + if ($@) { + $count++; + warn "Test 18 a: $@"; } $res = undef; eval { - $res = PVE::Storage::scan_zfs($cfg, $storagename); - - foreach my $subvol (@$res){ - if ($subvol->{pool} eq 'zfspool/subvol') { - $count++; - warn "Test 18 b:"; - } - } + $res = PVE::Storage::scan_zfs($cfg, $storagename); - foreach my $subvol (@$res){ - if ($subvol->{pool} eq 'zfspool/basevol') { - $count++; - warn "Test 18 c"; - } - } + foreach my $subvol (@$res) { + if ($subvol->{pool} eq 'zfspool/subvol') { + $count++; + warn "Test 18 b:"; + } + } + + foreach my $subvol (@$res) { + if ($subvol->{pool} eq 'zfspool/basevol') { + $count++; + warn "Test 18 c"; + } + } }; - if ( $@ ) { - $count++; - warn "Test 18 a: $@"; + if ($@) { + $count++; + warn "Test 18 a: $@"; } }; $tests->{18} = $test18; @@ -282,12 +288,12 @@ my $test17 = sub { print "\nrun test17 \"deactivate_storage\"\n"; eval { - PVE::Storage::activate_storage($cfg, $storagename); - PVE::Storage::deactivate_storage($cfg, $storagename); + PVE::Storage::activate_storage($cfg, $storagename); + PVE::Storage::deactivate_storage($cfg, $storagename); }; if ($@) { - $count++; - warn "Test 17 a: $@"; + $count++; + warn "Test 17 a: $@"; } }; $tests->{17} = $test17; @@ -296,12 +302,10 @@ my $test16 = sub { print "\nrun test16 \"activate_storage\"\n"; - eval { - PVE::Storage::activate_storage($cfg, $storagename); - }; + eval { PVE::Storage::activate_storage($cfg, $storagename); }; if ($@) { - $count++; - warn "Test 16 a: $@"; + $count++; + warn "Test 16 a: $@"; } }; $tests->{16} = $test16; @@ -313,131 +317,145 @@ my $test15 = sub { my $hash = Dumper {}; my $res = Dumper PVE::Storage::template_list($cfg, $storagename, "vztmpl"); - if ( $hash ne $res ) { - $count++; - warn "Test 15 a failed\n"; + if ($hash ne $res) { + $count++; + warn "Test 15 a failed\n"; } $res = undef; $res = Dumper PVE::Storage::template_list($cfg, $storagename, "iso"); - if ( $hash ne $res ) { - $count++; - warn "Test 15 b failed\n"; + if ($hash ne $res) { + $count++; + warn "Test 15 b failed\n"; } $res = undef; $res = Dumper PVE::Storage::template_list($cfg, $storagename, "backup"); - if ( $hash ne $res ) { - $count++; - warn "Test 15 c failed\n"; + if ($hash ne $res) { + 
$count++; + warn "Test 15 c failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => undef, - 'volid' => 'zfstank99:base-100-disk-1', - 'name' => 'base-100-disk-1', - 'vmid' => '100', - 'size' => 1073741824, - 'format' => 'raw' - } - ]}; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => undef, + 'volid' => 'zfstank99:base-100-disk-1', + 'name' => 'base-100-disk-1', + 'vmid' => '100', + 'size' => 1073741824, + 'format' => 'raw', + }, + ], + }; $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 100, ["$storagename:$vmbase"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 d failed\n"; + if ($hash ne $res) { + $count++; + warn "Test 15 d failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => undef, - 'volid' => 'zfstank99:vm-102-disk-1', - 'name' => 'vm-102-disk-1', - 'vmid' => '102', - 'size' => 1073741824, - 'format' => 'raw' - } - ]}; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => undef, + 'volid' => 'zfstank99:vm-102-disk-1', + 'name' => 'vm-102-disk-1', + 'vmid' => '102', + 'size' => 1073741824, + 'format' => 'raw', + }, + ], + }; $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 102, ["$storagename:$vmdisk"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 e failed\n"; + if ($hash ne $res) { + $count++; + warn "Test 15 e failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => 'base-100-disk-1@__base__', - 'volid' => "$storagename:$vmbase\/$vmlinked", - 'name' => 'vm-101-disk-1', - 'vmid' => '101', - 'size' => 1073741824, - 'format' => 'raw' - } - ]}; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => 'base-100-disk-1@__base__', + 'volid' => "$storagename:$vmbase\/$vmlinked", + 'name' => 'vm-101-disk-1', + 'vmid' => '101', + 'size' => 1073741824, + 'format' => 'raw', + }, + ], + }; - $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 101, ["$storagename:$vmbase\/$vmlinked"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 f failed\n"; + $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 101, + ["$storagename:$vmbase\/$vmlinked"]); + if ($hash ne $res) { + $count++; + warn "Test 15 f failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => undef, - 'volid' => 'zfstank99:basevol-200-disk-1', - 'name' => 'basevol-200-disk-1', - 'vmid' => '200', - 'size' => 1073741824, - 'format' => 'subvol' - } - ]}; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => undef, + 'volid' => 'zfstank99:basevol-200-disk-1', + 'name' => 'basevol-200-disk-1', + 'vmid' => '200', + 'size' => 1073741824, + 'format' => 'subvol', + }, + ], + }; - $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 200, ["$storagename:$ctbase"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 g failed\n"; + $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 200, ["$storagename:$ctbase"]); + if ($hash ne $res) { + $count++; + warn "Test 15 g failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => undef, - 'volid' => 'zfstank99:subvol-202-disk-1', - 'name' => 'subvol-202-disk-1', - 'vmid' => '202', - 'size' => 1073741824, - 'format' => 'subvol' - } - ]}; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => undef, + 'volid' => 'zfstank99:subvol-202-disk-1', + 'name' => 'subvol-202-disk-1', + 'vmid' => '202', + 'size' => 1073741824, + 'format' => 'subvol', + }, + ], + }; $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 202, ["$storagename:$ctdisk"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 h failed\n"; + 
if ($hash ne $res) { + $count++; + warn "Test 15 h failed\n"; } $res = undef; - $hash = Dumper {'zfstank99' => [ - { - 'parent' => 'basevol-200-disk-1@__base__', - 'volid' => "$storagename:$ctbase\/$ctlinked", - 'name' => 'subvol-201-disk-1', - 'vmid' => '201', - 'size' => 1073741824, - 'format' => 'subvol' - } - ]}; - $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 201, ["$storagename:$ctbase\/$ctlinked"]); - if ( $hash ne $res ) { - $count++; - warn "Test 15 i failed\n"; + $hash = Dumper { + 'zfstank99' => [ + { + 'parent' => 'basevol-200-disk-1@__base__', + 'volid' => "$storagename:$ctbase\/$ctlinked", + 'name' => 'subvol-201-disk-1', + 'vmid' => '201', + 'size' => 1073741824, + 'format' => 'subvol', + }, + ], + }; + $res = Dumper PVE::Storage::vdisk_list($cfg, $storagename, 201, + ["$storagename:$ctbase\/$ctlinked"]); + if ($hash ne $res) { + $count++; + warn "Test 15 i failed\n"; } }; $tests->{15} = $test15; @@ -447,115 +465,111 @@ my $test14 = sub { print "\nrun test14 \"vdisk_free\"\n"; eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$vmdisk"); + PVE::Storage::vdisk_free($cfg, "$storagename:$vmdisk"); - eval { - run_command("zfs list $zpath\/$vmdisk", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 a: vdisk still exists\n"; - } + eval { + run_command("zfs list $zpath\/$vmdisk", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 a: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 a: $@"; + $count++; + warn "Test14 a: $@"; } - eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase"); - }; + eval { PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase"); }; if (!$@) { - $count++; - warn "Test14 b: free vdisk should not work\n"; + $count++; + warn "Test14 b: free vdisk should not work\n"; } eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase\/$vmlinked"); + PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase\/$vmlinked"); - eval { - run_command("zfs list $zpath\/$vmlinked", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 c: vdisk still exists\n"; - } + eval { + run_command("zfs list $zpath\/$vmlinked", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 c: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 c: $@"; + $count++; + warn "Test14 c: $@"; } eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$ctdisk"); + PVE::Storage::vdisk_free($cfg, "$storagename:$ctdisk"); - eval { - run_command("zfs list $zpath\/$ctdisk", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 d: vdisk still exists\n"; - } + eval { + run_command("zfs list $zpath\/$ctdisk", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 d: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 d: $@"; + $count++; + warn "Test14 d: $@"; } - eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase"); - }; + eval { PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase"); }; if (!$@) { - $count++; - warn "Test14 e: free vdisk should not work\n"; + $count++; + warn "Test14 e: free vdisk should not work\n"; } eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase\/$ctlinked"); + PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase\/$ctlinked"); - eval { - run_command("zfs list $zpath\/$ctlinked", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 f: vdisk still exists\n"; - } + eval { + 
run_command("zfs list $zpath\/$ctlinked", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 f: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 f: $@"; + $count++; + warn "Test14 f: $@"; } eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase"); + PVE::Storage::vdisk_free($cfg, "$storagename:$vmbase"); - eval { - run_command("zfs list $zpath\/$vmbase", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 g: vdisk still exists\n"; - } + eval { + run_command("zfs list $zpath\/$vmbase", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 g: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 g: $@"; + $count++; + warn "Test14 g: $@"; } eval { - PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase"); + PVE::Storage::vdisk_free($cfg, "$storagename:$ctbase"); - eval { - run_command("zfs list $zpath\/$ctbase", outfunc => sub {}, errfunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test14 h: vdisk still exists\n"; - } + eval { + run_command("zfs list $zpath\/$ctbase", outfunc => sub { }, errfunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test14 h: vdisk still exists\n"; + } }; if ($@) { - $count++; - warn "Test14 h: $@"; + $count++; + warn "Test14 h: $@"; } }; $tests->{14} = $test14; @@ -565,99 +579,115 @@ my $test13 = sub { print "\nrun test13 \"vdisk_alloc\"\n"; eval { - my $tmp_volid = PVE::Storage::vdisk_alloc($cfg, $storagename, "112", "raw", undef ,1024 * 1024); + my $tmp_volid = + PVE::Storage::vdisk_alloc($cfg, $storagename, "112", "raw", undef, 1024 * 1024); - if ($tmp_volid ne "$storagename:vm-112-disk-0") { - die "volname:$tmp_volid don't match\n"; - } - eval { - run_command("zfs get -H volsize $zpath\/vm-112-disk-0", outfunc => - sub { my $tmp = shift; - if ($tmp !~ m/^$zpath\/vm-112-disk-0.*volsize.*1G.*$/) { - die "size don't match\n"; - } - }); - }; - if ($@) { - $count++; - warn "Test13 a: $@"; - } + if ($tmp_volid ne "$storagename:vm-112-disk-0") { + die "volname:$tmp_volid don't match\n"; + } + eval { + run_command( + "zfs get -H volsize $zpath\/vm-112-disk-0", + outfunc => sub { + my $tmp = shift; + if ($tmp !~ m/^$zpath\/vm-112-disk-0.*volsize.*1G.*$/) { + die "size don't match\n"; + } + }, + ); + }; + if ($@) { + $count++; + warn "Test13 a: $@"; + } }; if ($@) { - $count++; - warn "Test13 a: $@"; + $count++; + warn "Test13 a: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_alloc($cfg, $storagename, "112", "raw", undef ,2048 * 1024); + my $tmp_volid = + PVE::Storage::vdisk_alloc($cfg, $storagename, "112", "raw", undef, 2048 * 1024); - if ($tmp_volid ne "$storagename:vm-112-disk-1") { - die "volname:$tmp_volid don't match\n"; - } - eval { - run_command("zfs get -H volsize $zpath\/vm-112-disk-1", outfunc => - sub { my $tmp = shift; - if ($tmp !~ m/^$zpath\/vm-112-disk-1.*volsize.*2G.*$/) { - die "size don't match\n"; - } - }); - }; - if ($@) { - $count++; - warn "Test13 b: $@"; - } + if ($tmp_volid ne "$storagename:vm-112-disk-1") { + die "volname:$tmp_volid don't match\n"; + } + eval { + run_command( + "zfs get -H volsize $zpath\/vm-112-disk-1", + outfunc => sub { + my $tmp = shift; + if ($tmp !~ m/^$zpath\/vm-112-disk-1.*volsize.*2G.*$/) { + die "size don't match\n"; + } + }, + ); + }; + if ($@) { + $count++; + warn "Test13 b: $@"; + } }; if ($@) { - $count++; - warn "Test13 b: $@"; + $count++; + warn "Test13 b: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_alloc($cfg, $storagename, "113", "subvol", undef ,1024 * 
1024); + my $tmp_volid = + PVE::Storage::vdisk_alloc($cfg, $storagename, "113", "subvol", undef, 1024 * 1024); - if ($tmp_volid ne "$storagename:subvol-113-disk-0") { - die "volname:$tmp_volid don't match\n"; - } - eval { - run_command("zfs get -H refquota $zpath\/subvol-113-disk-0", outfunc => - sub { my $tmp = shift; - if ($tmp !~ m/^$zpath\/subvol-113-disk-0.*refquota.*1G.*$/) { - die "size don't match\n"; - } - }); - }; - if ($@) { - $count++; - warn "Test13 c: $@"; - } + if ($tmp_volid ne "$storagename:subvol-113-disk-0") { + die "volname:$tmp_volid don't match\n"; + } + eval { + run_command( + "zfs get -H refquota $zpath\/subvol-113-disk-0", + outfunc => sub { + my $tmp = shift; + if ($tmp !~ m/^$zpath\/subvol-113-disk-0.*refquota.*1G.*$/) { + die "size don't match\n"; + } + }, + ); + }; + if ($@) { + $count++; + warn "Test13 c: $@"; + } }; if ($@) { - $count++; - warn "Test13 c: $@"; + $count++; + warn "Test13 c: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_alloc($cfg, $storagename, "113", "subvol", undef ,2048 * 1024); + my $tmp_volid = + PVE::Storage::vdisk_alloc($cfg, $storagename, "113", "subvol", undef, 2048 * 1024); - if ($tmp_volid ne "$storagename:subvol-113-disk-1") { - die "volname:$tmp_volid don't match\n"; - } - eval { - run_command("zfs get -H refquota $zpath\/subvol-113-disk-1", outfunc => - sub { my $tmp = shift; - if ($tmp !~ m/^$zpath\/subvol-113-disk-1.*refquota.*G.*$/) { - die "size don't match\n"; - } - }); - }; - if ($@) { - $count++; - warn "Test13 d: $@"; - } + if ($tmp_volid ne "$storagename:subvol-113-disk-1") { + die "volname:$tmp_volid don't match\n"; + } + eval { + run_command( + "zfs get -H refquota $zpath\/subvol-113-disk-1", + outfunc => sub { + my $tmp = shift; + if ($tmp !~ m/^$zpath\/subvol-113-disk-1.*refquota.*G.*$/) { + die "size don't match\n"; + } + }, + ); + }; + if ($@) { + $count++; + warn "Test13 d: $@"; + } }; if ($@) { - $count++; - warn "Test13 d: $@"; + $count++; + warn "Test13 d: $@"; } }; $tests->{13} = $test13; @@ -667,79 +697,79 @@ my $test12 = sub { print "\nrun test12 \"vdisk_create_base\"\n"; eval { - my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$vmdisk"); + my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$vmdisk"); - if ($tmp_volid ne "$storagename:base-102-disk-1") { - die; - } - eval { - run_command("zfs list $zpath\/base-102-disk-1", outfunc => sub {}); - }; - if ($@) { - $count++; - warn "Test12 a: $@"; - } + if ($tmp_volid ne "$storagename:base-102-disk-1") { + die; + } + eval { + run_command("zfs list $zpath\/base-102-disk-1", outfunc => sub { }); + }; + if ($@) { + $count++; + warn "Test12 a: $@"; + } }; if ($@) { - $count++; - warn "Test12 a: $@"; + $count++; + warn "Test12 a: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$vmlinked"); + my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$vmlinked"); - if ($tmp_volid ne "$storagename:base-101-disk-1") { - die; - } - eval { - run_command("zfs list $zpath\/base-101-disk-1", outfunc => sub {}); - }; - if ($@) { - $count++; - warn "Test12 b: $@"; - } + if ($tmp_volid ne "$storagename:base-101-disk-1") { + die; + } + eval { + run_command("zfs list $zpath\/base-101-disk-1", outfunc => sub { }); + }; + if ($@) { + $count++; + warn "Test12 b: $@"; + } }; if ($@) { - $count++; - warn "Test12 b: $@"; + $count++; + warn "Test12 b: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$ctdisk"); + my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, 
"$storagename:$ctdisk"); - if ($tmp_volid ne "$storagename:basevol-202-disk-1") { - die ; - } - eval { - run_command("zfs list $zpath\/basevol-202-disk-1", outfunc => sub {}); - }; - if ($@) { - $count++; - warn "Test12 c: $@"; - } + if ($tmp_volid ne "$storagename:basevol-202-disk-1") { + die; + } + eval { + run_command("zfs list $zpath\/basevol-202-disk-1", outfunc => sub { }); + }; + if ($@) { + $count++; + warn "Test12 c: $@"; + } }; if ($@) { - $count++; - warn "Test12 c: $@"; + $count++; + warn "Test12 c: $@"; } eval { - my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$ctlinked"); + my $tmp_volid = PVE::Storage::vdisk_create_base($cfg, "$storagename:$ctlinked"); - if ($tmp_volid ne "$storagename:basevol-201-disk-1") { - die; - } - eval { - run_command("zfs list $zpath\/basevol-201-disk-1", outfunc => sub {}); - }; - if ($@) { - $count++; - warn "Test12 d: $@"; - } + if ($tmp_volid ne "$storagename:basevol-201-disk-1") { + die; + } + eval { + run_command("zfs list $zpath\/basevol-201-disk-1", outfunc => sub { }); + }; + if ($@) { + $count++; + warn "Test12 d: $@"; + } }; if ($@) { - $count++; - warn "Test12 d: $@"; + $count++; + warn "Test12 d: $@"; } }; $tests->{12} = $test12; @@ -748,240 +778,240 @@ my $test11 = sub { print "\nrun test11 \"volume_is_base\"\n"; - eval { - PVE::Storage::vdisk_clone($cfg, "$storagename:$vmdisk", 110); - }; + eval { PVE::Storage::vdisk_clone($cfg, "$storagename:$vmdisk", 110); }; if (!$@) { - $count++; - warn "Test11 a: clone_image only works on base images"; + $count++; + warn "Test11 a: clone_image only works on base images"; } eval { - if ("$storagename:$vmbase\/vm-110-disk-0" ne - PVE::Storage::vdisk_clone($cfg, "$storagename:$vmbase", 110, '__base__')){ - $count++; - warn "Test11 b"; - } - run_command("zfs list -H -o volsize $zpath\/vm-110-disk-0", outfunc => sub { - my $line = shift; + if ("$storagename:$vmbase\/vm-110-disk-0" ne + PVE::Storage::vdisk_clone($cfg, "$storagename:$vmbase", 110, '__base__') + ) { + $count++; + warn "Test11 b"; + } + run_command( + "zfs list -H -o volsize $zpath\/vm-110-disk-0", + outfunc => sub { + my $line = shift; - chomp($line); - warn "Test11 b not correct volsize" if $line !~ m/$volsize/; - }); + chomp($line); + warn "Test11 b not correct volsize" if $line !~ m/$volsize/; + }, + ); }; if ($@) { - $count++; - warn "Test11 b: $@"; + $count++; + warn "Test11 b: $@"; } - eval { - PVE::Storage::vdisk_clone($cfg, "$storagename:$vmbase\/$vmlinked", 111); - }; + eval { PVE::Storage::vdisk_clone($cfg, "$storagename:$vmbase\/$vmlinked", 111); }; if (!$@) { - $count++; - warn "Test11 c: clone_image only works on base images"; + $count++; + warn "Test11 c: clone_image only works on base images"; } - eval { - PVE::Storage::vdisk_clone($cfg, "$storagename:$ctdisk", 110); - }; + eval { PVE::Storage::vdisk_clone($cfg, "$storagename:$ctdisk", 110); }; if (!$@) { - $count++; - warn "Test11 d: clone_image only works on base images"; + $count++; + warn "Test11 d: clone_image only works on base images"; } eval { - if ( "$storagename:$ctbase\/subvol-210-disk-0" ne - PVE::Storage::vdisk_clone($cfg, "$storagename:$ctbase", 210, '__base__')){ - $count++; - warn "Test11 e"; - } - run_command("zfs list -H -o refquota $zpath\/subvol-210-disk-0", outfunc => sub { - my $line = shift; + if ("$storagename:$ctbase\/subvol-210-disk-0" ne + PVE::Storage::vdisk_clone($cfg, "$storagename:$ctbase", 210, '__base__') + ) { + $count++; + warn "Test11 e"; + } + run_command( + "zfs list -H -o refquota $zpath\/subvol-210-disk-0", + 
outfunc => sub { + my $line = shift; - chomp($line); - warn "Test11 e not correct volsize" if $line !~ m/$volsize/; - }); + chomp($line); + warn "Test11 e not correct volsize" if $line !~ m/$volsize/; + }, + ); }; if ($@) { - $count++; - warn "Test11 e: $@"; + $count++; + warn "Test11 e: $@"; } - eval { - PVE::Storage::vdisk_clone($cfg, "$storagename:$ctbase\/$ctlinked", 211); - }; + eval { PVE::Storage::vdisk_clone($cfg, "$storagename:$ctbase\/$ctlinked", 211); }; if (!$@) { - $count++; - warn "Test11 f: clone_image only works on base images"; + $count++; + warn "Test11 f: clone_image only works on base images"; } }; $tests->{11} = $test11; -my $test10 =sub { +my $test10 = sub { print "\nrun test10 \"volume_is_base\"\n"; eval { - if (1 == volume_is_base($cfg, "$storagename:$vmdisk")) { - $count++; - warn "Test10 a: is no base"; - } + if (1 == volume_is_base($cfg, "$storagename:$vmdisk")) { + $count++; + warn "Test10 a: is no base"; + } }; if ($@) { - $count++; - warn "Test10 a: $@"; + $count++; + warn "Test10 a: $@"; } eval { - if (0 == volume_is_base($cfg, "$storagename:$vmbase")) { - $count++; - warn "Test10 b: is base"; - } + if (0 == volume_is_base($cfg, "$storagename:$vmbase")) { + $count++; + warn "Test10 b: is base"; + } }; if ($@) { - $count++; - warn "Test10 b: $@"; + $count++; + warn "Test10 b: $@"; } eval { - if (1 == volume_is_base($cfg, "$storagename:$vmbase\/$vmlinked")) { - $count++; - warn "Test10 c: is no base"; - } + if (1 == volume_is_base($cfg, "$storagename:$vmbase\/$vmlinked")) { + $count++; + warn "Test10 c: is no base"; + } }; if ($@) { - $count++; - warn "Test10 c: $@"; + $count++; + warn "Test10 c: $@"; } eval { - if (1 == volume_is_base($cfg, "$storagename:$ctdisk")) { - $count++; - warn "Test10 d: is no base"; - } + if (1 == volume_is_base($cfg, "$storagename:$ctdisk")) { + $count++; + warn "Test10 d: is no base"; + } }; if ($@) { - $count++; - warn "Test10 d: $@"; + $count++; + warn "Test10 d: $@"; } eval { - if (0 == volume_is_base($cfg, "$storagename:$ctbase")) { - $count++; - warn "Test10 e: is base"; - } + if (0 == volume_is_base($cfg, "$storagename:$ctbase")) { + $count++; + warn "Test10 e: is base"; + } }; if ($@) { - $count++; - warn "Test10 e: $@"; + $count++; + warn "Test10 e: $@"; } eval { - if (1 == volume_is_base($cfg, "$storagename:$ctbase\/$ctlinked")) { - $count++; - warn "Test10 f: is no base"; - } + if (1 == volume_is_base($cfg, "$storagename:$ctbase\/$ctlinked")) { + $count++; + warn "Test10 f: is no base"; + } }; if ($@) { - $count++; - warn "Test10 f: $@"; + $count++; + warn "Test10 f: $@"; } }; $tests->{10} = $test10; -my $test9 =sub { +my $test9 = sub { print "\nrun test9 \"parse_volume_id\"\n"; eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmdisk"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmdisk"); - if ($store ne $storagename || $disk ne $vmdisk) { - $count++; - warn "Test9 a: parsing wrong"; - } + if ($store ne $storagename || $disk ne $vmdisk) { + $count++; + warn "Test9 a: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 a: $@"; + $count++; + warn "Test9 a: $@"; } eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmbase"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmbase"); - if ($store ne $storagename || $disk ne $vmbase) { - $count++; - warn "Test9 b: parsing wrong"; - } + if ($store ne $storagename || $disk ne $vmbase) { + $count++; + warn "Test9 b: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 b: $@"; 
+ $count++; + warn "Test9 b: $@"; } eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmbase\/$vmlinked"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$vmbase\/$vmlinked"); - if ($store ne $storagename || $disk ne "$vmbase\/$vmlinked") { - $count++; - warn "Test9 c: parsing wrong"; - } + if ($store ne $storagename || $disk ne "$vmbase\/$vmlinked") { + $count++; + warn "Test9 c: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 c: $@"; + $count++; + warn "Test9 c: $@"; } eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctdisk"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctdisk"); - if ($store ne $storagename || $disk ne $ctdisk) { - $count++; - warn "Test9 d: parsing wrong"; - } + if ($store ne $storagename || $disk ne $ctdisk) { + $count++; + warn "Test9 d: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 d: $@"; + $count++; + warn "Test9 d: $@"; } eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctbase"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctbase"); - if ($store ne $storagename || $disk ne $ctbase) { - $count++; - warn "Test9 e: parsing wrong"; - } + if ($store ne $storagename || $disk ne $ctbase) { + $count++; + warn "Test9 e: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 e: $@"; + $count++; + warn "Test9 e: $@"; } eval { - my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctbase\/$ctlinked"); + my ($store, $disk) = PVE::Storage::parse_volume_id("$storagename:$ctbase\/$ctlinked"); - if ($store ne $storagename || $disk ne "$ctbase\/$ctlinked") { - $count++; - warn "Test9 f: parsing wrong"; - } + if ($store ne $storagename || $disk ne "$ctbase\/$ctlinked") { + $count++; + warn "Test9 f: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test9 f: $@"; + $count++; + warn "Test9 f: $@"; } }; $tests->{9} = $test9; @@ -991,94 +1021,136 @@ my $test8 = sub { print "\nrun test8 \"parse_volname\"\n"; eval { - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$vmdisk"); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$vmdisk"); - if ($vtype ne 'images' || $vmid ne '102' || $name ne $vmdisk || - defined($basename) || defined($basevmid) || $isBase || - $format ne 'raw') { - $count++; - warn "Test8 a: parsing wrong"; - } + if ( + $vtype ne 'images' + || $vmid ne '102' + || $name ne $vmdisk + || defined($basename) + || defined($basevmid) + || $isBase + || $format ne 'raw' + ) { + $count++; + warn "Test8 a: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 a: $@"; + $count++; + warn "Test8 a: $@"; } eval { - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$vmbase"); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$vmbase"); - if ($vtype ne 'images' || $vmid ne '100' || $name ne $vmbase || - defined($basename) || defined($basevmid) || !$isBase || - $format ne 'raw') { - $count++; - warn "Test8 b: parsing wrong"; - } + if ( + $vtype ne 'images' + || $vmid ne '100' + || $name ne $vmbase + || defined($basename) + || defined($basevmid) + || !$isBase + || $format ne 'raw' + ) { + $count++; + warn "Test8 b: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 b: $@"; + $count++; + warn "Test8 b: $@"; } eval { - my 
($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$vmbase\/$vmlinked"); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$vmbase\/$vmlinked"); - if ($vtype ne 'images' || $name ne $vmlinked || $vmid ne '101' || - $basename ne $vmbase || $basevmid ne '100' || $isBase || - $format ne 'raw') { - $count++; - warn "Test8 c: parsing wrong"; - } + if ( + $vtype ne 'images' + || $name ne $vmlinked + || $vmid ne '101' + || $basename ne $vmbase + || $basevmid ne '100' + || $isBase + || $format ne 'raw' + ) { + $count++; + warn "Test8 c: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 c: $@"; + $count++; + warn "Test8 c: $@"; } eval { - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$ctdisk"); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$ctdisk"); - if ($vtype ne 'images' || $vmid ne '202' || $name ne $ctdisk || - defined($basename) || defined($basevmid) || $isBase || - $format ne 'subvol') { - $count++; - warn "Test8 d: parsing wrong"; - } + if ( + $vtype ne 'images' + || $vmid ne '202' + || $name ne $ctdisk + || defined($basename) + || defined($basevmid) + || $isBase + || $format ne 'subvol' + ) { + $count++; + warn "Test8 d: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 d: $@"; + $count++; + warn "Test8 d: $@"; } eval { - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$ctbase"); - if ($vtype ne 'images' || $vmid ne '200' || $name ne $ctbase || - defined($basename) || defined($basevmid) || !$isBase || - $format ne 'subvol') { - $count++; - warn "Test8 e: parsing wrong"; - } + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$ctbase"); + if ( + $vtype ne 'images' + || $vmid ne '200' + || $name ne $ctbase + || defined($basename) + || defined($basevmid) + || !$isBase + || $format ne 'subvol' + ) { + $count++; + warn "Test8 e: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 e: $@"; + $count++; + warn "Test8 e: $@"; } eval { - my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = PVE::Storage::parse_volname($cfg, "$storagename:$ctbase\/$ctlinked"); + my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = + PVE::Storage::parse_volname($cfg, "$storagename:$ctbase\/$ctlinked"); - if ($vtype ne 'images' || $name ne $ctlinked || $vmid ne '201' || - $basename ne $ctbase || $basevmid ne '200' || $isBase || - $format ne 'subvol') { - $count++; - warn "Test8 f: parsing wrong"; - } + if ( + $vtype ne 'images' + || $name ne $ctlinked + || $vmid ne '201' + || $basename ne $ctbase + || $basevmid ne '200' + || $isBase + || $format ne 'subvol' + ) { + $count++; + warn "Test8 f: parsing wrong"; + } }; if ($@) { - $count++; - warn "Test8 f: $@"; + $count++; + warn "Test8 f: $@"; } }; $tests->{8} = $test8; @@ -1089,269 +1161,287 @@ my $test7 = sub { my $tmp_guid; my $parse_guid = sub { - my ($line) = shift; + my ($line) = shift; - if ( $line =~ m/^Disk identifier \(GUID\)\: (.*)$/ ) { - $tmp_guid = $1; - } + if ($line =~ m/^Disk identifier \(GUID\)\: (.*)$/) { + $tmp_guid = $1; + } }; eval { - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmdisk"]); - run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); 
- run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmdisk"]); + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmdisk", + outfunc => $parse_guid, + ); + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); - my $old_guid = $tmp_guid; - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap1'); + my $old_guid = $tmp_guid; + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap1'); - run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmdisk", 'snap1'); - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmdisk"]); - $tmp_guid = undef; - run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); - if ($old_guid ne $tmp_guid) { - $count++; - warn "Test7 a: Zvol makes no rollback"; - } - }; - if ($@) { - $count++; - warn "Test7 a: $@"; - } + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmdisk", + outfunc => $parse_guid, + ); + eval { + PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmdisk", 'snap1'); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmdisk"]); + $tmp_guid = undef; + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmdisk", outfunc => $parse_guid); + if ($old_guid ne $tmp_guid) { + $count++; + warn "Test7 a: Zvol makes no rollback"; + } + }; + if ($@) { + $count++; + warn "Test7 a: $@"; + } }; if ($@) { - $count++; - warn "Test7 a: $@"; + $count++; + warn "Test7 a: $@"; } $tmp_guid = undef; eval { - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase"]); - run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); - run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase"]); + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmbase", + outfunc => $parse_guid, + ); + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); - my $old_guid = $tmp_guid; - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap1'); + my $old_guid = $tmp_guid; + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap1'); - run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase", 'snap1'); - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase"]); - $tmp_guid = undef; - run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); - if ($old_guid ne $tmp_guid) { - $count++; - warn "Test7 b: Zvol makes no rollback"; - } - }; - if ($@) { - $count++; - warn "Test7 b: $@"; - } + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmbase", + outfunc => $parse_guid, + ); + eval { + PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase", 'snap1'); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase"]); + $tmp_guid = undef; + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmbase", outfunc => $parse_guid); + if ($old_guid ne $tmp_guid) { + $count++; + warn "Test7 b: Zvol makes no rollback"; + } + }; + if ($@) { + $count++; + warn "Test7 b: $@"; + } }; if ($@) { - $count++; - warn "Test7 b: $@"; + $count++; + warn "Test7 b: $@"; } $tmp_guid = undef; eval { - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase/$vmlinked"]); - 
run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); - run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase/$vmlinked"]); + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmlinked", + outfunc => $parse_guid, + ); + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); - my $old_guid = $tmp_guid; - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1'); + my $old_guid = $tmp_guid; + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1'); - run_command("sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1'); - PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase/$vmlinked"]); - $tmp_guid = undef; - run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); - if ($old_guid ne $tmp_guid) { - $count++; - warn "Test7 c: Zvol makes no rollback"; - } - }; - if ($@) { - $count++; - warn "Test7 c: $@"; - } + run_command( + "sgdisk --randomize-guids \/dev\/zvol\/$zpath\/$vmlinked", + outfunc => $parse_guid, + ); + eval { + PVE::Storage::volume_snapshot_rollback( + $cfg, "$storagename:$vmbase\/$vmlinked", 'snap1', + ); + PVE::Storage::activate_volumes($cfg, ["$storagename:$vmbase/$vmlinked"]); + $tmp_guid = undef; + run_command("sgdisk -p \/dev\/zvol\/$zpath\/$vmlinked", outfunc => $parse_guid); + if ($old_guid ne $tmp_guid) { + $count++; + warn "Test7 c: Zvol makes no rollback"; + } + }; + if ($@) { + $count++; + warn "Test7 c: $@"; + } }; if ($@) { - $count++; - warn "Test7 c: $@"; + $count++; + warn "Test7 c: $@"; } $tmp_guid = undef; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap1'); - run_command("touch \/$mountpoint\/$ctdisk\/test.txt", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctdisk", 'snap1'); - eval { - run_command("ls \/$mountpoint\/$ctdisk\/test.txt", errofunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test7 d: $@"; - } - }; - if ($@) { - $count++; - warn "Test7 d: $@"; - } + run_command("touch \/$mountpoint\/$ctdisk\/test.txt", outfunc => $parse_guid); + eval { + PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctdisk", 'snap1'); + eval { + run_command("ls \/$mountpoint\/$ctdisk\/test.txt", errofunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test7 d: $@"; + } + }; + if ($@) { + $count++; + warn "Test7 d: $@"; + } }; if ($@) { - $count++; - warn "Test7 d: $@"; + $count++; + warn "Test7 d: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap1'); - run_command("touch \/$mountpoint\/$ctbase\/test.txt", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase", 'snap1'); - eval { - run_command("ls \/$mountpoint\/$ctbase\/test.txt", errofunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test7 e: $@"; - } - }; - if ($@) { - $count++; - warn "Test7 e: $@"; - } + run_command("touch \/$mountpoint\/$ctbase\/test.txt", outfunc => $parse_guid); + eval { + PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase", 'snap1'); + eval { + run_command("ls \/$mountpoint\/$ctbase\/test.txt", 
errofunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test7 e: $@"; + } + }; + if ($@) { + $count++; + warn "Test7 e: $@"; + } }; if ($@) { - $count++; - warn "Test7 f: $@"; + $count++; + warn "Test7 f: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase/$ctlinked", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase/$ctlinked", 'snap1'); - run_command("touch \/$mountpoint\/$ctlinked\/test.txt", outfunc => $parse_guid); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase/$ctlinked", 'snap1'); - eval { - run_command("ls \/$zpath\/$ctlinked\/test.txt", errofunc => sub {}); - }; - if (!$@) { - $count++; - warn "Test7 g: $@"; - } - }; - if ($@) { - $count++; - warn "Test7 g: $@"; - } + run_command("touch \/$mountpoint\/$ctlinked\/test.txt", outfunc => $parse_guid); + eval { + PVE::Storage::volume_snapshot_rollback( + $cfg, "$storagename:$ctbase/$ctlinked", 'snap1', + ); + eval { + run_command("ls \/$zpath\/$ctlinked\/test.txt", errofunc => sub { }); + }; + if (!$@) { + $count++; + warn "Test7 g: $@"; + } + }; + if ($@) { + $count++; + warn "Test7 g: $@"; + } }; if ($@) { - $count++; - warn "Test7 g: $@"; + $count++; + warn "Test7 g: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmdisk", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 h: Not allowed to rollback"; - } + eval { PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmdisk", 'snap1'); }; + if (!$@) { + $count++; + warn "Test7 h: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 h: $@"; + $count++; + warn "Test7 h: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 i: Not allowed to rollback"; - } + eval { PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase", 'snap1'); }; + if (!$@) { + $count++; + warn "Test7 i: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 i: $@"; + $count++; + warn "Test7 i: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 j: Not allowed to rollback"; - } + eval { + PVE::Storage::volume_snapshot_rollback( + $cfg, "$storagename:$vmbase\/$vmlinked", 'snap1', + ); + }; + if (!$@) { + $count++; + warn "Test7 j: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 j: $@"; + $count++; + warn "Test7 j: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctdisk", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 k: Not allowed to rollback"; - } + eval { PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctdisk", 'snap1'); }; + if (!$@) { + $count++; + warn "Test7 k: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 k: $@"; + $count++; + warn "Test7 k: $@"; } eval { - 
PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 l: Not allowed to rollback"; - } + eval { PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase", 'snap1'); }; + if (!$@) { + $count++; + warn "Test7 l: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 l: $@"; + $count++; + warn "Test7 l: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase/$ctlinked", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase/$ctlinked", 'snap2'); - eval { - PVE::Storage::volume_snapshot_rollback($cfg, "$storagename:$ctbase/$ctlinked", 'snap1'); - }; - if (!$@) { - $count++; - warn "Test7 m: Not allowed to rollback"; - } + eval { + PVE::Storage::volume_snapshot_rollback( + $cfg, "$storagename:$ctbase/$ctlinked", 'snap1', + ); + }; + if (!$@) { + $count++; + warn "Test7 m: Not allowed to rollback"; + } }; if ($@) { - $count++; - warn "Test7 m: $@"; + $count++; + warn "Test7 m: $@"; } }; $tests->{7} = $test7; @@ -1361,168 +1451,182 @@ my $test6 = sub { print "\nrun test6 \"volume_rollback_is_possible\"\n"; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap1'); - my $blockers = []; - my $res = PVE::Storage::volume_rollback_is_possible( - $cfg, - "$storagename:$vmdisk", - 'snap1', - $blockers, - ); - if ($res != 1) { - $count++; - warn "Test6 a: Rollback should be possible"; - } - if (scalar($blockers->@*) != 0) { - $count++; - warn "Test6 a: 'blockers' should be empty"; - } + my $blockers = []; + my $res = PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$vmdisk", 'snap1', $blockers, + ); + if ($res != 1) { + $count++; + warn "Test6 a: Rollback should be possible"; + } + if (scalar($blockers->@*) != 0) { + $count++; + warn "Test6 a: 'blockers' should be empty"; + } }; if ($@) { - $count++; - warn "Test6 a: $@"; + $count++; + warn "Test6 a: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap1'); - if ( 1 != - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmbase", 'snap1')) { - $count++; - warn "Test6 b: Rollback should be possible"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap1'); + if (1 != + PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmbase", 'snap1') + ) { + $count++; + warn "Test6 b: Rollback should be possible"; + } }; if ($@) { - $count++; - warn "Test6 b: $@"; + $count++; + warn "Test6 b: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmlinked", 'snap1'); - if ( 1 != - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1')) { - $count++; - warn "Test6 c: Rollback should be possible"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmlinked", 'snap1'); + if ( + 1 != PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$vmbase\/$vmlinked", 'snap1', + ) + ) { + $count++; + warn "Test6 c: Rollback should be possible"; + } }; if ($@) { - $count++; - warn "Test6 c: $@"; + $count++; + warn "Test6 c: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap1'); - if ( 1 != - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctdisk", 'snap1')) { - $count++; - warn "Test6 d: Rollback should be possible"; - } + 
PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap1'); + if (1 != + PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctdisk", 'snap1') + ) { + $count++; + warn "Test6 d: Rollback should be possible"; + } }; if ($@) { - $count++; - warn "Test6 d: $@"; + $count++; + warn "Test6 d: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap1'); - if ( 1 != - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctbase", 'snap1')) { - $count++; - warn "Test6 e: Rollback should be possible"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap1'); + if (1 != + PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctbase", 'snap1') + ) { + $count++; + warn "Test6 e: Rollback should be possible"; + } }; if ($@) { - $count++; - warn "Test6 e: $@"; + $count++; + warn "Test6 e: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctlinked", 'snap1'); - if ( 1 != - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctbase\/$ctlinked", 'snap1')) { - $count++; - warn "Test6 f: Rollback should be possible"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctlinked", 'snap1'); + if ( + 1 != PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$ctbase\/$ctlinked", 'snap1', + ) + ) { + $count++; + warn "Test6 f: Rollback should be possible"; + } }; if ($@) { - $count++; - warn "Test6 f: $@"; + $count++; + warn "Test6 f: $@"; } my $blockers = []; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap2'); - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmdisk", 'snap1', $blockers); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap2'); + PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$vmdisk", 'snap1', $blockers, + ); }; if (!$@) { - $count++; - warn "Test6 g: Rollback should not be possible"; + $count++; + warn "Test6 g: Rollback should not be possible"; } elsif (scalar($blockers->@*) != 1 || $blockers->[0] ne 'snap2') { - $count++; - warn "Test6 g: 'blockers' should be ['snap2']"; + $count++; + warn "Test6 g: 'blockers' should be ['snap2']"; } undef $blockers; $blockers = []; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap2'); - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap3'); - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmbase", 'snap1', $blockers); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap3'); + PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$vmbase", 'snap1', $blockers, + ); }; if (!$@) { - $count++; - warn "Test6 h: Rollback should not be possible"; + $count++; + warn "Test6 h: Rollback should not be possible"; } else { - if (scalar($blockers->@*) != 2) { - $count++; - warn "Test6 g: 'blockers' should contain two elements"; - } - my $blockers_hash = { map { $_ => 1 } $blockers->@* }; - if (!$blockers_hash->{'snap2'}) { - $count++; - warn "Test6 g: 'blockers' should contain 'snap2'"; - } - if (!$blockers_hash->{'snap3'}) { - $count++; - warn "Test6 g: 'blockers' should contain 'snap3'"; - } + if (scalar($blockers->@*) != 2) { + $count++; + warn "Test6 g: 'blockers' should contain two elements"; + } + my $blockers_hash = { map { $_ => 1 } $blockers->@* }; + if (!$blockers_hash->{'snap2'}) { + $count++; + warn "Test6 g: 'blockers' should contain 'snap2'"; + } + if (!$blockers_hash->{'snap3'}) { + $count++; + warn "Test6 g: 
'blockers' should contain 'snap3'"; + } } undef $blockers; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmlinked", 'snap2'); - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$vmbase\/$vmlinked", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmlinked", 'snap2'); + PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$vmbase\/$vmlinked", 'snap1', + ); }; if (!$@) { - $count++; - warn "Test6 j: Rollback should not be possible"; + $count++; + warn "Test6 j: Rollback should not be possible"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap2'); - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctdisk", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap2'); + PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctdisk", 'snap1'); }; if (!$@) { - $count++; - warn "Test6 k: Rollback should not be possible"; + $count++; + warn "Test6 k: Rollback should not be possible"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap2'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap2'); PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctbase", 'snap1'); }; if (!$@) { - $count++; - warn "Test6 l: Rollback should not be possible"; + $count++; + warn "Test6 l: Rollback should not be possible"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctlinked", 'snap2'); - PVE::Storage::volume_rollback_is_possible($cfg, "$storagename:$ctbase\/$ctlinked", 'snap1'); + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctlinked", 'snap2'); + PVE::Storage::volume_rollback_is_possible( + $cfg, "$storagename:$ctbase\/$ctlinked", 'snap1', + ); }; if (!$@) { - $count++; - warn "Test6 m: Rollback should not be possible"; + $count++; + warn "Test6 m: Rollback should not be possible"; } }; $tests->{6} = $test6; @@ -1530,153 +1634,163 @@ $tests->{6} = $test6; my $test5 = sub { print "\nrun test5 \"volume_snapshot_delete\"\n"; - my $out = sub{my $tmp = shift;}; + my $out = sub { my $tmp = shift; }; eval { - run_command("zfs snapshot $zpath\/$vmdisk\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmdisk", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmdisk\@snap", errfunc => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 a: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE a: $@"; - } + run_command("zfs snapshot $zpath\/$vmdisk\@snap"); + eval { + PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmdisk", 'snap'); + eval { + run_command("zfs list $zpath\/$vmdisk\@snap", errfunc => $out, outfunc => $out); + }; + if (!$@) { + $count++; + warn "Test5 a: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE a: $@"; + } }; if ($@) { - $count++; - warn "Test5 a: $@"; + $count++; + warn "Test5 a: $@"; } eval { - run_command("zfs snapshot $zpath\/$vmbase\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmbase", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmbase\@snap", errmsg => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 b: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE b: $@"; - } + run_command("zfs snapshot $zpath\/$vmbase\@snap"); + eval { + PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmbase", 'snap'); + eval { + run_command("zfs list $zpath\/$vmbase\@snap", errmsg => $out, outfunc => $out); + }; + if (!$@) { + $count++; + warn 
"Test5 b: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE b: $@"; + } }; if ($@) { - $count++; - warn "Test5 b: $@"; + $count++; + warn "Test5 b: $@"; } eval { - run_command("zfs snapshot $zpath\/$vmlinked\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmbase\/$vmlinked", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmlinked\@snap", errmsg => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 c: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE c: $@"; - } + run_command("zfs snapshot $zpath\/$vmlinked\@snap"); + eval { + PVE::Storage::volume_snapshot_delete( + $cfg, "$storagename:$vmbase\/$vmlinked", 'snap', + ); + eval { + run_command( + "zfs list $zpath\/$vmlinked\@snap", + errmsg => $out, + outfunc => $out, + ); + }; + if (!$@) { + $count++; + warn "Test5 c: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE c: $@"; + } }; if ($@) { - $count++; - warn "Test5 c: $@"; + $count++; + warn "Test5 c: $@"; } eval { - run_command("zfs snapshot $zpath\/$ctdisk\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$ctdisk", 'snap'); - eval{ - run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 d: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE d: $@"; - } + run_command("zfs snapshot $zpath\/$ctdisk\@snap"); + eval { + PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$ctdisk", 'snap'); + eval { + run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); + }; + if (!$@) { + $count++; + warn "Test5 d: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE d: $@"; + } }; if ($@) { - $count++; - warn "Test5 d: $@"; + $count++; + warn "Test5 d: $@"; } eval { - run_command("zfs snapshot $zpath\/$ctbase\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$ctbase", 'snap'); - eval{ - run_command("zfs list $zpath\/$ctbase\@snap", errmsg => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 e: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE e: $@"; - } + run_command("zfs snapshot $zpath\/$ctbase\@snap"); + eval { + PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$ctbase", 'snap'); + eval { + run_command("zfs list $zpath\/$ctbase\@snap", errmsg => $out, outfunc => $out); + }; + if (!$@) { + $count++; + warn "Test5 e: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE e: $@"; + } }; if ($@) { - $count++; - warn "Test5 e: $@"; + $count++; + warn "Test5 e: $@"; } eval { - run_command("zfs snapshot $zpath\/$ctlinked\@snap"); - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$ctbase\/$ctlinked", 'snap'); - eval{ - run_command("zfs list $zpath\/$ctlinked\@snap", errmsg => $out, outfunc => $out); - }; - if (!$@) { - $count++; - warn "Test5 f: snapshot still exists"; - } - }; - if ($@) { - $count++; - warn "Test5 PVE f: $@"; - } + run_command("zfs snapshot $zpath\/$ctlinked\@snap"); + eval { + PVE::Storage::volume_snapshot_delete( + $cfg, "$storagename:$ctbase\/$ctlinked", 'snap', + ); + eval { + run_command( + "zfs list $zpath\/$ctlinked\@snap", + errmsg => $out, + outfunc => $out, + ); + }; + if (!$@) { + $count++; + warn "Test5 f: snapshot still exists"; + } + }; + if ($@) { + $count++; + warn "Test5 PVE f: $@"; + } }; if ($@) { - $count++; - warn "Test5 f: $@"; + $count++; + warn "Test5 f: $@"; } print 
"######Ignore Output if no Test5 g: is included######\n"; - eval{ - PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmbase", '__base__'); - eval{ - run_command("zfs list $zpath\/$vmbase\@__base__", outfunc => $out); - }; - if ($@) { - $count++; - warn "Test5 g: $@"; - } + eval { + PVE::Storage::volume_snapshot_delete($cfg, "$storagename:$vmbase", '__base__'); + eval { run_command("zfs list $zpath\/$vmbase\@__base__", outfunc => $out); }; + if ($@) { + $count++; + warn "Test5 g: $@"; + } }; if (!$@) { - $count++; - warn "Test5 PVE g: snapshot __base__ can be erased"; + $count++; + warn "Test5 PVE g: snapshot __base__ can be erased"; } print "######End Ignore#######\n"; }; @@ -1685,96 +1799,84 @@ $tests->{5} = $test5; my $test4 = sub { print "\nrun test4 \"volume_snapshot\"\n"; - my $out = sub{}; + my $out = sub { }; eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmdisk\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 a: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmdisk", 'snap'); + eval { run_command("zfs list $zpath\/$vmdisk\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 a: $@"; + } }; if ($@) { - $count++; - warn "Test4 a: $@"; + $count++; + warn "Test4 a: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmbase\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 b: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase", 'snap'); + eval { run_command("zfs list $zpath\/$vmbase\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 b: $@"; + } }; if ($@) { - $count++; - warn "Test4 c: $@"; + $count++; + warn "Test4 c: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap'); - eval{ - run_command("zfs list $zpath\/$vmdisk\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 c: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$vmbase\/$vmlinked", 'snap'); + eval { run_command("zfs list $zpath\/$vmdisk\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 c: $@"; + } }; if ($@) { - $count++; - warn "Test4 c: $@"; + $count++; + warn "Test4 c: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap'); - eval{ - run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 d: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctdisk", 'snap'); + eval { run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 d: $@"; + } }; if ($@) { - $count++; - warn "Test4 d: $@"; + $count++; + warn "Test4 d: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap'); - eval{ - run_command("zfs list $zpath\/$ctbase\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 e: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase", 'snap'); + eval { run_command("zfs list $zpath\/$ctbase\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 e: $@"; + } }; if ($@) { - $count++; - warn "Test4 e: $@"; + $count++; + warn "Test4 e: $@"; } eval { - PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase\/$ctlinked", 'snap'); - eval{ - 
run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); - }; - if ($@) { - $count++; - warn "Test4 f: $@"; - } + PVE::Storage::volume_snapshot($cfg, "$storagename:$ctbase\/$ctlinked", 'snap'); + eval { run_command("zfs list $zpath\/$ctdisk\@snap", errmsg => $out, outfunc => $out); }; + if ($@) { + $count++; + warn "Test4 f: $@"; + } }; if ($@) { - $count++; - warn "Test4 f: $@"; + $count++; + warn "Test4 f: $@"; } }; $tests->{4} = $test4; @@ -1784,663 +1886,831 @@ my $test3 = sub { print "\nrun test3 \"volume_has_feature\"\n"; eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmdisk", undef, 0)) { - $count++; - warn "Test3 a failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$vmdisk", undef, 0, + )) { + $count++; + warn "Test3 a failed"; + } }; if ($@) { - $count++; - warn "Test3 a: $@"; + $count++; + warn "Test3 a: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmbase", undef, 0)) { - $count++; - warn "Test3 b failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$vmbase", undef, 0, + )) { + $count++; + warn "Test3 b failed"; + } }; if ($@) { - $count++; - warn "Test3 b: $@"; + $count++; + warn "Test3 b: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmbase\/$vmlinked", undef, 0)) { - $count++; - warn "Test3 c failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'snapshot', + "$storagename:$vmbase\/$vmlinked", + undef, + 0, + )) { + $count++; + warn "Test3 c failed"; + } }; if ($@) { - $count++; - warn "Test3 c: $@"; + $count++; + warn "Test3 c: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctdisk", undef, 0)) { - $count++; - warn "Test3 d failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$ctdisk", undef, 0, + )) { + $count++; + warn "Test3 d failed"; + } }; if ($@) { - $count++; - warn "Test3 d: $@"; + $count++; + warn "Test3 d: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctbase", undef, 0)) { - $count++; - warn "Test3 e failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$ctbase", undef, 0, + )) { + $count++; + warn "Test3 e failed"; + } }; if ($@) { - $count++; - warn "Test3 e: $@"; + $count++; + warn "Test3 e: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctbase\/$ctlinked", undef, 0)) { - $count++; - warn "Test3 f failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'snapshot', + "$storagename:$ctbase\/$ctlinked", + undef, + 0, + )) { + $count++; + warn "Test3 f failed"; + } }; if ($@) { - $count++; - warn "Test3 f: $@"; + $count++; + warn "Test3 f: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmdisk", undef, 0)) { - $count++; - warn "Test3 g failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmdisk", undef, 0)) { + $count++; + warn "Test3 g failed"; + } }; if ($@) { - $count++; - warn "Test3 g: $@"; + $count++; + warn "Test3 g: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase", undef, 0)) { - $count++; - warn "Test3 h failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase", undef, 0)) { + $count++; + warn "Test3 h failed"; + } }; if ($@) { - $count++; - warn "Test3 h: $@"; + $count++; + warn "Test3 h: $@"; } eval { - if 
(PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase\/$vmlinked", undef, 0)) { - $count++; - warn "Test3 h failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'clone', + "$storagename:$vmbase\/$vmlinked", + undef, + 0, + )) { + $count++; + warn "Test3 h failed"; + } }; if ($@) { - $count++; - warn "Test3 h: $@"; + $count++; + warn "Test3 h: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctdisk", undef, 0)) { - $count++; - warn "Test3 i failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctdisk", undef, 0)) { + $count++; + warn "Test3 i failed"; + } }; if ($@) { - $count++; - warn "Test3 i: $@"; + $count++; + warn "Test3 i: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase", undef, 0)) { - $count++; - warn "Test3 j failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase", undef, 0)) { + $count++; + warn "Test3 j failed"; + } }; if ($@) { - $count++; - warn "Test3 j: $@"; + $count++; + warn "Test3 j: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase\/$ctlinked", undef, 0)) { - $count++; - warn "Test3 k failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'clone', + "$storagename:$ctbase\/$ctlinked", + undef, + 0, + )) { + $count++; + warn "Test3 k failed"; + } }; if ($@) { - $count++; - warn "Test3 k: $@"; + $count++; + warn "Test3 k: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmdisk", undef, 0)) { - $count++; - warn "Test3 l failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$vmdisk", undef, 0, + )) { + $count++; + warn "Test3 l failed"; + } }; if ($@) { - $count++; - warn "Test3 l: $@"; + $count++; + warn "Test3 l: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmbase", undef, 0)) { - $count++; - warn "Test3 m failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$vmbase", undef, 0, + )) { + $count++; + warn "Test3 m failed"; + } }; if ($@) { - $count++; - warn "Test3 m: $@"; + $count++; + warn "Test3 m: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmbase\/$vmlinked", undef, 0)) { - $count++; - warn "Test3 n failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'template', + "$storagename:$vmbase\/$vmlinked", + undef, + 0, + )) { + $count++; + warn "Test3 n failed"; + } }; if ($@) { - $count++; - warn "Test3 n: $@"; + $count++; + warn "Test3 n: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctdisk", undef, 0)) { - $count++; - warn "Test3 o failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$ctdisk", undef, 0, + )) { + $count++; + warn "Test3 o failed"; + } }; if ($@) { - $count++; - warn "Test3 o: $@"; + $count++; + warn "Test3 o: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctbase", undef, 0)) { - $count++; - warn "Test3 p failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$ctbase", undef, 0, + )) { + $count++; + warn "Test3 p failed"; + } }; if ($@) { - $count++; - warn "Test3 p: $@"; + $count++; + warn "Test3 p: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctbase\/$ctlinked", undef, 0)) { - $count++; - warn "Test3 q failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 
+ 'template', + "$storagename:$ctbase\/$ctlinked", + undef, + 0, + )) { + $count++; + warn "Test3 q failed"; + } }; if ($@) { - $count++; - warn "Test3 q: $@"; + $count++; + warn "Test3 q: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmdisk", undef, 0)) { - $count++; - warn "Test3 r failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmdisk", undef, 0)) { + $count++; + warn "Test3 r failed"; + } }; if ($@) { - $count++; - warn "Test3 r: $@"; + $count++; + warn "Test3 r: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase", undef, 0)) { - $count++; - warn "Test3 s failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase", undef, 0)) { + $count++; + warn "Test3 s failed"; + } }; if ($@) { - $count++; - warn "Test3 s: $@"; + $count++; + warn "Test3 s: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase\/$vmlinked", undef, 0)) { - $count++; - warn "Test3 t failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'copy', + "$storagename:$vmbase\/$vmlinked", + undef, + 0, + )) { + $count++; + warn "Test3 t failed"; + } }; if ($@) { - $count++; - warn "Test3 t: $@"; + $count++; + warn "Test3 t: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctdisk", undef, 0)) { - $count++; - warn "Test3 u failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctdisk", undef, 0)) { + $count++; + warn "Test3 u failed"; + } }; if ($@) { - $count++; - warn "Test3 u: $@"; + $count++; + warn "Test3 u: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase", undef, 0)) { - $count++; - warn "Test3 v failed"; - } + if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase", undef, 0)) { + $count++; + warn "Test3 v failed"; + } }; if ($@) { - $count++; - warn "Test3 v: $@"; + $count++; + warn "Test3 v: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase\/$ctlinked", undef, 0)) { - $count++; - warn "Test3 w failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'copy', + "$storagename:$ctbase\/$ctlinked", + undef, + 0, + )) { + $count++; + warn "Test3 w failed"; + } }; if ($@) { - $count++; - warn "Test3 w: $@"; + $count++; + warn "Test3 w: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$vmdisk", undef, 0)) { - $count++; - warn "Test3 x failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$vmdisk", undef, 0, + )) { + $count++; + warn "Test3 x failed"; + } }; if ($@) { - $count++; - warn "Test3 x: $@"; + $count++; + warn "Test3 x: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$vmbase", undef, 0)) { - $count++; - warn "Test3 y failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$vmbase", undef, 0, + )) { + $count++; + warn "Test3 y failed"; + } }; if ($@) { - $count++; - warn "Test3 y: $@"; + $count++; + warn "Test3 y: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$vmbase\/$vmlinked", undef, 0)) { - $count++; - warn "Test3 z failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'sparseinit', + "$storagename:$vmbase\/$vmlinked", + undef, + 0, + )) { + $count++; + warn "Test3 z failed"; + } }; if ($@) { - $count++; - warn "Test3 z: $@"; + $count++; + warn "Test3 z: $@"; } eval { - if 
(!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctdisk", undef, 0)) { - $count++; - warn "Test3 A failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$ctdisk", undef, 0, + )) { + $count++; + warn "Test3 A failed"; + } }; if ($@) { - $count++; - warn "Test3 A: $@"; + $count++; + warn "Test3 A: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctbase", undef, 0)) { - $count++; - warn "Test3 B failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$ctbase", undef, 0, + )) { + $count++; + warn "Test3 B failed"; + } }; if ($@) { - $count++; - warn "Test3 B: $@"; + $count++; + warn "Test3 B: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctbase\/$ctlinked", undef, 0)) { - $count++; - warn "Test3 C failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'sparseinit', + "$storagename:$ctbase\/$ctlinked", + undef, + 0, + )) { + $count++; + warn "Test3 C failed"; + } }; if ($@) { - $count++; - warn "Test3 C: $@"; + $count++; + warn "Test3 C: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmdisk", 'test', 0)) { - $count++; - warn "Test3 a1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$vmdisk", 'test', 0, + )) { + $count++; + warn "Test3 a1 failed"; + } }; if ($@) { - $count++; - warn "Test3 a1: $@"; + $count++; + warn "Test3 a1: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmbase", 'test', 0)) { - $count++; - warn "Test3 b1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$vmbase", 'test', 0, + )) { + $count++; + warn "Test3 b1 failed"; + } }; if ($@) { - $count++; - warn "Test3 b1: $@"; + $count++; + warn "Test3 b1: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$vmbase\/$vmlinked", 'test', 0)) { - $count++; - warn "Test3 c1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'snapshot', + "$storagename:$vmbase\/$vmlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 c1 failed"; + } }; if ($@) { - $count++; - warn "Test3 c1: $@"; + $count++; + warn "Test3 c1: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctdisk", 'test', 0)) { - $count++; - warn "Test3 d1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$ctdisk", 'test', 0, + )) { + $count++; + warn "Test3 d1 failed"; + } }; if ($@) { - $count++; - warn "Test3 d1: $@"; + $count++; + warn "Test3 d1: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctbase", 'test', 0)) { - $count++; - warn "Test3 e1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, 'snapshot', "$storagename:$ctbase", 'test', 0, + )) { + $count++; + warn "Test3 e1 failed"; + } }; if ($@) { - $count++; - warn "Test3 e1: $@"; + $count++; + warn "Test3 e1: $@"; } eval { - if (!PVE::Storage::volume_has_feature($cfg, 'snapshot', "$storagename:$ctbase\/$ctlinked", 'test', 0)) { - $count++; - warn "Test3 f1 failed"; - } + if (!PVE::Storage::volume_has_feature( + $cfg, + 'snapshot', + "$storagename:$ctbase\/$ctlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 f1 failed"; + } }; if ($@) { - $count++; - warn "Test3 f1: $@"; + $count++; + warn "Test3 f1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmdisk", 'test', 0)) { - 
$count++; - warn "Test3 g1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmdisk", 'test', 0)) { + $count++; + warn "Test3 g1 failed"; + } }; if ($@) { - $count++; - warn "Test3 g1: $@"; + $count++; + warn "Test3 g1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase", 'test', 0)) { - $count++; - warn "Test3 h1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase", 'test', 0)) { + $count++; + warn "Test3 h1 failed"; + } }; if ($@) { - $count++; - warn "Test3 h1: $@"; + $count++; + warn "Test3 h1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$vmbase\/$vmlinked", 'test', 0)) { - $count++; - warn "Test3 h1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'clone', + "$storagename:$vmbase\/$vmlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 h1 failed"; + } }; if ($@) { - $count++; - warn "Test3 h1: $@"; + $count++; + warn "Test3 h1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctdisk", 'test', 0)) { - $count++; - warn "Test3 i1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctdisk", 'test', 0)) { + $count++; + warn "Test3 i1 failed"; + } }; if ($@) { - $count++; - warn "Test3 i1: $@"; + $count++; + warn "Test3 i1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase", 'test', 0)) { - $count++; - warn "Test3 j1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase", 'test', 0)) { + $count++; + warn "Test3 j1 failed"; + } }; if ($@) { - $count++; - warn "Test3 j1: $@"; + $count++; + warn "Test3 j1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'clone', "$storagename:$ctbase\/$ctlinked", 'test', 0)) { - $count++; - warn "Test3 k1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'clone', + "$storagename:$ctbase\/$ctlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 k1 failed"; + } }; if ($@) { - $count++; - warn "Test3 k1: $@"; + $count++; + warn "Test3 k1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmdisk", 'test', 0)) { - $count++; - warn "Test3 l1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$vmdisk", 'test', 0, + )) { + $count++; + warn "Test3 l1 failed"; + } }; if ($@) { - $count++; - warn "Test3 l1: $@"; + $count++; + warn "Test3 l1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmbase", 'test', 0)) { - $count++; - warn "Test3 m1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$vmbase", 'test', 0, + )) { + $count++; + warn "Test3 m1 failed"; + } }; if ($@) { - $count++; - warn "Test3 m1: $@"; + $count++; + warn "Test3 m1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$vmbase\/$vmlinked", 'test', 0)) { - $count++; - warn "Test3 n1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'template', + "$storagename:$vmbase\/$vmlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 n1 failed"; + } }; if ($@) { - $count++; - warn "Test3 n1: $@"; + $count++; + warn "Test3 n1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctdisk", 'test', 0)) { - $count++; - warn "Test3 o1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$ctdisk", 'test', 0, + )) { + $count++; + warn "Test3 
o1 failed"; + } }; if ($@) { - $count++; - warn "Test3 o1: $@"; + $count++; + warn "Test3 o1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctbase", 'test', 0)) { - $count++; - warn "Test3 p1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'template', "$storagename:$ctbase", 'test', 0, + )) { + $count++; + warn "Test3 p1 failed"; + } }; if ($@) { - $count++; - warn "Test3 p1: $@"; + $count++; + warn "Test3 p1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'template', "$storagename:$ctbase\/$ctlinked", 'test', 0)) { - $count++; - warn "Test3 q1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'template', + "$storagename:$ctbase\/$ctlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 q1 failed"; + } }; if ($@) { - $count++; - warn "Test3 q1: $@"; + $count++; + warn "Test3 q1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmdisk", 'test', 0)) { - $count++; - warn "Test3 r1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmdisk", 'test', 0)) { + $count++; + warn "Test3 r1 failed"; + } }; if ($@) { - $count++; - warn "Test3 r1: $@"; + $count++; + warn "Test3 r1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase", 'test', 0)) { - $count++; - warn "Test3 s1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase", 'test', 0)) { + $count++; + warn "Test3 s1 failed"; + } }; if ($@) { - $count++; - warn "Test3 s1: $@"; + $count++; + warn "Test3 s1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$vmbase\/$vmlinked", 'test', 0)) { - $count++; - warn "Test3 t1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'copy', + "$storagename:$vmbase\/$vmlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 t1 failed"; + } }; if ($@) { - $count++; - warn "Test3 t1: $@"; + $count++; + warn "Test3 t1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctdisk", 'test', 0)) { - $count++; - warn "Test3 u1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctdisk", 'test', 0)) { + $count++; + warn "Test3 u1 failed"; + } }; if ($@) { - $count++; - warn "Test3 u1: $@"; + $count++; + warn "Test3 u1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase", 'test', 0)) { - $count++; - warn "Test3 v1 failed"; - } + if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase", 'test', 0)) { + $count++; + warn "Test3 v1 failed"; + } }; if ($@) { - $count++; - warn "Test3 v1: $@"; + $count++; + warn "Test3 v1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'copy', "$storagename:$ctbase\/$ctlinked", 'test', 0)) { - $count++; - warn "Test3 w1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'copy', + "$storagename:$ctbase\/$ctlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 w1 failed"; + } }; if ($@) { - $count++; - warn "Test3 w1: $@"; + $count++; + warn "Test3 w1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$vmdisk", 'test', 0)) { - $count++; - warn "Test3 x1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$vmdisk", 'test', 0, + )) { + $count++; + warn "Test3 x1 failed"; + } }; if ($@) { - $count++; - warn "Test3 x1: $@"; + $count++; + warn "Test3 x1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', 
"$storagename:$vmbase", 'test', 0)) { - $count++; - warn "Test3 y1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$vmbase", 'test', 0, + )) { + $count++; + warn "Test3 y1 failed"; + } }; if ($@) { - $count++; - warn "Test3 y1: $@"; + $count++; + warn "Test3 y1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$vmbase\/$vmlinked", 'test', 0)) { - $count++; - warn "Test3 z1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'sparseinit', + "$storagename:$vmbase\/$vmlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 z1 failed"; + } }; if ($@) { - $count++; - warn "Test3 z1: $@"; + $count++; + warn "Test3 z1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctdisk", 'test', 0)) { - $count++; - warn "Test3 A1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$ctdisk", 'test', 0, + )) { + $count++; + warn "Test3 A1 failed"; + } }; if ($@) { - $count++; - warn "Test3 A1: $@"; + $count++; + warn "Test3 A1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctbase", 'test', 0)) { - $count++; - warn "Test3 B1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, 'sparseinit', "$storagename:$ctbase", 'test', 0, + )) { + $count++; + warn "Test3 B1 failed"; + } }; if ($@) { - $count++; - warn "Test3 B1: $@"; + $count++; + warn "Test3 B1: $@"; } eval { - if (PVE::Storage::volume_has_feature($cfg, 'sparseinit', "$storagename:$ctbase\/$ctlinked", 'test', 0)) { - $count++; - warn "Test3 C1 failed"; - } + if (PVE::Storage::volume_has_feature( + $cfg, + 'sparseinit', + "$storagename:$ctbase\/$ctlinked", + 'test', + 0, + )) { + $count++; + warn "Test3 C1 failed"; + } }; if ($@) { - $count++; - warn "Test3 C1: $@"; + $count++; + warn "Test3 C1: $@"; } }; $tests->{3} = $test3; @@ -2451,96 +2721,104 @@ my $test2 = sub { my $newsize = ($volsize + 1) * 1024 * 1024 * 1024; eval { - if (($newsize/1024) != - PVE::Storage::volume_resize($cfg, "$storagename:$vmdisk", $newsize, 0)) { - $count++; - warn "Test2 a failed"; - } - if ($newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$vmdisk")) { - $count++; - warn "Test2 a failed"; - } + if (($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$vmdisk", $newsize, 0) + ) { + $count++; + warn "Test2 a failed"; + } + if ($newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$vmdisk")) { + $count++; + warn "Test2 a failed"; + } }; if ($@) { - $count++; - warn "Test2 a: $@"; + $count++; + warn "Test2 a: $@"; } eval { - warn "Test2 b failed" if ($newsize/1024) != PVE::Storage::volume_resize($cfg, "$storagename:$vmbase", $newsize, 0); - warn "Test2 b failed" if $newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase"); + warn "Test2 b failed" + if ($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$vmbase", $newsize, 0); + warn "Test2 b failed" + if $newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase"); }; if ($@) { - $count++; - warn "Test2 b: $@"; + $count++; + warn "Test2 b: $@"; } eval { - if (($newsize/1024) != PVE::Storage::volume_resize($cfg, "$storagename:$vmbase\/$vmlinked", $newsize, 0)) { - $count++; - warn "Test2 c failed"; - } - if ($newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked")) { - $count++; - warn "Test2 c failed"; - } + if (($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$vmbase\/$vmlinked", 
$newsize, 0) + ) { + $count++; + warn "Test2 c failed"; + } + if ( + $newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked") + ) { + $count++; + warn "Test2 c failed"; + } }; if ($@) { - $count++; - warn "Test2 c: $@"; + $count++; + warn "Test2 c: $@"; } eval { - if (($newsize/1024) != PVE::Storage::volume_resize($cfg, "$storagename:$ctdisk", $newsize, 0)) { - $count++; - warn "Test2 d failed"; - } - if ($newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$ctdisk")) { - $count++; - warn "Test2 d failed" - } + if (($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$ctdisk", $newsize, 0) + ) { + $count++; + warn "Test2 d failed"; + } + if ($newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$ctdisk")) { + $count++; + warn "Test2 d failed"; + } }; if ($@) { - $count++; - warn "Test2 d: $@"; + $count++; + warn "Test2 d: $@"; } eval { - if (($newsize/1024) != - PVE::Storage::volume_resize($cfg, "$storagename:$ctbase", $newsize, 0)) { - $count++; - warn "Test2 e failed"; - } - if ($newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase")) { - $count++; - warn "Test2 e failed"; - } + if (($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$ctbase", $newsize, 0) + ) { + $count++; + warn "Test2 e failed"; + } + if ($newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase")) { + $count++; + warn "Test2 e failed"; + } }; if ($@) { - $count++; - warn "Test2 e: $@"; + $count++; + warn "Test2 e: $@"; } eval { - if (($newsize/1024) != - PVE::Storage::volume_resize($cfg, "$storagename:$ctbase\/$ctlinked", $newsize, 0)) { - $count++; - warn "Test2 f failed"; - } - if ($newsize != - PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase\/$ctlinked")) { - $count++; - warn "Test2 f failed"; - } + if (($newsize / 1024) != + PVE::Storage::volume_resize($cfg, "$storagename:$ctbase\/$ctlinked", $newsize, 0) + ) { + $count++; + warn "Test2 f failed"; + } + if ( + $newsize != PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase\/$ctlinked") + ) { + $count++; + warn "Test2 f failed"; + } }; if ($@) { - $count++; - warn "Test2 f: $@"; + $count++; + warn "Test2 f: $@"; } }; $tests->{2} = $test2; @@ -2551,74 +2829,70 @@ my $test1 = sub { my $size = ($volsize * 1024 * 1024 * 1024); eval { - if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmdisk")) { - $count++; - warn "Test1 a failed"; - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmdisk")) { + $count++; + warn "Test1 a failed"; + } }; if ($@) { - $count++; - warn "Test1 a : $@"; + $count++; + warn "Test1 a : $@"; } eval { - if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase")) { - $count++; - warn "Test1 b failed"; - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase")) { + $count++; + warn "Test1 b failed"; + } }; if ($@) { - $count++; - warn "Test1 b : $@"; + $count++; + warn "Test1 b : $@"; } eval { - if ($size != - PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked")) { - $count++; - warn "Test1 c failed"; - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked")) { + $count++; + warn "Test1 c failed"; + } }; if ($@) { - $count++; - warn "Test1 c : $@"; + $count++; + warn "Test1 c : $@"; } eval { - if ($size != - PVE::Storage::volume_size_info($cfg, "$storagename:$ctdisk")) { - $count++; - warn "Test1 d failed"; - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$ctdisk")) { + 
$count++; + warn "Test1 d failed"; + } }; if ($@) { - $count++; - warn "Test1 d : $@"; + $count++; + warn "Test1 d : $@"; } eval { - if ($size != - PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase")) { - $count++; - warn "Test1 e failed"; - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$ctbase")) { + $count++; + warn "Test1 e failed"; + } }; if ($@) { - $count++; - warn "Test1 e : $@"; + $count++; + warn "Test1 e : $@"; } eval { - if ($size != - PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked")) { - $count++; - warn "Test1 f failed" - } + if ($size != PVE::Storage::volume_size_info($cfg, "$storagename:$vmbase\/$vmlinked")) { + $count++; + warn "Test1 f failed"; + } }; if ($@) { - $count++; - warn "Test1 f : $@"; + $count++; + warn "Test1 f : $@"; } }; @@ -2648,12 +2922,12 @@ sub setup_zfs { run_command("zfs clone $zpath\/$ctbase$basesnap $zpath\/$ctlinked -o refquota=${volsize}G"); my $vollist = [ - "$storagename:$vmdisk", - "$storagename:$vmbase", - "$storagename:$vmbase/$vmlinked", - "$storagename:$ctdisk", - "$storagename:$ctbase", - "$storagename:$ctbase/$ctlinked", + "$storagename:$vmdisk", + "$storagename:$vmbase", + "$storagename:$vmbase/$vmlinked", + "$storagename:$ctdisk", + "$storagename:$ctbase", + "$storagename:$ctbase/$ctlinked", ]; PVE::Storage::activate_volumes($cfg, $vollist); @@ -2664,47 +2938,43 @@ sub cleanup_zfs { print "destroy $pool\/$subvol\n" if $verbose; eval { run_command("zfs destroy $zpath -r"); }; if ($@) { - print "cleanup failed: $@\nretrying once\n" if $verbose; - eval { run_command("zfs destroy $zpath -r"); }; - if ($@) { - clean_up_zpool(); - setup_zpool(); - } + print "cleanup failed: $@\nretrying once\n" if $verbose; + eval { run_command("zfs destroy $zpath -r"); }; + if ($@) { + clean_up_zpool(); + setup_zpool(); + } } } sub setup_zpool { unlink 'zpool.img'; - eval { - run_command("truncate -s 8G zpool.img"); - }; + eval { run_command("truncate -s 8G zpool.img"); }; if ($@) { - clean_up_zpool(); + clean_up_zpool(); } my $pwd = cwd(); - eval { - run_command("zpool create -m \/$mountpoint $subvol $pwd\/zpool.img"); - }; + eval { run_command("zpool create -m \/$mountpoint $subvol $pwd\/zpool.img"); }; if ($@) { - clean_up_zpool(); + clean_up_zpool(); } } sub clean_up_zpool { - eval { - run_command("zpool destroy -f $subvol"); - }; + eval { run_command("zpool destroy -f $subvol"); }; if ($@) { - warn $@;} + warn $@; + } unlink 'zpool.img'; } sub volume_is_base { my ($cfg, $volid) = @_; - my (undef, undef, undef, undef, undef, $isBase, undef) = PVE::Storage::parse_volname($cfg, $volid); + my (undef, undef, undef, undef, undef, $isBase, undef) = + PVE::Storage::parse_volname($cfg, $volid); return $isBase; } @@ -2725,18 +2995,19 @@ setup_zpool(); my $time = time; print "Start tests for ZFSPoolPlugin\n"; -$cfg = {'ids' => { - $storagename => { - 'content' => { - 'images' => 1, - 'rootdir' => 1 - }, - 'pool' => $subvol, - 'mountpoint' => "\/$mountpoint", - 'type' => 'zfspool' +$cfg = { + 'ids' => { + $storagename => { + 'content' => { + 'images' => 1, + 'rootdir' => 1, + }, + 'pool' => $subvol, + 'mountpoint' => "\/$mountpoint", + 'type' => 'zfspool', + }, }, - }, - 'order' => {'zfstank99' => 1,} + 'order' => { 'zfstank99' => 1 }, }; $zpath = $subvol; @@ -2744,12 +3015,10 @@ $zpath = $subvol; for (my $i = $start_test; $i <= $end_test; $i++) { setup_zfs(); - eval { - $tests->{$i}(); - }; + eval { $tests->{$i}(); }; if (my $err = $@) { - warn $err; - $count++; + warn $err; + $count++; } cleanup_zfs(); }