auto-format code using perltidy with Proxmox style guide

using the new top-level `make tidy` target, which calls perltidy via
our wrapper to enforce the desired style as closely as possible.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht
2025-06-11 10:03:21 +02:00
parent 5d23073cb6
commit 5a66c27cc6
54 changed files with 14137 additions and 12461 deletions
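
For reference, a minimal sketch of what such a top-level target could look like. The file selection, the `.perltidyrc` path, and the option values below are illustrative assumptions, not the actual Proxmox wrapper or profile used by this commit:

    # Hypothetical sketch only; recipe lines use a tab.
    .PHONY: tidy
    tidy:
    	git ls-files '*.pm' '*.pl' | xargs -r perltidy \
    	    --profile=.perltidyrc \
    	    --backup-and-modify-in-place --backup-file-extension='/'

    # Example .perltidyrc options that would produce changes of the kind
    # visible in the diff (e.g. breaking long strings before the '.'
    # concatenation operator); again assumptions, not the real profile:
    --maximum-line-length=100
    --want-break-before='.'

Passing `--backup-file-extension='/'` makes perltidy drop its backup files when reformatting succeeds, so the target can rewrite the tree in place without leaving `.bak` files behind.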

View File

@ -19,27 +19,27 @@ use PVE::API2::Disks::ZFS;
use PVE::RESTHandler;
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Disks::LVM",
path => 'lvm',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Disks::LVMThin",
path => 'lvmthin',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Disks::Directory",
path => 'directory',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Disks::ZFS",
path => 'zfs',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
@ -58,7 +58,7 @@ __PACKAGE__->register_method ({
type => "object",
properties => {},
},
links => [ { rel => 'child', href => "{name}" } ],
links => [{ rel => 'child', href => "{name}" }],
},
code => sub {
my ($param) = @_;
@ -75,9 +75,10 @@ __PACKAGE__->register_method ({
];
return $result;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'list',
path => 'list',
method => 'GET',
@ -123,8 +124,8 @@ __PACKAGE__->register_method ({
used => { type => 'string', optional => 1 },
gpt => { type => 'boolean' },
mounted => { type => 'boolean' },
size => { type => 'integer'},
osdid => { type => 'integer'}, # TODO: deprecate / remove in PVE 9?
size => { type => 'integer' },
osdid => { type => 'integer' }, # TODO: deprecate / remove in PVE 9?
'osdid-list' => {
type => 'array',
items => { type => 'integer' },
@ -132,13 +133,13 @@ __PACKAGE__->register_method ({
vendor => { type => 'string', optional => 1 },
model => { type => 'string', optional => 1 },
serial => { type => 'string', optional => 1 },
wwn => { type => 'string', optional => 1},
health => { type => 'string', optional => 1},
wwn => { type => 'string', optional => 1 },
health => { type => 'string', optional => 1 },
parent => {
type => 'string',
description => 'For partitions only. The device path of ' .
'the disk the partition resides on.',
optional => 1
description => 'For partitions only. The device path of '
. 'the disk the partition resides on.',
optional => 1,
},
},
},
@ -150,9 +151,7 @@ __PACKAGE__->register_method ({
my $include_partitions = $param->{'include-partitions'} // 0;
my $disks = PVE::Diskmanage::get_disks(
undef,
$skipsmart,
$include_partitions
undef, $skipsmart, $include_partitions,
);
my $type = $param->{type} // '';
@ -163,8 +162,8 @@ __PACKAGE__->register_method ({
if ($type eq 'journal_disks') {
next if $entry->{osdid} >= 0;
if (my $usage = $entry->{used}) {
next if !($usage eq 'partitions' && $entry->{gpt}
|| $usage eq 'LVM');
next
if !($usage eq 'partitions' && $entry->{gpt} || $usage eq 'LVM');
}
} elsif ($type eq 'unused') {
next if $entry->{used};
@ -174,9 +173,10 @@ __PACKAGE__->register_method ({
push @$result, $entry;
}
return $result;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'smart',
path => 'smart',
method => 'GET',
@ -207,7 +207,7 @@ __PACKAGE__->register_method ({
properties => {
health => { type => 'string' },
type => { type => 'string', optional => 1 },
attributes => { type => 'array', optional => 1},
attributes => { type => 'array', optional => 1 },
text => { type => 'string', optional => 1 },
},
},
@ -222,9 +222,10 @@ __PACKAGE__->register_method ({
$result = { health => $result->{health} } if $param->{healthonly};
return $result;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'initgpt',
path => 'initgpt',
method => 'POST',
@ -271,9 +272,10 @@ __PACKAGE__->register_method ({
my $diskid = $disk;
$diskid =~ s|^.*/||; # remove all up to the last slash
return $rpcenv->fork_worker('diskinit', $diskid, $authuser, $worker);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'wipe_disk',
path => 'wipedisk',
method => 'PUT',
@ -314,6 +316,7 @@ __PACKAGE__->register_method ({
my $basename = basename($disk); # avoid '/' in the ID
return $rpcenv->fork_worker('wipedisk', $basename, $authuser, $worker);
}});
},
});
1;

View File

@ -90,7 +90,7 @@ my $write_ini = sub {
file_set_contents($filename, $content);
};
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
@ -139,36 +139,44 @@ __PACKAGE__->register_method ({
my $result = [];
dir_glob_foreach('/etc/systemd/system', '^mnt-pve-(.+)\.mount$', sub {
dir_glob_foreach(
'/etc/systemd/system',
'^mnt-pve-(.+)\.mount$',
sub {
my ($filename, $storid) = @_;
$storid = PVE::Systemd::unescape_unit($storid);
my $unitfile = "/etc/systemd/system/$filename";
my $unit = $read_ini->($unitfile);
push @$result, {
push @$result,
{
unitfile => $unitfile,
path => "/mnt/pve/$storid",
device => $unit->{'Mount'}->{'What'},
type => $unit->{'Mount'}->{'Type'},
options => $unit->{'Mount'}->{'Options'},
};
});
},
);
return $result;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.",
description =>
"Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.",
parameters => {
additionalProperties => 0,
properties => {
@ -226,7 +234,8 @@ __PACKAGE__->register_method ({
# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1);
$name, $node, $storage_params, $verify_params, 1,
);
}
my $mounted = PVE::Diskmanage::mounted_paths();
@ -251,10 +260,14 @@ __PACKAGE__->register_method ({
my ($devname) = $dev =~ m|^/dev/(.*)$|;
$part = "/dev/";
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.+/, sub {
dir_glob_foreach(
"/sys/block/$devname",
qr/\Q$devname\E.+/,
sub {
my ($partition) = @_;
$part .= $partition;
});
},
);
}
# create filesystem
@ -277,14 +290,17 @@ __PACKAGE__->register_method ({
$cmd = [$BLKID, $part, '-o', 'export'];
print "# ", join(' ', @$cmd), "\n";
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my ($line) = @_;
if ($line =~ m/^UUID=(.*)$/) {
$uuid = $1;
$uuid_path = "/dev/disk/by-uuid/$uuid";
}
});
},
);
die "could not get UUID of device '$part'\n" if !$uuid;
@ -305,22 +321,25 @@ __PACKAGE__->register_method ({
if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params);
$name, $node, $storage_params, $verify_params,
);
}
});
};
return $rpcenv->fork_worker('dircreate', $name, $user, $worker);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{name}',
method => 'DELETE',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Unmounts the storage and removes the mount unit.",
@ -330,8 +349,9 @@ __PACKAGE__->register_method ({
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description => "Marks associated storage(s) as not available on this node anymore ".
"or removes them from the configuration (if configured for this node only).",
description =>
"Marks associated storage(s) as not available on this node anymore "
. "or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
@ -380,7 +400,9 @@ __PACKAGE__->register_method ({
run_command(['systemctl', 'stop', $mountunitname]);
run_command(['systemctl', 'disable', $mountunitname]);
unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n";
unlink $mountunitpath
or $! == ENOENT
or die "cannot remove $mountunitpath - $!\n";
my $config_err;
if ($param->{'cleanup-config'}) {
@ -388,7 +410,9 @@ __PACKAGE__->register_method ({
my ($scfg) = @_;
return $scfg->{type} eq 'dir' && $scfg->{path} eq $path;
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
eval {
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
};
warn $config_err = $@ if $@;
}
@ -402,6 +426,7 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('dirremove', $name, $user, $worker);
}});
},
});
1;

View File

@ -14,7 +14,7 @@ use PVE::RESTHandler;
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
@ -72,7 +72,8 @@ __PACKAGE__->register_method ({
},
size => {
type => 'integer',
description => 'The size of the physical volume in bytes',
description =>
'The size of the physical volume in bytes',
},
free => {
type => 'integer',
@ -97,7 +98,7 @@ __PACKAGE__->register_method ({
my $vg = $vgs->{$vg_name};
$vg->{name} = $vg_name;
$vg->{leaf} = 0;
foreach my $pv (@{$vg->{pvs}}) {
foreach my $pv (@{ $vg->{pvs} }) {
$pv->{leaf} = 1;
}
$vg->{children} = delete $vg->{pvs};
@ -108,16 +109,18 @@ __PACKAGE__->register_method ({
leaf => 0,
children => $result,
};
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Create an LVM Volume Group",
@ -167,7 +170,8 @@ __PACKAGE__->register_method ({
# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1);
$name, $node, $storage_params, $verify_params, 1,
);
}
my $worker = sub {
@ -187,22 +191,25 @@ __PACKAGE__->register_method ({
if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params);
$name, $node, $storage_params, $verify_params,
);
}
});
};
return $rpcenv->fork_worker('lvmcreate', $name, $user, $worker);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{name}',
method => 'DELETE',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Remove an LVM Volume Group.",
@ -212,8 +219,9 @@ __PACKAGE__->register_method ({
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description => "Marks associated storage(s) as not available on this node anymore ".
"or removes them from the configuration (if configured for this node only).",
description =>
"Marks associated storage(s) as not available on this node anymore "
. "or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
@ -251,7 +259,9 @@ __PACKAGE__->register_method ({
my ($scfg) = @_;
return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name;
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
eval {
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
};
warn $config_err = $@ if $@;
}
@ -274,6 +284,7 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('lvmremove', $name, $user, $worker);
}});
},
});
1;

View File

@ -15,7 +15,7 @@ use PVE::RESTHandler;
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
@ -66,16 +66,18 @@ __PACKAGE__->register_method ({
code => sub {
my ($param) = @_;
return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Create an LVM thinpool",
@ -125,7 +127,8 @@ __PACKAGE__->register_method ({
# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1);
$name, $node, $storage_params, $verify_params, 1,
);
}
my $worker = sub {
@ -143,45 +146,51 @@ __PACKAGE__->register_method ({
PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
# keep some free space just in case
my $datasize = $pv->{size} - 128*1024;
my $datasize = $pv->{size} - 128 * 1024;
# default to 1% for metadata
my $metadatasize = $datasize/100;
my $metadatasize = $datasize / 100;
# but at least 1G, as recommended in lvmthin man
$metadatasize = 1024*1024 if $metadatasize < 1024*1024;
$metadatasize = 1024 * 1024 if $metadatasize < 1024 * 1024;
# but at most 16G, which is the current lvm max
$metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024;
$metadatasize = 16 * 1024 * 1024 if $metadatasize > 16 * 1024 * 1024;
# shrink data by needed amount for metadata
$datasize -= 2*$metadatasize;
$datasize -= 2 * $metadatasize;
run_command([
'/sbin/lvcreate',
'--type', 'thin-pool',
'--type',
'thin-pool',
"-L${datasize}K",
'--poolmetadatasize', "${metadatasize}K",
'-n', $name,
$name
'--poolmetadatasize',
"${metadatasize}K",
'-n',
$name,
$name,
]);
PVE::Diskmanage::udevadm_trigger($dev);
if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params);
$name, $node, $storage_params, $verify_params,
);
}
});
};
return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{name}',
method => 'DELETE',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Remove an LVM thin pool.",
@ -192,8 +201,9 @@ __PACKAGE__->register_method ({
name => get_standard_option('pve-storage-id'),
'volume-group' => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description => "Marks associated storage(s) as not available on this node anymore ".
"or removes them from the configuration (if configured for this node only).",
description =>
"Marks associated storage(s) as not available on this node anymore "
. "or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
@ -232,11 +242,14 @@ __PACKAGE__->register_method ({
if ($param->{'cleanup-config'}) {
my $match = sub {
my ($scfg) = @_;
return $scfg->{type} eq 'lvmthin'
return
$scfg->{type} eq 'lvmthin'
&& $scfg->{vgname} eq $vg
&& $scfg->{thinpool} eq $lv;
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
eval {
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
};
warn $config_err = $@ if $@;
}
@ -264,6 +277,7 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
}});
},
});
1;

View File

@ -19,7 +19,7 @@ my $ZPOOL = '/sbin/zpool';
my $ZFS = '/sbin/zfs';
sub get_pool_data {
die "zfsutils-linux not installed\n" if ! -f $ZPOOL;
die "zfsutils-linux not installed\n" if !-f $ZPOOL;
my $propnames = [qw(name size alloc free frag dedup health)];
my $numbers = {
@ -31,26 +31,29 @@ sub get_pool_data {
};
my $pools = [];
run_command([$ZPOOL, 'list', '-HpPLo', join(',', @$propnames)], outfunc => sub {
run_command(
[$ZPOOL, 'list', '-HpPLo', join(',', @$propnames)],
outfunc => sub {
my ($line) = @_;
my @props = split('\s+', trim($line));
my $pool = {};
for (my $i = 0; $i < scalar(@$propnames); $i++) {
if ($numbers->{$propnames->[$i]}) {
$pool->{$propnames->[$i]} = $props[$i] + 0;
if ($numbers->{ $propnames->[$i] }) {
$pool->{ $propnames->[$i] } = $props[$i] + 0;
} else {
$pool->{$propnames->[$i]} = $props[$i];
$pool->{ $propnames->[$i] } = $props[$i];
}
}
push @$pools, $pool;
});
},
);
return $pools;
}
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
@ -101,20 +104,21 @@ __PACKAGE__->register_method ({
},
},
},
links => [ { rel => 'child', href => "{name}" } ],
links => [{ rel => 'child', href => "{name}" }],
},
code => sub {
my ($param) = @_;
return get_pool_data();
}});
},
});
sub preparetree {
my ($el) = @_;
delete $el->{lvl};
if ($el->{children} && scalar(@{$el->{children}})) {
if ($el->{children} && scalar(@{ $el->{children} })) {
$el->{leaf} = 0;
foreach my $child (@{$el->{children}}) {
foreach my $child (@{ $el->{children} }) {
preparetree($child);
}
} else {
@ -122,8 +126,7 @@ sub preparetree {
}
}
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'detail',
path => '{name}',
method => 'GET',
@ -172,7 +175,8 @@ __PACKAGE__->register_method ({
},
children => {
type => 'array',
description => "The pool configuration information, including the vdevs for each section (e.g. spares, cache), may be nested.",
description =>
"The pool configuration information, including the vdevs for each section (e.g. spares, cache), may be nested.",
items => {
type => 'object',
properties => {
@ -199,8 +203,8 @@ __PACKAGE__->register_method ({
},
msg => {
type => 'string',
description => 'An optional message about the vdev.'
}
description => 'An optional message about the vdev.',
},
},
},
},
@ -225,7 +229,9 @@ __PACKAGE__->register_method ({
my $stack = [$pool];
my $curlvl = 0;
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my ($line) = @_;
if ($line =~ m/^\s*(\S+): (\S+.*)$/) {
@ -237,8 +243,12 @@ __PACKAGE__->register_method ({
$pool->{$curfield} .= " " . $1;
} elsif (!$config && $line =~ m/^\s*config:/) {
$config = 1;
} elsif ($config && $line =~ m/^(\s+)(\S+)\s*(\S+)?(?:\s+(\S+)\s+(\S+)\s+(\S+))?\s*(.*)$/) {
my ($space, $name, $state, $read, $write, $cksum, $msg) = ($1, $2, $3, $4, $5, $6, $7);
} elsif (
$config
&& $line =~ m/^(\s+)(\S+)\s*(\S+)?(?:\s+(\S+)\s+(\S+)\s+(\S+))?\s*(.*)$/
) {
my ($space, $name, $state, $read, $write, $cksum, $msg) =
($1, $2, $3, $4, $5, $6, $7);
if ($name ne "NAME") {
my $lvl = int(length($space) / 2) + 1; # two spaces per level
my $vdev = {
@ -255,15 +265,15 @@ __PACKAGE__->register_method ({
my $cur = pop @$stack;
if ($lvl > $curlvl) {
$cur->{children} = [ $vdev ];
$cur->{children} = [$vdev];
} elsif ($lvl == $curlvl) {
$cur = pop @$stack;
push @{$cur->{children}}, $vdev;
push @{ $cur->{children} }, $vdev;
} else {
while ($lvl <= $cur->{lvl} && $cur->{lvl} != 0) {
$cur = pop @$stack;
}
push @{$cur->{children}}, $vdev;
push @{ $cur->{children} }, $vdev;
}
push @$stack, $cur;
@ -271,14 +281,16 @@ __PACKAGE__->register_method ({
$curlvl = $lvl;
}
}
});
},
);
# change treenodes for extjs tree
$pool->{name} = delete $pool->{pool};
preparetree($pool);
return $pool;
}});
},
});
my $draid_config_format = {
spares => {
@ -293,14 +305,15 @@ my $draid_config_format = {
},
};
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Create a ZFS pool.",
@ -313,13 +326,20 @@ __PACKAGE__->register_method ({
type => 'string',
description => 'The RAID level to use.',
enum => [
'single', 'mirror',
'raid10', 'raidz', 'raidz2', 'raidz3',
'draid', 'draid2', 'draid3',
'single',
'mirror',
'raid10',
'raidz',
'raidz2',
'raidz3',
'draid',
'draid2',
'draid3',
],
},
devices => {
type => 'string', format => 'string-list',
type => 'string',
format => 'string-list',
description => 'The block devices you want to create the zpool on.',
},
'draid-config' => {
@ -366,7 +386,8 @@ __PACKAGE__->register_method ({
my $draid_config;
if (exists $param->{'draid-config'}) {
die "draid-config set without using dRAID level\n" if $raidlevel !~ m/^draid/;
$draid_config = parse_property_string($draid_config_format, $param->{'draid-config'});
$draid_config =
parse_property_string($draid_config_format, $param->{'draid-config'});
}
for my $dev (@$devs) {
@ -388,7 +409,8 @@ __PACKAGE__->register_method ({
# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1);
$name, $node, $storage_params, $verify_params, 1,
);
}
my $pools = get_pool_data();
@ -439,7 +461,10 @@ __PACKAGE__->register_method ({
if ($is_partition) {
eval {
PVE::Diskmanage::change_parttype($dev, '6a898cc3-1dd2-11b2-99a6-080020736631');
PVE::Diskmanage::change_parttype(
$dev,
'6a898cc3-1dd2-11b2-99a6-080020736631',
);
};
warn $@ if $@;
}
@ -462,8 +487,8 @@ __PACKAGE__->register_method ({
my $cmd = [$ZPOOL, 'create', '-o', "ashift=$ashift", $name];
if ($raidlevel eq 'raid10') {
for (my $i = 0; $i < @$devs; $i+=2) {
push @$cmd, 'mirror', $devs->[$i], $devs->[$i+1];
for (my $i = 0; $i < @$devs; $i += 2) {
push @$cmd, 'mirror', $devs->[$i], $devs->[$i + 1];
}
} elsif ($raidlevel eq 'single') {
push @$cmd, $devs->[0];
@ -484,7 +509,8 @@ __PACKAGE__->register_method ({
run_command($cmd);
if (-e '/lib/systemd/system/zfs-import@.service') {
my $importunit = 'zfs-import@'. PVE::Systemd::escape_unit($name, undef) . '.service';
my $importunit =
'zfs-import@' . PVE::Systemd::escape_unit($name, undef) . '.service';
$cmd = ['systemctl', 'enable', $importunit];
print "# ", join(' ', @$cmd), "\n";
run_command($cmd);
@ -494,23 +520,31 @@ __PACKAGE__->register_method ({
if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params);
$name, $node, $storage_params, $verify_params,
);
}
};
return $rpcenv->fork_worker('zfscreate', $name, $user, sub {
return $rpcenv->fork_worker(
'zfscreate',
$name,
$user,
sub {
PVE::Diskmanage::locked_disk_action($code);
});
}});
},
);
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{name}',
method => 'DELETE',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Destroy a ZFS pool.",
@ -520,8 +554,9 @@ __PACKAGE__->register_method ({
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description => "Marks associated storage(s) as not available on this node anymore ".
"or removes them from the configuration (if configured for this node only).",
description =>
"Marks associated storage(s) as not available on this node anymore "
. "or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
@ -551,7 +586,9 @@ __PACKAGE__->register_method ({
my $to_wipe = [];
if ($param->{'cleanup-disks'}) {
# Using -o name does not only output the name in combination with -v.
run_command(['zpool', 'list', '-vHPL', $name], outfunc => sub {
run_command(
['zpool', 'list', '-vHPL', $name],
outfunc => sub {
my ($line) = @_;
my ($name) = PVE::Tools::split_list($line);
@ -562,7 +599,8 @@ __PACKAGE__->register_method ({
$dev =~ s|^/dev/||;
my $info = PVE::Diskmanage::get_disks($dev, 1, 1);
die "unable to obtain information for disk '$dev'\n" if !$info->{$dev};
die "unable to obtain information for disk '$dev'\n"
if !$info->{$dev};
# Wipe whole disk if usual ZFS layout with partition 9 as ZFS reserved.
my $parent = $info->{$dev}->{parent};
@ -571,15 +609,19 @@ __PACKAGE__->register_method ({
my $info9 = $info->{"${parent}9"};
$wipe = $info->{$dev}->{parent} # need leading /dev/
if $info9 && $info9->{used} && $info9->{used} =~ m/^ZFS reserved/;
if $info9
&& $info9->{used}
&& $info9->{used} =~ m/^ZFS reserved/;
}
push $to_wipe->@*, $wipe;
});
},
);
}
if (-e '/lib/systemd/system/zfs-import@.service') {
my $importunit = 'zfs-import@' . PVE::Systemd::escape_unit($name) . '.service';
my $importunit =
'zfs-import@' . PVE::Systemd::escape_unit($name) . '.service';
run_command(['systemctl', 'disable', $importunit]);
}
@ -591,7 +633,9 @@ __PACKAGE__->register_method ({
my ($scfg) = @_;
return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name;
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
eval {
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
};
warn $config_err = $@ if $@;
}
@ -605,6 +649,7 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('zfsremove', $name, $user, $worker);
}});
},
});
1;

View File

@ -29,10 +29,12 @@ my $api_storage_config = sub {
my $scfg = dclone(PVE::Storage::storage_config($cfg, $storeid));
$scfg->{storage} = $storeid;
$scfg->{digest} = $cfg->{digest};
$scfg->{content} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content});
$scfg->{content} =
PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content});
if ($scfg->{nodes}) {
$scfg->{nodes} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes});
$scfg->{nodes} =
PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes});
}
return $scfg;
@ -60,7 +62,7 @@ sub cleanup_storages_for_node {
storage => $storeid,
});
} else {
$self->delete({storage => $storeid});
$self->delete({ storage => $storeid });
}
}
}
@ -91,11 +93,11 @@ sub create_or_update {
for my $key ('type', $verify_params->@*) {
if (!defined($scfg->{$key})) {
die "Option '${key}' is not configured for storage '$sid', "
."expected it to be '$storage_params->{$key}'";
. "expected it to be '$storage_params->{$key}'";
}
if ($storage_params->{$key} ne $scfg->{$key}) {
die "Option '${key}' ($storage_params->{$key}) does not match "
."existing storage configuration '$scfg->{$key}'\n";
. "existing storage configuration '$scfg->{$key}'\n";
}
}
}
@ -116,13 +118,14 @@ sub create_or_update {
}
}
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
description => "Storage index.",
permissions => {
description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
description =>
"Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
user => 'all',
},
parameters => {
@ -140,9 +143,9 @@ __PACKAGE__->register_method ({
type => 'array',
items => {
type => "object",
properties => { storage => { type => 'string'} },
properties => { storage => { type => 'string' } },
},
links => [ { rel => 'child', href => "{storage}" } ],
links => [{ rel => 'child', href => "{storage}" }],
},
code => sub {
my ($param) = @_;
@ -156,7 +159,7 @@ __PACKAGE__->register_method ({
my $res = [];
foreach my $storeid (@sids) {
my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ];
my $privs = ['Datastore.Audit', 'Datastore.AllocateSpace'];
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
my $scfg = &$api_storage_config($cfg, $storeid);
@ -165,9 +168,10 @@ __PACKAGE__->register_method ({
}
return $res;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'read',
path => '{storage}',
method => 'GET',
@ -188,9 +192,10 @@ __PACKAGE__->register_method ({
my $cfg = PVE::Storage::config();
return &$api_storage_config($cfg, $param->{storage});
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
protected => 1,
path => '',
@ -244,7 +249,8 @@ __PACKAGE__->register_method ({
my $opts = $plugin->check_config($storeid, $param, 1, 1);
my $returned_config;
PVE::Storage::lock_storage_config(sub {
PVE::Storage::lock_storage_config(
sub {
my $cfg = PVE::Storage::config();
if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) {
@ -256,8 +262,9 @@ __PACKAGE__->register_method ({
$returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive);
if (defined($opts->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"
warn
"NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
. " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n";
}
eval {
@ -275,7 +282,9 @@ __PACKAGE__->register_method ({
PVE::Storage::write_config($cfg);
}, "create storage failed");
},
"create storage failed",
);
my $res = {
storage => $storeid,
@ -283,9 +292,10 @@ __PACKAGE__->register_method ({
};
$res->{config} = $returned_config if $returned_config;
return $res;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'update',
protected => 1,
path => '{storage}',
@ -331,11 +341,12 @@ __PACKAGE__->register_method ({
my $type;
if ($delete) {
$delete = [ PVE::Tools::split_list($delete) ];
$delete = [PVE::Tools::split_list($delete)];
}
my $returned_config;
PVE::Storage::lock_storage_config(sub {
PVE::Storage::lock_storage_config(
sub {
my $cfg = PVE::Storage::config();
PVE::SectionConfig::assert_if_modified($cfg, $digest);
@ -369,13 +380,16 @@ __PACKAGE__->register_method ({
}
if (defined($scfg->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"
warn
"NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
. " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n";
}
PVE::Storage::write_config($cfg);
}, "update storage failed");
},
"update storage failed",
);
my $res = {
storage => $storeid,
@ -383,9 +397,10 @@ __PACKAGE__->register_method ({
};
$res->{config} = $returned_config if $returned_config;
return $res;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
protected => 1,
path => '{storage}', # /storage/config/{storage}
@ -397,9 +412,12 @@ __PACKAGE__->register_method ({
parameters => {
additionalProperties => 0,
properties => {
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage,
}),
},
),
},
},
returns => { type => 'null' },
@ -408,7 +426,8 @@ __PACKAGE__->register_method ({
my $storeid = extract_param($param, 'storage');
PVE::Storage::lock_storage_config(sub {
PVE::Storage::lock_storage_config(
sub {
my $cfg = PVE::Storage::config();
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
@ -424,11 +443,14 @@ __PACKAGE__->register_method ({
PVE::Storage::write_config($cfg);
}, "delete storage failed");
},
"delete storage failed",
);
PVE::AccessControl::remove_storage_access($storeid);
return undef;
}});
},
});
1;

View File

@ -16,13 +16,18 @@ use PVE::SSHInfo;
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
description => "List storage content.",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
@ -30,20 +35,27 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
content => {
description => "Only list content of this type.",
type => 'string', format => 'pve-storage-content',
type => 'string',
format => 'pve-storage-content',
optional => 1,
completion => \&PVE::Storage::complete_content_type,
},
vmid => get_standard_option('pve-vmid', {
vmid => get_standard_option(
'pve-vmid',
{
description => "Only list images for this VM",
optional => 1,
completion => \&PVE::Cluster::complete_vmid,
}),
},
),
},
},
returns => {
@ -66,7 +78,8 @@ __PACKAGE__->register_method ({
optional => 1,
},
'format' => {
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
description =>
"Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
type => 'string',
},
size => {
@ -75,8 +88,8 @@ __PACKAGE__->register_method ({
renderer => 'bytes',
},
used => {
description => "Used space. Please note that most storage plugins " .
"do not report anything useful here.",
description => "Used space. Please note that most storage plugins "
. "do not report anything useful here.",
type => 'integer',
renderer => 'bytes',
optional => 1,
@ -88,18 +101,21 @@ __PACKAGE__->register_method ({
optional => 1,
},
notes => {
description => "Optional notes. If they contain multiple lines, only the first one is returned here.",
description =>
"Optional notes. If they contain multiple lines, only the first one is returned here.",
type => 'string',
optional => 1,
},
encrypted => {
description => "If whole backup is encrypted, value is the fingerprint or '1' "
." if encrypted. Only useful for the Proxmox Backup Server storage type.",
description =>
"If whole backup is encrypted, value is the fingerprint or '1' "
. " if encrypted. Only useful for the Proxmox Backup Server storage type.",
type => 'string',
optional => 1,
},
verification => {
description => "Last backup verification result, only useful for PBS storages.",
description =>
"Last backup verification result, only useful for PBS storages.",
type => 'object',
properties => {
state => {
@ -120,7 +136,7 @@ __PACKAGE__->register_method ({
},
},
},
links => [ { rel => 'child', href => "{volid}" } ],
links => [{ rel => 'child', href => "{volid}" }],
},
code => sub {
my ($param) = @_;
@ -133,11 +149,16 @@ __PACKAGE__->register_method ({
my $cfg = PVE::Storage::config();
my $vollist = PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content});
my $vollist =
PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content});
my $res = [];
foreach my $item (@$vollist) {
eval { PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $item->{volid}); };
eval {
PVE::Storage::check_volume_access(
$rpcenv, $authuser, $cfg, undef, $item->{volid},
);
};
next if $@;
$item->{vmid} = int($item->{vmid}) if defined($item->{vmid});
$item->{size} = int($item->{size}) if defined($item->{size});
@ -146,9 +167,10 @@ __PACKAGE__->register_method ({
}
return $res;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
@ -162,26 +184,36 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
filename => {
description => "The name of the file to create.",
type => 'string',
},
vmid => get_standard_option('pve-vmid', {
vmid => get_standard_option(
'pve-vmid',
{
description => "Specify owner VM",
completion => \&PVE::Cluster::complete_vmid,
}),
},
),
size => {
description => "Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)",
description =>
"Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)",
type => 'string',
pattern => '\d+[MG]?',
},
format => get_standard_option('pve-storage-image-format', {
format => get_standard_option(
'pve-storage-image-format',
{
requires => 'size',
optional => 1,
}),
},
),
},
},
returns => {
@ -210,7 +242,8 @@ __PACKAGE__->register_method ({
if ($name =~ m/\.(raw|qcow2|vmdk)$/) {
my $fmt = $1;
raise_param_exc({ format => "different storage formats ($param->{format} != $fmt)" })
raise_param_exc({
format => "different storage formats ($param->{format} != $fmt)" })
if $param->{format} && $param->{format} ne $fmt;
$param->{format} = $fmt;
@ -218,12 +251,13 @@ __PACKAGE__->register_method ({
my $cfg = PVE::Storage::config();
my $volid = PVE::Storage::vdisk_alloc ($cfg, $storeid, $param->{vmid},
$param->{format},
$name, $size);
my $volid = PVE::Storage::vdisk_alloc(
$cfg, $storeid, $param->{vmid}, $param->{format}, $name, $size,
);
return $volid;
}});
},
});
# we allow to pass volume names (without storage prefix) if the storage
# is specified as separate parameter.
@ -234,7 +268,7 @@ my $real_volume_id = sub {
if ($volume =~ m/:/) {
eval {
my ($sid, $volname) = PVE::Storage::parse_volume_id ($volume);
my ($sid, $volname) = PVE::Storage::parse_volume_id($volume);
die "storage ID mismatch ($sid != $storeid)\n"
if $storeid && $sid ne $storeid;
$volid = $volume;
@ -252,7 +286,7 @@ my $real_volume_id = sub {
return wantarray ? ($volid, $storeid) : $volid;
};
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'info',
path => '{volume}',
method => 'GET',
@ -287,8 +321,8 @@ __PACKAGE__->register_method ({
renderer => 'bytes',
},
used => {
description => "Used space. Please note that most storage plugins " .
"do not report anything useful here.",
description => "Used space. Please note that most storage plugins "
. "do not report anything useful here.",
type => 'integer',
renderer => 'bytes',
},
@ -343,9 +377,10 @@ __PACKAGE__->register_method ({
}
return $entry;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'updateattributes',
path => '{volume}',
method => 'PUT',
@ -397,15 +432,17 @@ __PACKAGE__->register_method ({
}
return undef;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{volume}',
method => 'DELETE',
description => "Delete volume",
permissions => {
description => "You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).",
description =>
"You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).",
user => 'all',
},
protected => 1,
@ -414,10 +451,13 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
optional => 1,
completion => \&PVE::Storage::complete_storage,
}),
},
),
volume => {
description => "Volume identifier",
type => 'string',
@ -425,14 +465,15 @@ __PACKAGE__->register_method ({
},
delay => {
type => 'integer',
description => "Time to wait for the task to finish. We return 'null' if the task finish within that time.",
description =>
"Time to wait for the task to finish. We return 'null' if the task finish within that time.",
minimum => 1,
maximum => 30,
optional => 1,
},
},
},
returns => { type => 'string', optional => 1, },
returns => { type => 'string', optional => 1 },
code => sub {
my ($param) = @_;
@ -452,10 +493,12 @@ __PACKAGE__->register_method ({
}
my $worker = sub {
PVE::Storage::vdisk_free ($cfg, $volid);
PVE::Storage::vdisk_free($cfg, $volid);
print "Removed volume '$volid'\n";
if ($vtype eq 'backup'
&& $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/) {
if (
$vtype eq 'backup'
&& $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/
) {
# Remove log file #318 and notes file #3972 if they still exist
PVE::Storage::archive_auxiliaries_remove($path);
}
@ -469,7 +512,8 @@ __PACKAGE__->register_method ({
my $currently_deleting; # not necessarily true, e.g. sequential api call from cli
do {
my $task = PVE::Tools::upid_decode($upid);
$currently_deleting = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
$currently_deleting =
PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
sleep 1 if $currently_deleting;
} while (time() < $end_time && $currently_deleting);
@ -481,9 +525,10 @@ __PACKAGE__->register_method ({
}
}
return $upid;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'copy',
path => '{volume}',
method => 'POST',
@ -494,7 +539,7 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', { optional => 1}),
storage => get_standard_option('pve-storage-id', { optional => 1 }),
volume => {
description => "Source volume identifier",
type => 'string',
@ -503,10 +548,13 @@ __PACKAGE__->register_method ({
description => "Target volume identifier",
type => 'string',
},
target_node => get_standard_option('pve-node', {
target_node => get_standard_option(
'pve-node',
{
description => "Target node. Default is local node.",
optional => 1,
}),
},
),
},
},
returns => {
@ -548,13 +596,20 @@ __PACKAGE__->register_method ({
# you need to get this working (fails currently, because storage_migrate() uses
# ssh to connect to local host (which is not needed
my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node);
PVE::Storage::storage_migrate($cfg, $src_volid, $sshinfo, $target_sid, {'target_volname' => $target_volname});
PVE::Storage::storage_migrate(
$cfg,
$src_volid,
$sshinfo,
$target_sid,
{ 'target_volname' => $target_volname },
);
print "DEBUG: end worker $upid\n";
};
return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
}});
},
});
1;

View File

@ -33,7 +33,7 @@ my $parse_volname_or_id = sub {
return $volid;
};
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'list',
path => 'list',
method => 'GET',
@ -47,11 +47,15 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
volume => {
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
description =>
"Backup volume ID or name. Currently only PBS snapshots are supported.",
type => 'string',
completion => \&PVE::Storage::complete_volume,
},
@ -113,7 +117,7 @@ __PACKAGE__->register_method ({
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." })
if $scfg->{type} ne 'pbs';
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
@ -139,9 +143,10 @@ __PACKAGE__->register_method ({
}
die "invalid proxmox-file-restore output";
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'download',
path => 'download',
method => 'GET',
@ -156,11 +161,15 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
volume => {
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
description =>
"Backup volume ID or name. Currently only PBS snapshots are supported.",
type => 'string',
completion => \&PVE::Storage::complete_volume,
},
@ -196,7 +205,7 @@ __PACKAGE__->register_method ({
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." })
if $scfg->{type} ne 'pbs';
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
@ -204,11 +213,16 @@ __PACKAGE__->register_method ({
my $client = PVE::PBSClient->new($scfg, $storeid);
my $fifo = $client->file_restore_extract_prepare();
$rpcenv->fork_worker('pbs-download', undef, $user, sub {
$rpcenv->fork_worker(
'pbs-download',
undef,
$user,
sub {
my $name = decode_base64($path);
print "Starting download of file: $name\n";
$client->file_restore_extract($fifo, $snap, $path, 1, $tar);
});
},
);
my $ret = {
download => {
@ -218,6 +232,7 @@ __PACKAGE__->register_method ({
},
};
return $ret;
}});
},
});
1;

View File

@ -12,14 +12,20 @@ use PVE::Tools qw(extract_param);
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'dryrun',
path => '',
method => 'GET',
description => "Get prune information for backups. NOTE: this is only a preview and might not be " .
"what a subsequent prune call does if backups are removed/added in the meantime.",
description =>
"Get prune information for backups. NOTE: this is only a preview and might not be "
. "what a subsequent prune call does if backups are removed/added in the meantime.",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
@ -27,24 +33,35 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
'prune-backups' => get_standard_option('prune-backups', {
description => "Use these retention options instead of those from the storage configuration.",
},
),
'prune-backups' => get_standard_option(
'prune-backups',
{
description =>
"Use these retention options instead of those from the storage configuration.",
optional => 1,
}),
},
),
type => {
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
description =>
"Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
type => 'string',
optional => 1,
enum => ['qemu', 'lxc'],
},
vmid => get_standard_option('pve-vmid', {
vmid => get_standard_option(
'pve-vmid',
{
description => "Only consider backups for this guest.",
optional => 1,
completion => \&PVE::Cluster::complete_vmid,
}),
},
),
},
},
returns => {
@ -57,12 +74,14 @@ __PACKAGE__->register_method ({
type => 'string',
},
'ctime' => {
description => "Creation time of the backup (seconds since the UNIX epoch).",
description =>
"Creation time of the backup (seconds since the UNIX epoch).",
type => 'integer',
},
'mark' => {
description => "Whether the backup would be kept or removed. Backups that are" .
" protected or don't use the standard naming scheme are not removed.",
description =>
"Whether the backup would be kept or removed. Backups that are"
. " protected or don't use the standard naming scheme are not removed.",
type => 'string',
enum => ['keep', 'remove', 'protected', 'renamed'],
},
@ -92,16 +111,17 @@ __PACKAGE__->register_method ({
if defined($prune_backups);
return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1);
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '',
method => 'DELETE',
description => "Prune backups. Only those using the standard naming scheme are considered.",
permissions => {
description => "You need the 'Datastore.Allocate' privilege on the storage " .
"(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).",
description => "You need the 'Datastore.Allocate' privilege on the storage "
. "(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).",
user => 'all',
},
protected => 1,
@ -110,23 +130,34 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage,
}),
'prune-backups' => get_standard_option('prune-backups', {
description => "Use these retention options instead of those from the storage configuration.",
}),
},
),
'prune-backups' => get_standard_option(
'prune-backups',
{
description =>
"Use these retention options instead of those from the storage configuration.",
},
),
type => {
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
description =>
"Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
type => 'string',
optional => 1,
enum => ['qemu', 'lxc'],
},
vmid => get_standard_option('pve-vmid', {
vmid => get_standard_option(
'pve-vmid',
{
description => "Only prune backups for this VM.",
completion => \&PVE::Cluster::complete_vmid,
optional => 1,
}),
},
),
},
},
returns => { type => 'string' },
@ -159,6 +190,7 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker);
}});
},
});
1;

View File

@ -33,10 +33,10 @@ __PACKAGE__->register_method({
items => {
type => "object",
properties => {
method => { type => 'string'},
method => { type => 'string' },
},
},
links => [ { rel => 'child', href => "{method}" } ],
links => [{ rel => 'child', href => "{method}" }],
},
code => sub {
my ($param) = @_;
@ -52,7 +52,8 @@ __PACKAGE__->register_method({
];
return $res;
}});
},
});
__PACKAGE__->register_method({
name => 'nfsscan',
@ -70,7 +71,8 @@ __PACKAGE__->register_method({
node => get_standard_option('pve-node'),
server => {
description => "The server address (name or IP).",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
},
},
},
@ -101,7 +103,8 @@ __PACKAGE__->register_method({
push @$data, { path => $k, options => $res->{$k} };
}
return $data;
}});
},
});
__PACKAGE__->register_method({
name => 'cifsscan',
@ -119,7 +122,8 @@ __PACKAGE__->register_method({
node => get_standard_option('pve-node'),
server => {
description => "The server address (name or IP).",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
},
username => {
description => "User name.",
@ -172,7 +176,8 @@ __PACKAGE__->register_method({
}
return $data;
}});
},
});
__PACKAGE__->register_method({
name => 'pbsscan',
@ -190,7 +195,8 @@ __PACKAGE__->register_method({
node => get_standard_option('pve-node'),
server => {
description => "The server address (name or IP).",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
},
username => {
description => "User-name or API token-ID.",
@ -236,7 +242,7 @@ __PACKAGE__->register_method({
my $password = delete $param->{password};
return PVE::Storage::PBSPlugin::scan_datastores($param, $password);
}
},
});
# Note: GlusterFS currently does not have an equivalent of showmount.
@ -258,7 +264,8 @@ __PACKAGE__->register_method({
node => get_standard_option('pve-node'),
server => {
description => "The server address (name or IP).",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
},
},
},
@ -287,7 +294,8 @@ __PACKAGE__->register_method({
}
}
return $data;
}});
},
});
__PACKAGE__->register_method({
name => 'iscsiscan',
@ -305,7 +313,8 @@ __PACKAGE__->register_method({
node => get_standard_option('pve-node'),
portal => {
description => "The iSCSI portal (IP or DNS name with optional port).",
type => 'string', format => 'pve-storage-portal-dns',
type => 'string',
format => 'pve-storage-portal-dns',
},
},
},
@ -332,11 +341,12 @@ __PACKAGE__->register_method({
my $data = [];
foreach my $k (sort keys %$res) {
push @$data, { target => $k, portal => join(',', @{$res->{$k}}) };
push @$data, { target => $k, portal => join(',', @{ $res->{$k} }) };
}
return $data;
}});
},
});
__PACKAGE__->register_method({
name => 'lvmscan',
@ -371,7 +381,8 @@ __PACKAGE__->register_method({
my $res = PVE::Storage::LVMPlugin::lvm_vgs();
return PVE::RESTHandler::hash_to_array($res, 'vg');
}});
},
});
__PACKAGE__->register_method({
name => 'lvmthinscan',
@ -410,7 +421,8 @@ __PACKAGE__->register_method({
my ($param) = @_;
return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg});
}});
},
});
__PACKAGE__->register_method({
name => 'zfsscan',
@ -444,6 +456,7 @@ __PACKAGE__->register_method({
my ($param) = @_;
return PVE::Storage::scan_zfs();
}});
},
});
1;

View File

@ -23,12 +23,12 @@ use PVE::Storage;
use base qw(PVE::RESTHandler);
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Storage::PruneBackups",
path => '{storage}/prunebackups',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Storage::Content",
# set fragment delimiter (no subdirs) - we need that, because volume
# IDs may contain a slash '/'
@ -36,7 +36,7 @@ __PACKAGE__->register_method ({
path => '{storage}/content',
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
subclass => "PVE::API2::Storage::FileRestore",
path => '{storage}/file-restore',
});
@ -46,26 +46,30 @@ my sub assert_ova_contents {
# test if it's really a tar file with an ovf file inside
my $hasOvf = 0;
run_command(['tar', '-t', '-f', $file], outfunc => sub {
run_command(
['tar', '-t', '-f', $file],
outfunc => sub {
my ($line) = @_;
if ($line =~ m/\.ovf$/) {
$hasOvf = 1;
}
});
},
);
die "ova archive has no .ovf file inside\n" if !$hasOvf;
return 1;
}
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'index',
path => '',
method => 'GET',
description => "Get status for all datastores.",
permissions => {
description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
description =>
"Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
user => 'all',
},
protected => 1,
@ -74,14 +78,18 @@ __PACKAGE__->register_method ({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
description => "Only list status for specified storage",
optional => 1,
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
content => {
description => "Only list stores which support this content type.",
type => 'string', format => 'pve-storage-content-list',
type => 'string',
format => 'pve-storage-content-list',
optional => 1,
completion => \&PVE::Storage::complete_content_type,
},
@ -91,12 +99,16 @@ __PACKAGE__->register_method ({
optional => 1,
default => 0,
},
target => get_standard_option('pve-node', {
description => "If target is different to 'node', we only lists shared storages which " .
"content is accessible on this 'node' and the specified 'target' node.",
target => get_standard_option(
'pve-node',
{
description =>
"If target is different to 'node', we only lists shared storages which "
. "content is accessible on this 'node' and the specified 'target' node.",
optional => 1,
completion => \&PVE::Cluster::get_nodelist,
}),
},
),
'format' => {
description => "Include information about formats",
type => 'boolean',
@ -117,7 +129,8 @@ __PACKAGE__->register_method ({
},
content => {
description => "Allowed storage content types.",
type => 'string', format => 'pve-storage-content-list',
type => 'string',
format => 'pve-storage-content-list',
},
enabled => {
description => "Set when storage is enabled (not disabled).",
@ -160,7 +173,7 @@ __PACKAGE__->register_method ({
},
},
},
links => [ { rel => 'child', href => "{storage}" } ],
links => [{ rel => 'child', href => "{storage}" }],
},
code => sub {
my ($param) = @_;
@ -179,14 +192,14 @@ __PACKAGE__->register_method ({
my $info = PVE::Storage::storage_info($cfg, $param->{content}, $param->{format});
raise_param_exc({ storage => "No such storage." })
if $param->{storage} && !defined($info->{$param->{storage}});
if $param->{storage} && !defined($info->{ $param->{storage} });
my $res = {};
my @sids = PVE::Storage::storage_ids($cfg);
foreach my $storeid (@sids) {
my $data = $info->{$storeid};
next if !$data;
my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ];
my $privs = ['Datastore.Audit', 'Datastore.AllocateSpace'];
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
next if $param->{storage} && $param->{storage} ne $storeid;
@ -211,15 +224,21 @@ __PACKAGE__->register_method ({
}
return PVE::RESTHandler::hash_to_array($res, 'storage');
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'diridx',
path => '{storage}',
method => 'GET',
description => "",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
parameters => {
additionalProperties => 0,
@ -236,7 +255,7 @@ __PACKAGE__->register_method ({
subdir => { type => 'string' },
},
},
links => [ { rel => 'child', href => "{subdir}" } ],
links => [{ rel => 'child', href => "{subdir}" }],
},
code => sub {
my ($param) = @_;
@ -254,15 +273,21 @@ __PACKAGE__->register_method ({
];
return $res;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'read_status',
path => '{storage}/status',
method => 'GET',
description => "Read storage status.",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
@ -284,21 +309,27 @@ __PACKAGE__->register_method ({
my $info = PVE::Storage::storage_info($cfg, $param->{content});
my $data = $info->{$param->{storage}};
my $data = $info->{ $param->{storage} };
raise_param_exc({ storage => "No such storage." })
if !defined($data);
return $data;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'rrd',
path => '{storage}/rrd',
method => 'GET',
description => "Read storage RRD statistics (returns PNG).",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
@ -310,16 +341,17 @@ __PACKAGE__->register_method ({
timeframe => {
description => "Specify the time frame you are interested in.",
type => 'string',
enum => [ 'hour', 'day', 'week', 'month', 'year' ],
enum => ['hour', 'day', 'week', 'month', 'year'],
},
ds => {
description => "The list of datasources you want to display.",
type => 'string', format => 'pve-configid-list',
type => 'string',
format => 'pve-configid-list',
},
cf => {
description => "The RRD consolidation function",
type => 'string',
enum => [ 'AVERAGE', 'MAX' ],
enum => ['AVERAGE', 'MAX'],
optional => 1,
},
},
@ -333,18 +365,23 @@ __PACKAGE__->register_method ({
code => sub {
my ($param) = @_;
return PVE::RRD::create_rrd_graph(
"pve2-storage/$param->{node}/$param->{storage}",
return PVE::RRD::create_rrd_graph("pve2-storage/$param->{node}/$param->{storage}",
$param->{timeframe}, $param->{ds}, $param->{cf});
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'rrddata',
path => '{storage}/rrddata',
method => 'GET',
description => "Read storage RRD statistics.",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
@ -356,12 +393,12 @@ __PACKAGE__->register_method ({
timeframe => {
description => "Specify the time frame you are interested in.",
type => 'string',
enum => [ 'hour', 'day', 'week', 'month', 'year' ],
enum => ['hour', 'day', 'week', 'month', 'year'],
},
cf => {
description => "The RRD consolidation function",
type => 'string',
enum => [ 'AVERAGE', 'MAX' ],
enum => ['AVERAGE', 'MAX'],
optional => 1,
},
},
@ -378,12 +415,15 @@ __PACKAGE__->register_method ({
return PVE::RRD::create_rrd_data(
"pve2-storage/$param->{node}/$param->{storage}",
$param->{timeframe}, $param->{cf});
}});
$param->{timeframe},
$param->{cf},
);
},
});
# makes no sense for big images and backup files (because it
# create a copy of the file).
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'upload',
path => '{storage}/upload',
method => 'POST',
@ -399,11 +439,13 @@ __PACKAGE__->register_method ({
storage => get_standard_option('pve-storage-id'),
content => {
description => "Content type.",
type => 'string', format => 'pve-storage-content',
type => 'string',
format => 'pve-storage-content',
enum => ['iso', 'vztmpl', 'import'],
},
filename => {
description => "The name of the file to create. Caution: This will be normalized!",
description =>
"The name of the file to create. Caution: This will be normalized!",
maxLength => 255,
type => 'string',
},
@ -421,7 +463,8 @@ __PACKAGE__->register_method ({
optional => 1,
},
tmpfilename => {
description => "The source file name. This parameter is usually set by the REST handler. You can only overwrite it when connecting to the trusted port on localhost.",
description =>
"The source file name. This parameter is usually set by the REST handler. You can only overwrite it when connecting to the trusted port on localhost.",
type => 'string',
optional => 1,
pattern => '/var/tmp/pveupload-[0-9a-f]+',
@ -469,7 +512,9 @@ __PACKAGE__->register_method ({
}
$path = PVE::Storage::get_vztmpl_dir($cfg, $storage);
} elsif ($content eq 'import') {
if ($filename !~ m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!) {
if ($filename !~
m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!
) {
raise_param_exc({ filename => "invalid filename or wrong extension" });
}
my $format = $1;
@ -500,7 +545,8 @@ __PACKAGE__->register_method ({
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
my $remip = PVE::Cluster::remote_node_ip($node);
my $ssh_options = PVE::SSHInfo::ssh_info_to_ssh_opts({ ip => $remip, name => $node });
my $ssh_options =
PVE::SSHInfo::ssh_info_to_ssh_opts({ ip => $remip, name => $node });
my @remcmd = ('/usr/bin/ssh', $ssh_options->@*, $remip, '--');
@ -514,7 +560,14 @@ __PACKAGE__->register_method ({
errmsg => "mkdir failed",
);
$cmd = ['/usr/bin/scp', $ssh_options->@*, '-p', '--', $tmpfilename, "[$remip]:" . PVE::Tools::shell_quote($dest)];
$cmd = [
'/usr/bin/scp',
$ssh_options->@*,
'-p',
'--',
$tmpfilename,
"[$remip]:" . PVE::Tools::shell_quote($dest),
];
$err_cleanup = sub { run_command([@remcmd, 'rm', '-f', '--', $dest]) };
} else {
@ -530,11 +583,13 @@ __PACKAGE__->register_method ({
print "starting file import from: $tmpfilename\n";
eval {
my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'};
my ($checksum, $checksum_algorithm) =
$param->@{ 'checksum', 'checksum-algorithm' };
if ($checksum_algorithm) {
print "calculating checksum...";
my $checksum_got = PVE::Tools::get_file_hash($checksum_algorithm, $tmpfilename);
my $checksum_got =
PVE::Tools::get_file_hash($checksum_algorithm, $tmpfilename);
if (lc($checksum_got) eq lc($checksum)) {
print "OK, checksum verified\n";
@ -557,7 +612,8 @@ __PACKAGE__->register_method ({
};
if (my $err = $@) {
# unlinks only the temporary file from the http server
unlink $tmpfilename or $! == ENOENT
unlink $tmpfilename
or $! == ENOENT
or warn "unable to clean up temporory file '$tmpfilename' - $!\n";
die $err;
}
@ -570,7 +626,8 @@ __PACKAGE__->register_method ({
eval { run_command($cmd, errmsg => 'import failed'); };
# the temporary file got only uploaded locally, no need to rm remote
unlink $tmpfilename or $! == ENOENT
unlink $tmpfilename
or $! == ENOENT
or warn "unable to clean up temporary file '$tmpfilename' - $!\n";
if (my $err = $@) {
@ -582,7 +639,8 @@ __PACKAGE__->register_method ({
};
return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
}});
},
});
__PACKAGE__->register_method({
name => 'download_url',
@ -591,14 +649,17 @@ __PACKAGE__->register_method({
description => "Download templates, ISO images, OVAs and VM images by using an URL.",
proxyto => 'node',
permissions => {
description => 'Requires allocation access on the storage and as this allows one to probe'
.' the (local!) host network indirectly it also requires one of Sys.Modify on / (for'
.' backwards compatibility) or the newer Sys.AccessNetwork privilege on the node.',
check => [ 'and',
['perm', '/storage/{storage}', [ 'Datastore.AllocateTemplate' ]],
[ 'or',
['perm', '/', [ 'Sys.Audit', 'Sys.Modify' ]],
['perm', '/nodes/{node}', [ 'Sys.AccessNetwork' ]],
description =>
'Requires allocation access on the storage and as this allows one to probe'
. ' the (local!) host network indirectly it also requires one of Sys.Modify on / (for'
. ' backwards compatibility) or the newer Sys.AccessNetwork privilege on the node.',
check => [
'and',
['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']],
[
'or',
['perm', '/', ['Sys.Audit', 'Sys.Modify']],
['perm', '/nodes/{node}', ['Sys.AccessNetwork']],
],
],
},
@ -615,11 +676,13 @@ __PACKAGE__->register_method({
},
content => {
description => "Content type.", # TODO: could be optional & detected in most cases
type => 'string', format => 'pve-storage-content',
type => 'string',
format => 'pve-storage-content',
enum => ['iso', 'vztmpl', 'import'],
},
filename => {
description => "The name of the file to create. Caution: This will be normalized!",
description =>
"The name of the file to create. Caution: This will be normalized!",
maxLength => 255,
type => 'string',
},
@ -652,7 +715,7 @@ __PACKAGE__->register_method({
},
},
returns => {
type => "string"
type => "string",
},
code => sub {
my ($param) = @_;
@ -668,7 +731,7 @@ __PACKAGE__->register_method({
die "can't upload to storage type '$scfg->{type}', not a file based storage!\n"
if !defined($scfg->{path});
my ($content, $url) = $param->@{'content', 'url'};
my ($content, $url) = $param->@{ 'content', 'url' };
die "storage '$storage' is not configured for content-type '$content'\n"
if !$scfg->{content}->{$content};
@ -690,7 +753,9 @@ __PACKAGE__->register_method({
}
$path = PVE::Storage::get_vztmpl_dir($cfg, $storage);
} elsif ($content eq 'import') {
if ($filename !~ m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!) {
if ($filename !~
m!${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::UPLOAD_IMPORT_EXT_RE_1$!
) {
raise_param_exc({ filename => "invalid filename or wrong extension" });
}
my $format = $1;
@ -717,7 +782,7 @@ __PACKAGE__->register_method({
https_proxy => $dccfg->{http_proxy},
};
my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'};
my ($checksum, $checksum_algorithm) = $param->@{ 'checksum', 'checksum-algorithm' };
if ($checksum) {
$opts->{"${checksum_algorithm}sum"} = $checksum;
$opts->{hash_required} = 1;
@ -752,7 +817,8 @@ __PACKAGE__->register_method({
my $worker_id = PVE::Tools::encode_text($filename); # must not pass : or the like as w-ID
return $rpcenv->fork_worker('download', $worker_id, $user, $worker);
}});
},
});
__PACKAGE__->register_method({
name => 'get_import_metadata',
@ -760,7 +826,7 @@ __PACKAGE__->register_method({
method => 'GET',
description =>
"Get the base parameters for creating a guest which imports data from a foreign importable"
." guest, like an ESXi VM",
. " guest, like an ESXi VM",
proxyto => 'node',
permissions => {
description => "You need read access for the volume.",
@ -785,18 +851,19 @@ __PACKAGE__->register_method({
properties => {
type => {
type => 'string',
enum => [ 'vm' ],
enum => ['vm'],
description => 'The type of guest this is going to produce.',
},
source => {
type => 'string',
enum => [ 'esxi' ],
enum => ['esxi'],
description => 'The type of the import-source of this guest volume.',
},
'create-args' => {
type => 'object',
additionalProperties => 1,
description => 'Parameters which can be used in a call to create a VM or container.',
description =>
'Parameters which can be used in a call to create a VM or container.',
},
'disks' => {
type => 'object',
@ -808,12 +875,13 @@ __PACKAGE__->register_method({
type => 'object',
additionalProperties => 1,
optional => 1,
description => 'Recognised network interfaces as `net$id` => { ...params } object.',
description =>
'Recognised network interfaces as `net$id` => { ...params } object.',
},
'warnings' => {
type => 'array',
description => 'List of known issues that can affect the import of a guest.'
.' Note that lack of warning does not imply that there cannot be any problems.',
. ' Note that lack of warning does not imply that there cannot be any problems.',
optional => 1,
items => {
type => "object",
@ -860,9 +928,13 @@ __PACKAGE__->register_method({
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
return PVE::Tools::run_with_timeout(30, sub {
return PVE::Tools::run_with_timeout(
30,
sub {
return PVE::Storage::get_import_metadata($cfg, $volid);
});
}});
},
);
},
});
1;


@ -168,6 +168,7 @@ The message to be printed.
=back
=cut
sub new {
my ($class, $storage_plugin, $scfg, $storeid, $log_function) = @_;
@ -183,6 +184,7 @@ Returns the name of the backup provider. It will be printed in some log lines.
=back
=cut
sub provider_name {
my ($self) = @_;
@ -211,6 +213,7 @@ Unix time-stamp of when the job started.
=back
=cut
sub job_init {
my ($self, $start_time) = @_;
@ -227,6 +230,7 @@ the backup server. Called in both, success and failure scenarios.
=back
=cut
sub job_cleanup {
my ($self) = @_;
@ -271,6 +275,7 @@ Unix time-stamp of when the guest backup started.
=back
=cut
sub backup_init {
my ($self, $vmid, $vmtype, $start_time) = @_;
@ -326,6 +331,7 @@ Present if there was a failure. The error message indicating the failure.
=back
=cut
sub backup_cleanup {
my ($self, $vmid, $vmtype, $success, $info) = @_;
@ -366,6 +372,7 @@ The type of the guest being backed up. Currently, either C<qemu> or C<lxc>.
=back
=cut
sub backup_get_mechanism {
my ($self, $vmid, $vmtype) = @_;
@ -396,6 +403,7 @@ Path to the file with the backup log.
=back
=cut
sub backup_handle_log_file {
my ($self, $vmid, $filename) = @_;
@ -462,6 +470,7 @@ bitmap and existing ones will be discarded.
=back
=cut
sub backup_vm_query_incremental {
my ($self, $vmid, $volumes) = @_;
@ -619,6 +628,7 @@ configuration as raw data.
=back
=cut
sub backup_vm {
my ($self, $vmid, $guest_config, $volumes, $info) = @_;
@ -652,6 +662,7 @@ description there.
=back
=cut
sub backup_container_prepare {
my ($self, $vmid, $info) = @_;
@ -752,6 +763,7 @@ for unprivileged containers by default.
=back
=cut
sub backup_container {
my ($self, $vmid, $guest_config, $exclude_patterns, $info) = @_;
@ -797,6 +809,7 @@ The volume ID of the archive being restored.
=back
=cut
sub restore_get_mechanism {
my ($self, $volname) = @_;
@ -824,6 +837,7 @@ The volume ID of the archive being restored.
=back
=cut
sub archive_get_guest_config {
my ($self, $volname) = @_;
@ -853,6 +867,7 @@ The volume ID of the archive being restored.
=back
=cut
sub archive_get_firewall_config {
my ($self, $volname) = @_;
@ -901,6 +916,7 @@ The volume ID of the archive being restored.
=back
=cut
sub restore_vm_init {
my ($self, $volname) = @_;
@ -927,6 +943,7 @@ The volume ID of the archive being restored.
=back
=cut
sub restore_vm_cleanup {
my ($self, $volname) = @_;
@ -984,6 +1001,7 @@ empty.
=back
=cut
sub restore_vm_volume_init {
my ($self, $volname, $device_name, $info) = @_;
@ -1020,6 +1038,7 @@ empty.
=back
=cut
sub restore_vm_volume_cleanup {
my ($self, $volname, $device_name, $info) = @_;
@ -1086,6 +1105,7 @@ empty.
=back
=cut
sub restore_container_init {
my ($self, $volname, $info) = @_;
@ -1117,6 +1137,7 @@ empty.
=back
=cut
sub restore_container_cleanup {
my ($self, $volname, $info) = @_;


@ -35,13 +35,16 @@ my $nodename = PVE::INotify::nodename();
sub param_mapping {
my ($name) = @_;
my $password_map = PVE::CLIHandler::get_standard_mapping('pve-password', {
my $password_map = PVE::CLIHandler::get_standard_mapping(
'pve-password',
{
func => sub {
my ($value) = @_;
return $value if $value;
return PVE::PTY::read_password("Enter Password: ");
},
});
},
);
my $enc_key_map = {
name => 'encryption-key',
@ -50,7 +53,7 @@ sub param_mapping {
my ($value) = @_;
return $value if $value eq 'autogen';
return PVE::Tools::file_get_contents($value);
}
},
};
my $master_key_map = {
@ -59,7 +62,7 @@ sub param_mapping {
func => sub {
my ($value) = @_;
return encode_base64(PVE::Tools::file_get_contents($value), '');
}
},
};
my $keyring_map = {
@ -72,11 +75,11 @@ sub param_mapping {
};
my $mapping = {
'cifsscan' => [ $password_map ],
'cifs' => [ $password_map ],
'pbs' => [ $password_map ],
'create' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ],
'update' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ],
'cifsscan' => [$password_map],
'cifs' => [$password_map],
'pbs' => [$password_map],
'create' => [$password_map, $enc_key_map, $master_key_map, $keyring_map],
'update' => [$password_map, $enc_key_map, $master_key_map, $keyring_map],
};
return $mapping->{$name};
}
@ -85,7 +88,7 @@ sub setup_environment {
PVE::RPCEnvironment->setup_default_cli_env();
}
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'apiinfo',
path => 'apiinfo',
method => 'GET',
@ -106,10 +109,10 @@ __PACKAGE__->register_method ({
apiver => PVE::Storage::APIVER,
apiage => PVE::Storage::APIAGE,
};
}
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'path',
path => 'path',
method => 'GET',
@ -119,7 +122,8 @@ __PACKAGE__->register_method ({
properties => {
volume => {
description => "Volume identifier",
type => 'string', format => 'pve-volume-id',
type => 'string',
format => 'pve-volume-id',
completion => \&PVE::Storage::complete_volume,
},
},
@ -131,21 +135,23 @@ __PACKAGE__->register_method ({
my $cfg = PVE::Storage::config();
my $path = PVE::Storage::path ($cfg, $param->{volume});
my $path = PVE::Storage::path($cfg, $param->{volume});
print "$path\n";
return undef;
}});
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'extractconfig',
path => 'extractconfig',
method => 'GET',
description => "Extract configuration from vzdump backup archive.",
permissions => {
description => "The user needs 'VM.Backup' permissions on the backed up guest ID, and 'Datastore.AllocateSpace' on the backup storage.",
description =>
"The user needs 'VM.Backup' permissions on the backed up guest ID, and 'Datastore.AllocateSpace' on the backup storage.",
user => 'all',
},
protected => 1,
@ -169,12 +175,7 @@ __PACKAGE__->register_method ({
my $storage_cfg = PVE::Storage::config();
PVE::Storage::check_volume_access(
$rpcenv,
$authuser,
$storage_cfg,
undef,
$volume,
'backup',
$rpcenv, $authuser, $storage_cfg, undef, $volume, 'backup',
);
if (PVE::Storage::parse_volume_id($volume, 1)) {
@ -186,7 +187,8 @@ __PACKAGE__->register_method ({
print "$config_raw\n";
return;
}});
},
});
my $print_content = sub {
my ($list) = @_;
@ -194,7 +196,7 @@ my $print_content = sub {
my ($maxlenname, $maxsize) = (0, 0);
foreach my $info (@$list) {
my $volid = $info->{volid};
my $sidlen = length ($volid);
my $sidlen = length($volid);
$maxlenname = $sidlen if $sidlen > $maxlenname;
$maxsize = $info->{size} if ($info->{size} // 0) > $maxsize;
}
@ -207,7 +209,8 @@ my $print_content = sub {
next if !$info->{vmid};
my $volid = $info->{volid};
printf "$basefmt %d\n", $volid, $info->{format}, $info->{content}, $info->{size}, $info->{vmid};
printf "$basefmt %d\n", $volid, $info->{format}, $info->{content}, $info->{size},
$info->{vmid};
}
foreach my $info (sort { $a->{format} cmp $b->{format} } @$list) {
@ -224,9 +227,9 @@ my $print_status = sub {
my $maxlen = 0;
foreach my $res (@$res) {
my $storeid = $res->{storage};
$maxlen = length ($storeid) if length ($storeid) > $maxlen;
$maxlen = length($storeid) if length($storeid) > $maxlen;
}
$maxlen+=1;
$maxlen += 1;
printf "%-${maxlen}s %10s %10s %15s %15s %15s %8s\n", 'Name', 'Type',
'Status', 'Total', 'Used', 'Available', '%';
@ -236,7 +239,7 @@ my $print_status = sub {
my $active = $res->{active} ? 'active' : 'inactive';
my ($per, $per_fmt) = (0, '% 7.2f%%');
$per = ($res->{used}*100)/$res->{total} if $res->{total} > 0;
$per = ($res->{used} * 100) / $res->{total} if $res->{total} > 0;
if (!$res->{enabled}) {
$per = 'N/A';
@ -245,12 +248,12 @@ my $print_status = sub {
}
printf "%-${maxlen}s %10s %10s %15d %15d %15d $per_fmt\n", $storeid,
$res->{type}, $active, $res->{total}/1024, $res->{used}/1024,
$res->{avail}/1024, $per;
$res->{type}, $active, $res->{total} / 1024, $res->{used} / 1024,
$res->{avail} / 1024, $per;
}
};
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'export',
path => 'export',
method => 'GET',
@ -288,8 +291,7 @@ __PACKAGE__->register_method ({
optional => 1,
},
'with-snapshots' => {
description =>
"Whether to include intermediate snapshots in the stream",
description => "Whether to include intermediate snapshots in the stream",
type => 'boolean',
optional => 1,
default => 0,
@ -320,14 +322,21 @@ __PACKAGE__->register_method ({
close(STDOUT);
open(STDOUT, '>', '/dev/null');
} else {
sysopen($outfh, $filename, O_CREAT|O_WRONLY|O_TRUNC)
sysopen($outfh, $filename, O_CREAT | O_WRONLY | O_TRUNC)
or die "open($filename): $!\n";
}
eval {
my $cfg = PVE::Storage::config();
PVE::Storage::volume_export($cfg, $outfh, $param->{volume}, $param->{format},
$param->{snapshot}, $param->{base}, $with_snapshots);
PVE::Storage::volume_export(
$cfg,
$outfh,
$param->{volume},
$param->{format},
$param->{snapshot},
$param->{base},
$with_snapshots,
);
};
my $err = $@;
if ($filename ne '-') {
@ -336,10 +345,10 @@ __PACKAGE__->register_method ({
}
die $err if $err;
return;
}
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'import',
path => 'import',
method => 'PUT',
@ -359,10 +368,10 @@ __PACKAGE__->register_method ({
enum => $PVE::Storage::KNOWN_EXPORT_FORMATS,
},
filename => {
description => "Source file name. For '-' stdin is used, the " .
"tcp://<IP-or-CIDR> format allows to use a TCP connection, " .
"the unix://PATH-TO-SOCKET format a UNIX socket as input." .
"Else, the file is treated as common file.",
description => "Source file name. For '-' stdin is used, the "
. "tcp://<IP-or-CIDR> format allows to use a TCP connection, "
. "the unix://PATH-TO-SOCKET format a UNIX socket as input."
. "Else, the file is treated as common file.",
type => 'string',
},
base => {
@ -373,8 +382,7 @@ __PACKAGE__->register_method ({
optional => 1,
},
'with-snapshots' => {
description =>
"Whether the stream includes intermediate snapshots",
description => "Whether the stream includes intermediate snapshots",
type => 'boolean',
optional => 1,
default => 0,
@ -387,8 +395,8 @@ __PACKAGE__->register_method ({
optional => 1,
},
'allow-rename' => {
description => "Choose a new volume ID if the requested " .
"volume ID already exists, instead of throwing an error.",
description => "Choose a new volume ID if the requested "
. "volume ID already exists, instead of throwing an error.",
type => 'boolean',
optional => 1,
default => 0,
@ -474,21 +482,28 @@ __PACKAGE__->register_method ({
my $cfg = PVE::Storage::config();
my $volume = $param->{volume};
my $delete = $param->{'delete-snapshot'};
my $imported_volid = PVE::Storage::volume_import($cfg, $infh, $volume, $param->{format},
$param->{snapshot}, $param->{base}, $param->{'with-snapshots'},
$param->{'allow-rename'});
my $imported_volid = PVE::Storage::volume_import(
$cfg,
$infh,
$volume,
$param->{format},
$param->{snapshot},
$param->{base},
$param->{'with-snapshots'},
$param->{'allow-rename'},
);
PVE::Storage::volume_snapshot_delete($cfg, $imported_volid, $delete)
if defined($delete);
return $imported_volid;
}
},
});
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'prunebackups',
path => 'prunebackups',
method => 'GET',
description => "Prune backups. Only those using the standard naming scheme are considered. " .
"If no keep options are specified, those from the storage configuration are used.",
description => "Prune backups. Only those using the standard naming scheme are considered. "
. "If no keep options are specified, those from the storage configuration are used.",
protected => 1,
proxyto => 'node',
parameters => {
@ -500,28 +515,36 @@ __PACKAGE__->register_method ({
optional => 1,
},
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', {
storage => get_standard_option(
'pve-storage-id',
{
completion => \&PVE::Storage::complete_storage_enabled,
}),
},
),
%{$PVE::Storage::Plugin::prune_backups_format},
type => {
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
description =>
"Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
type => 'string',
optional => 1,
enum => ['qemu', 'lxc'],
},
vmid => get_standard_option('pve-vmid', {
vmid => get_standard_option(
'pve-vmid',
{
description => "Only consider backups for this guest.",
optional => 1,
completion => \&PVE::Cluster::complete_vmid,
}),
},
),
},
},
returns => {
type => 'object',
properties => {
dryrun => {
description => 'If it was a dry run or not. The list will only be defined in that case.',
description =>
'If it was a dry run or not. The list will only be defined in that case.',
type => 'boolean',
},
list => {
@ -534,12 +557,14 @@ __PACKAGE__->register_method ({
type => 'string',
},
'ctime' => {
description => "Creation time of the backup (seconds since the UNIX epoch).",
description =>
"Creation time of the backup (seconds since the UNIX epoch).",
type => 'integer',
},
'mark' => {
description => "Whether the backup would be kept or removed. For backups that don't " .
"use the standard naming scheme, it's 'protected'.",
description =>
"Whether the backup would be kept or removed. For backups that don't "
. "use the standard naming scheme, it's 'protected'.",
type => 'string',
},
type => {
@ -566,7 +591,9 @@ __PACKAGE__->register_method ({
$keep_opts->{$keep} = extract_param($param, $keep) if defined($param->{$keep});
}
$param->{'prune-backups'} = PVE::JSONSchema::print_property_string(
$keep_opts, $PVE::Storage::Plugin::prune_backups_format) if $keep_opts;
$keep_opts,
$PVE::Storage::Plugin::prune_backups_format,
) if $keep_opts;
my $list = [];
if ($dryrun) {
@ -579,7 +606,8 @@ __PACKAGE__->register_method ({
dryrun => $dryrun,
list => $list,
};
}});
},
});
my $print_api_result = sub {
my ($data, $schema, $options) = @_;
@ -587,76 +615,120 @@ my $print_api_result = sub {
};
our $cmddef = {
add => [ "PVE::API2::Storage::Config", 'create', ['type', 'storage'] ],
set => [ "PVE::API2::Storage::Config", 'update', ['storage'] ],
remove => [ "PVE::API2::Storage::Config", 'delete', ['storage'] ],
status => [ "PVE::API2::Storage::Status", 'index', [],
{ node => $nodename }, $print_status ],
list => [ "PVE::API2::Storage::Content", 'index', ['storage'],
{ node => $nodename }, $print_content ],
alloc => [ "PVE::API2::Storage::Content", 'create', ['storage', 'vmid', 'filename', 'size'],
{ node => $nodename }, sub {
add => ["PVE::API2::Storage::Config", 'create', ['type', 'storage']],
set => ["PVE::API2::Storage::Config", 'update', ['storage']],
remove => ["PVE::API2::Storage::Config", 'delete', ['storage']],
status => ["PVE::API2::Storage::Status", 'index', [], { node => $nodename }, $print_status],
list => [
"PVE::API2::Storage::Content",
'index',
['storage'],
{ node => $nodename },
$print_content,
],
alloc => [
"PVE::API2::Storage::Content",
'create',
['storage', 'vmid', 'filename', 'size'],
{ node => $nodename },
sub {
my $volid = shift;
print "successfully created '$volid'\n";
}],
free => [ "PVE::API2::Storage::Content", 'delete', ['volume'],
{ node => $nodename } ],
},
],
free => ["PVE::API2::Storage::Content", 'delete', ['volume'], { node => $nodename }],
scan => {
nfs => [ "PVE::API2::Storage::Scan", 'nfsscan', ['server'], { node => $nodename }, sub {
nfs => [
"PVE::API2::Storage::Scan",
'nfsscan',
['server'],
{ node => $nodename },
sub {
my $res = shift;
my $maxlen = 0;
foreach my $rec (@$res) {
my $len = length ($rec->{path});
my $len = length($rec->{path});
$maxlen = $len if $len > $maxlen;
}
foreach my $rec (@$res) {
printf "%-${maxlen}s %s\n", $rec->{path}, $rec->{options};
}
}],
cifs => [ "PVE::API2::Storage::Scan", 'cifsscan', ['server'], { node => $nodename }, sub {
},
],
cifs => [
"PVE::API2::Storage::Scan",
'cifsscan',
['server'],
{ node => $nodename },
sub {
my $res = shift;
my $maxlen = 0;
foreach my $rec (@$res) {
my $len = length ($rec->{share});
my $len = length($rec->{share});
$maxlen = $len if $len > $maxlen;
}
foreach my $rec (@$res) {
printf "%-${maxlen}s %s\n", $rec->{share}, $rec->{description};
}
}],
glusterfs => [ "PVE::API2::Storage::Scan", 'glusterfsscan', ['server'], { node => $nodename }, sub {
},
],
glusterfs => [
"PVE::API2::Storage::Scan",
'glusterfsscan',
['server'],
{ node => $nodename },
sub {
my $res = shift;
foreach my $rec (@$res) {
printf "%s\n", $rec->{volname};
}
}],
iscsi => [ "PVE::API2::Storage::Scan", 'iscsiscan', ['portal'], { node => $nodename }, sub {
},
],
iscsi => [
"PVE::API2::Storage::Scan",
'iscsiscan',
['portal'],
{ node => $nodename },
sub {
my $res = shift;
my $maxlen = 0;
foreach my $rec (@$res) {
my $len = length ($rec->{target});
my $len = length($rec->{target});
$maxlen = $len if $len > $maxlen;
}
foreach my $rec (@$res) {
printf "%-${maxlen}s %s\n", $rec->{target}, $rec->{portal};
}
}],
lvm => [ "PVE::API2::Storage::Scan", 'lvmscan', [], { node => $nodename }, sub {
},
],
lvm => [
"PVE::API2::Storage::Scan",
'lvmscan',
[],
{ node => $nodename },
sub {
my $res = shift;
foreach my $rec (@$res) {
printf "$rec->{vg}\n";
}
}],
lvmthin => [ "PVE::API2::Storage::Scan", 'lvmthinscan', ['vg'], { node => $nodename }, sub {
},
],
lvmthin => [
"PVE::API2::Storage::Scan",
'lvmthinscan',
['vg'],
{ node => $nodename },
sub {
my $res = shift;
foreach my $rec (@$res) {
printf "$rec->{lv}\n";
}
}],
},
],
pbs => [
"PVE::API2::Storage::Scan",
'pbsscan',
@ -665,13 +737,19 @@ our $cmddef = {
$print_api_result,
$PVE::RESTHandler::standard_output_options,
],
zfs => [ "PVE::API2::Storage::Scan", 'zfsscan', [], { node => $nodename }, sub {
zfs => [
"PVE::API2::Storage::Scan",
'zfsscan',
[],
{ node => $nodename },
sub {
my $res = shift;
foreach my $rec (@$res) {
printf "$rec->{pool}\n";
}
}],
},
],
},
nfsscan => { alias => 'scan nfs' },
cifsscan => { alias => 'scan cifs' },
@ -680,20 +758,37 @@ our $cmddef = {
lvmscan => { alias => 'scan lvm' },
lvmthinscan => { alias => 'scan lvmthin' },
zfsscan => { alias => 'scan zfs' },
path => [ __PACKAGE__, 'path', ['volume']],
path => [__PACKAGE__, 'path', ['volume']],
extractconfig => [__PACKAGE__, 'extractconfig', ['volume']],
export => [ __PACKAGE__, 'export', ['volume', 'format', 'filename']],
import => [ __PACKAGE__, 'import', ['volume', 'format', 'filename'], {}, sub {
export => [__PACKAGE__, 'export', ['volume', 'format', 'filename']],
import => [
__PACKAGE__,
'import',
['volume', 'format', 'filename'],
{},
sub {
my $volid = shift;
print PVE::Storage::volume_imported_message($volid);
}],
apiinfo => [ __PACKAGE__, 'apiinfo', [], {}, sub {
},
],
apiinfo => [
__PACKAGE__,
'apiinfo',
[],
{},
sub {
my $res = shift;
print "APIVER $res->{apiver}\n";
print "APIAGE $res->{apiage}\n";
}],
'prune-backups' => [ __PACKAGE__, 'prunebackups', ['storage'], { node => $nodename }, sub {
},
],
'prune-backups' => [
__PACKAGE__,
'prunebackups',
['storage'],
{ node => $nodename },
sub {
my $res = shift;
my ($dryrun, $list) = ($res->{dryrun}, $res->{list});
@ -705,11 +800,12 @@ our $cmddef = {
return;
}
print "NOTE: this is only a preview and might not be what a subsequent\n" .
"prune call does if backups are removed/added in the meantime.\n\n";
print "NOTE: this is only a preview and might not be what a subsequent\n"
. "prune call does if backups are removed/added in the meantime.\n\n";
my @sorted = sort {
my $vmcmp = PVE::Tools::safe_compare($a->{vmid}, $b->{vmid}, sub { $_[0] <=> $_[1] });
my $vmcmp =
PVE::Tools::safe_compare($a->{vmid}, $b->{vmid}, sub { $_[0] <=> $_[1] });
return $vmcmp if $vmcmp ne 0;
return $a->{ctime} <=> $b->{ctime};
} @{$list};
@ -719,16 +815,22 @@ our $cmddef = {
my $volid = $backup->{volid};
$maxlen = length($volid) if length($volid) > $maxlen;
}
$maxlen+=1;
$maxlen += 1;
printf("%-${maxlen}s %15s %10s\n", 'Backup', 'Backup-ID', 'Prune-Mark');
foreach my $backup (@sorted) {
my $type = $backup->{type};
my $vmid = $backup->{vmid};
my $backup_id = defined($vmid) ? "$type/$vmid" : "$type";
printf("%-${maxlen}s %15s %10s\n", $backup->{volid}, $backup_id, $backup->{mark});
printf(
"%-${maxlen}s %15s %10s\n",
$backup->{volid},
$backup_id,
$backup->{mark},
);
}
}],
},
],
};
1;


@ -6,9 +6,7 @@ use Net::IP;
use PVE::Tools qw(run_command);
use PVE::Cluster qw(cfs_register_file);
cfs_register_file('ceph.conf',
\&parse_ceph_config,
\&write_ceph_config);
cfs_register_file('ceph.conf', \&parse_ceph_config, \&write_ceph_config);
# For more information on how the Ceph parser works and how its grammar is
# defined, see:
@ -126,7 +124,7 @@ sub parse_ceph_config {
$key =~ s/$re_leading_ws//;
$key =~ s/\s/ /;
while ($key =~ s/\s\s/ /) {} # squeeze repeated whitespace
while ($key =~ s/\s\s/ /) { } # squeeze repeated whitespace
# Ceph treats *single* spaces in keys the same as underscores,
# but we'll just use underscores for readability
@ -258,7 +256,7 @@ my $parse_ceph_file = sub {
my $cfg = {};
return $cfg if ! -f $filename;
return $cfg if !-f $filename;
my $content = PVE::Tools::file_get_contents($filename);
@ -352,7 +350,7 @@ sub get_monaddr_list {
my $monhostlist = {};
# get all ip addresses from mon_host
my $monhosts = [ split (/[ ,;]+/, $config->{global}->{mon_host} // "") ];
my $monhosts = [split(/[ ,;]+/, $config->{global}->{mon_host} // "")];
foreach my $monhost (@$monhosts) {
$monhost =~ s/^\[?v\d\://; # remove beginning of vector
@ -364,7 +362,7 @@ sub get_monaddr_list {
}
# then get all addrs from mon. sections
for my $section ( keys %$config ) {
for my $section (keys %$config) {
next if $section !~ m/^mon\./;
if (my $addr = $config->{$section}->{mon_addr}) {
@ -385,7 +383,7 @@ sub hostlist {
my $ceph_check_keyfile = sub {
my ($filename, $type) = @_;
return if ! -f $filename;
return if !-f $filename;
my $content = PVE::Tools::file_get_contents($filename);
eval {
@ -417,7 +415,8 @@ sub ceph_connect_option {
if (-e "/etc/pve/priv/ceph/${storeid}.conf") {
# allow custom ceph configuration for external clusters
if ($pveceph_managed) {
warn "ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n";
warn
"ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n";
} else {
$cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf";
}
@ -463,7 +462,8 @@ sub ceph_create_keyfile {
my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin');
mkdir '/etc/pve/priv/ceph';
chomp $cephfs_secret;
PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n", 0400);
PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n",
0400);
}
};
if (my $err = $@) {
@ -492,7 +492,7 @@ my $ceph_version_parser = sub {
# FIXME this is the same as pve-manager PVE::Ceph::Tools get_local_version
if ($ceph_version =~ /^ceph.*\sv?(\d+(?:\.\d+)+(?:-pve\d+)?)\s+(?:\(([a-zA-Z0-9]+)\))?/) {
my ($version, $buildcommit) = ($1, $2);
my $subversions = [ split(/\.|-/, $version) ];
my $subversions = [split(/\.|-/, $version)];
return ($subversions, $version, $buildcommit);
}
@ -504,9 +504,12 @@ sub local_ceph_version {
my $version_string = $cache;
if (!defined($version_string)) {
run_command('ceph --version', outfunc => sub {
run_command(
'ceph --version',
outfunc => sub {
$version_string = shift;
});
},
);
}
return undef if !defined($version_string);
# subversion is an array ref. with the version parts from major to minor


@ -11,7 +11,8 @@ use File::Basename;
use File::stat;
use JSON;
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach trim);
use PVE::Tools
qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach trim);
my $SMARTCTL = "/usr/sbin/smartctl";
my $ZPOOL = "/sbin/zpool";
@ -20,7 +21,7 @@ my $PVS = "/sbin/pvs";
my $LVS = "/sbin/lvs";
my $LSBLK = "/bin/lsblk";
my sub strip_dev :prototype($) {
my sub strip_dev : prototype($) {
my ($devpath) = @_;
$devpath =~ s|^/dev/||;
return $devpath;
@ -98,38 +99,46 @@ sub get_smart_data {
push @$cmd, $disk;
my $returncode = eval {
run_command($cmd, noerr => 1, outfunc => sub {
run_command(
$cmd,
noerr => 1,
outfunc => sub {
my ($line) = @_;
# ATA SMART attributes, e.g.:
# ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
# 1 Raw_Read_Error_Rate POSR-K 100 100 000 - 0
#
# SAS and NVME disks, e.g.:
# Data Units Written: 5,584,952 [2.85 TB]
# Accumulated start-stop cycles: 34
# ATA SMART attributes, e.g.:
# ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
# 1 Raw_Read_Error_Rate POSR-K 100 100 000 - 0
#
# SAS and NVME disks, e.g.:
# Data Units Written: 5,584,952 [2.85 TB]
# Accumulated start-stop cycles: 34
if (defined($type) && $type eq 'ata' && $line =~ m/^([ \d]{2}\d)\s+(\S+)\s+(\S{6})\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(.*)$/) {
if (
defined($type)
&& $type eq 'ata'
&& $line =~
m/^([ \d]{2}\d)\s+(\S+)\s+(\S{6})\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(.*)$/
) {
my $entry = {};
$entry->{name} = $2 if defined $2;
$entry->{flags} = $3 if defined $3;
# the +0 makes a number out of the strings
# FIXME: 'value' is depreacated by 'normalized'; remove with PVE 7.0
$entry->{value} = $4+0 if defined $4;
$entry->{normalized} = $4+0 if defined $4;
$entry->{worst} = $5+0 if defined $5;
$entry->{value} = $4 + 0 if defined $4;
$entry->{normalized} = $4 + 0 if defined $4;
$entry->{worst} = $5 + 0 if defined $5;
# some disks report the default threshold as --- instead of 000
if (defined($6) && $6 eq '---') {
$entry->{threshold} = 0;
} else {
$entry->{threshold} = $6+0 if defined $6;
$entry->{threshold} = $6 + 0 if defined $6;
}
$entry->{fail} = $7 if defined $7;
$entry->{raw} = $8 if defined $8;
$entry->{id} = $1 if defined $1;
push @{$smartdata->{attributes}}, $entry;
} elsif ($line =~ m/(?:Health Status|self\-assessment test result): (.*)$/ ) {
push @{ $smartdata->{attributes} }, $entry;
} elsif ($line =~ m/(?:Health Status|self\-assessment test result): (.*)$/) {
$smartdata->{health} = $1;
} elsif ($line =~ m/Vendor Specific SMART Attributes with Thresholds:/) {
$type = 'ata';
@ -140,13 +149,16 @@ sub get_smart_data {
$smartdata->{text} = '' if !defined $smartdata->{text};
$smartdata->{text} .= "$line\n";
# extract wearout from nvme/sas text, allow for decimal values
if ($line =~ m/Percentage Used(?: endurance indicator)?:\s*(\d+(?:\.\d+)?)\%/i) {
if ($line =~
m/Percentage Used(?: endurance indicator)?:\s*(\d+(?:\.\d+)?)\%/i
) {
$smartdata->{wearout} = 100 - $1;
}
} elsif ($line =~ m/SMART Disabled/) {
$smartdata->{health} = "SMART Disabled";
}
})
},
);
};
my $err = $@;
@ -163,7 +175,9 @@ sub get_smart_data {
sub get_lsblk_info {
my $cmd = [$LSBLK, '--json', '-o', 'path,parttype,fstype'];
my $output = "";
eval { run_command($cmd, outfunc => sub { $output .= "$_[0]\n"; }) };
eval {
run_command($cmd, outfunc => sub { $output .= "$_[0]\n"; });
};
warn "$@\n" if $@;
return {} if $output eq '';
@ -175,7 +189,7 @@ sub get_lsblk_info {
map {
$_->{path} => {
parttype => $_->{parttype},
fstype => $_->{fstype}
fstype => $_->{fstype},
}
} @{$list}
};
@ -203,12 +217,15 @@ sub get_zfs_devices {
# use zpool and parttype uuid, because log and cache do not have zfs type uuid
eval {
run_command([$ZPOOL, 'list', '-HPLv'], outfunc => sub {
run_command(
[$ZPOOL, 'list', '-HPLv'],
outfunc => sub {
my ($line) = @_;
if ($line =~ m|^\t([^\t]+)\t|) {
$res->{$1} = 1;
}
});
},
);
};
# only warn here, because maybe zfs tools are not installed
@ -219,7 +236,6 @@ sub get_zfs_devices {
"516e7cba-6ecf-11d6-8ff8-00022d09712b" => 1, # bsd
};
$res = get_devices_by_partuuid($lsblk_info, $uuids, $res);
return $res;
@ -229,13 +245,16 @@ sub get_lvm_devices {
my ($lsblk_info) = @_;
my $res = {};
eval {
run_command([$PVS, '--noheadings', '--readonly', '-o', 'pv_name'], outfunc => sub{
run_command(
[$PVS, '--noheadings', '--readonly', '-o', 'pv_name'],
outfunc => sub {
my ($line) = @_;
$line = trim($line);
if ($line =~ m|^/dev/|) {
$res->{$line} = 1;
}
});
},
);
};
# if something goes wrong, we do not want to give up, but indicate an error has occurred
@ -270,23 +289,37 @@ sub get_ceph_journals {
sub get_ceph_volume_infos {
my $result = {};
my $cmd = [ $LVS, '-S', 'lv_name=~^osd-', '-o', 'devices,lv_name,lv_tags',
'--noheadings', '--readonly', '--separator', ';' ];
my $cmd = [
$LVS,
'-S',
'lv_name=~^osd-',
'-o',
'devices,lv_name,lv_tags',
'--noheadings',
'--readonly',
'--separator',
';',
];
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
$line =~ s/(?:^\s+)|(?:\s+$)//g; # trim whitespaces
my $fields = [ split(';', $line) ];
my $fields = [split(';', $line)];
# lvs syntax is /dev/sdX(Y) where Y is the start (which we do not need)
my ($dev) = $fields->[0] =~ m|^(/dev/[a-z]+[^(]*)|;
if ($fields->[1] =~ m|^osd-([^-]+)-|) {
my $type = $1;
# $result autovivification is wanted, to not creating empty hashes
if (($type eq 'block' || $type eq 'data') && $fields->[2] =~ m/ceph.osd_id=([^,]+)/) {
if (
($type eq 'block' || $type eq 'data')
&& $fields->[2] =~ m/ceph.osd_id=([^,]+)/
) {
$result->{$dev}->{osdid} = $1;
if ( !defined($result->{$dev}->{'osdid-list'}) ) {
if (!defined($result->{$dev}->{'osdid-list'})) {
$result->{$dev}->{'osdid-list'} = [];
}
push($result->{$dev}->{'osdid-list'}->@*, $1);
@ -299,7 +332,8 @@ sub get_ceph_volume_infos {
$result->{$dev}->{$type}++;
}
}
});
},
);
return $result;
}
@ -310,10 +344,13 @@ sub get_udev_info {
my $info = "";
my $data = {};
eval {
run_command(['udevadm', 'info', '-p', $dev, '--query', 'all'], outfunc => sub {
run_command(
['udevadm', 'info', '-p', $dev, '--query', 'all'],
outfunc => sub {
my ($line) = @_;
$info .= "$line\n";
});
},
);
};
warn $@ if $@;
return if !$info;
@ -343,7 +380,7 @@ sub get_udev_info {
$data->{wwn} = $1 if $info =~ m/^E: ID_WWN=(.*)$/m;
if ($info =~ m/^E: DEVLINKS=(.+)$/m) {
my @devlinks = grep(m#^/dev/disk/by-id/(ata|scsi|nvme(?!-eui))#, split (/ /, $1));
my @devlinks = grep(m#^/dev/disk/by-id/(ata|scsi|nvme(?!-eui))#, split(/ /, $1));
$data->{by_id_link} = $devlinks[0] if defined($devlinks[0]);
}
@ -363,7 +400,7 @@ sub get_sysdir_size {
sub get_sysdir_info {
my ($sysdir) = @_;
return if ! -d "$sysdir/device";
return if !-d "$sysdir/device";
my $data = {};
@ -403,7 +440,7 @@ sub get_wear_leveling_info {
"Lifetime_Remaining",
"Percent_Life_Remaining",
"Percent_Lifetime_Used",
"Perc_Rated_Life_Used"
"Perc_Rated_Life_Used",
);
# Search for S.M.A.R.T. attributes for known register
@ -422,7 +459,7 @@ sub get_wear_leveling_info {
sub dir_is_empty {
my ($dir) = @_;
my $dh = IO::Dir->new ($dir);
my $dh = IO::Dir->new($dir);
return 1 if !$dh;
while (defined(my $tmp = $dh->read)) {
@ -456,8 +493,8 @@ sub mounted_blockdevs {
foreach my $mount (@$mounts) {
next if $mount->[0] !~ m|^/dev/|;
$mounted->{abs_path($mount->[0])} = $mount->[1];
};
$mounted->{ abs_path($mount->[0]) } = $mount->[1];
}
return $mounted;
}
@ -469,8 +506,8 @@ sub mounted_paths {
my $mounts = PVE::ProcFSTools::parse_proc_mounts();
foreach my $mount (@$mounts) {
$mounted->{abs_path($mount->[1])} = $mount->[0];
};
$mounted->{ abs_path($mount->[1]) } = $mount->[0];
}
return $mounted;
}
@ -493,7 +530,7 @@ sub get_disks {
my $disk_regex = ".*";
if (defined($disks)) {
if (!ref($disks)) {
$disks = [ $disks ];
$disks = [$disks];
} elsif (ref($disks) ne 'ARRAY') {
die "disks is not a string or array reference\n";
}
@ -511,7 +548,10 @@ sub get_disks {
$disk_regex = "(?:" . join('|', @$disks) . ")";
}
dir_glob_foreach('/sys/block', $disk_regex, sub {
dir_glob_foreach(
'/sys/block',
$disk_regex,
sub {
my ($dev) = @_;
# whitelisting following devices
# - hdX ide block device
@ -520,9 +560,10 @@ sub get_disks {
# - xvdX: xen virtual block device
# - nvmeXnY: nvme devices
# - cciss!cXnY cciss devices
return if $dev !~ m/^(h|s|x?v)d[a-z]+$/ &&
$dev !~ m/^nvme\d+n\d+$/ &&
$dev !~ m/^cciss\!c\d+d\d+$/;
return
if $dev !~ m/^(h|s|x?v)d[a-z]+$/
&& $dev !~ m/^nvme\d+n\d+$/
&& $dev !~ m/^cciss\!c\d+d\d+$/;
my $data = get_udev_info("/sys/block/$dev") // return;
my $devpath = $data->{devpath};
@ -604,9 +645,10 @@ sub get_disks {
my $info = $lsblk_info->{$devpath} // {};
if (defined(my $parttype = $info->{parttype})) {
return 'BIOS boot'if $parttype eq '21686148-6449-6e6f-744e-656564454649';
return 'BIOS boot' if $parttype eq '21686148-6449-6e6f-744e-656564454649';
return 'EFI' if $parttype eq 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b';
return 'ZFS reserved' if $parttype eq '6a945a3b-1dd2-11b2-99a6-080020736631';
return 'ZFS reserved'
if $parttype eq '6a945a3b-1dd2-11b2-99a6-080020736631';
}
return "$info->{fstype}" if defined($info->{fstype});
@ -640,7 +682,10 @@ sub get_disks {
};
my $partitions = {};
dir_glob_foreach("$sysdir", "$dev.+", sub {
dir_glob_foreach(
"$sysdir",
"$dev.+",
sub {
my ($part) = @_;
$partitions->{$part} = $collect_ceph_info->("$partpath/$part");
@ -652,7 +697,8 @@ sub get_disks {
$partitions->{$part}->{gpt} = $data->{gpt};
$partitions->{$part}->{type} = 'partition';
$partitions->{$part}->{size} = get_sysdir_size("$sysdir/$part") // 0;
$partitions->{$part}->{used} = $determine_usage->("$partpath/$part", "$sysdir/$part", 1);
$partitions->{$part}->{used} =
$determine_usage->("$partpath/$part", "$sysdir/$part", 1);
$partitions->{$part}->{osdid} //= -1;
$partitions->{$part}->{'osdid-list'} //= undef;
@ -680,7 +726,8 @@ sub get_disks {
$partitions->{$part}->{wal} = 1 if $journal_part == 3;
$partitions->{$part}->{bluestore} = 1 if $journal_part == 4;
}
});
},
);
my $used = $determine_usage->($devpath, $sysdir, 0);
if (!$include_partitions) {
@ -712,7 +759,8 @@ sub get_disks {
if ($include_partitions) {
$disklist->{$_} = $partitions->{$_} for keys %{$partitions};
}
});
},
);
return $disklist;
}
@ -783,28 +831,38 @@ sub append_partition {
$devname =~ s|^/dev/||;
my $newpartid = 1;
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*?(\d+)/, sub {
dir_glob_foreach(
"/sys/block/$devname",
qr/\Q$devname\E.*?(\d+)/,
sub {
my ($part, $partid) = @_;
if ($partid >= $newpartid) {
$newpartid = $partid + 1;
}
});
},
);
$size = PVE::Tools::convert_size($size, 'b' => 'mb');
run_command([ $SGDISK, '-n', "$newpartid:0:+${size}M", $dev ],
errmsg => "error creating partition '$newpartid' on '$dev'");
run_command(
[$SGDISK, '-n', "$newpartid:0:+${size}M", $dev],
errmsg => "error creating partition '$newpartid' on '$dev'",
);
my $partition;
# loop again to detect the real partition device which does not always follow
# a strict $devname$partition scheme like /dev/nvme0n1 -> /dev/nvme0n1p1
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*$newpartid/, sub {
dir_glob_foreach(
"/sys/block/$devname",
qr/\Q$devname\E.*$newpartid/,
sub {
my ($part) = @_;
$partition = "/dev/$part";
});
},
);
return $partition;
}
@ -820,10 +878,14 @@ sub has_holder {
return $devpath if !dir_is_empty("/sys/class/block/${dev}/holders");
my $found;
dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub {
dir_glob_foreach(
"/sys/block/${dev}",
"${dev}.+",
sub {
my ($part) = @_;
$found = "/dev/${part}" if !dir_is_empty("/sys/class/block/${part}/holders");
});
},
);
return $found;
}
@ -841,12 +903,16 @@ sub is_mounted {
my $dev = strip_dev($devpath);
my $found;
dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub {
dir_glob_foreach(
"/sys/block/${dev}",
"${dev}.+",
sub {
my ($part) = @_;
my $partpath = "/dev/${part}";
$found = $partpath if $mounted->{$partpath};
});
},
);
return $found;
}
@ -884,13 +950,17 @@ sub wipe_blockdev {
my $count = ($size < 200) ? $size : 200;
my $to_wipe = [];
dir_glob_foreach("/sys/class/block/${devname}", "${devname}.+", sub {
dir_glob_foreach(
"/sys/class/block/${devname}",
"${devname}.+",
sub {
my ($part) = @_;
push $to_wipe->@*, "/dev/${part}" if -b "/dev/${part}";
});
},
);
if (scalar($to_wipe->@*) > 0) {
print "found child partitions to wipe: ". join(', ', $to_wipe->@*) ."\n";
print "found child partitions to wipe: " . join(', ', $to_wipe->@*) . "\n";
}
push $to_wipe->@*, $devpath; # put actual device last


@ -54,14 +54,17 @@ sub extract_disk_from_import_file {
'-x',
'--force-local',
'--no-same-owner',
'-C', $tmpdir,
'-f', $ova_path,
'-C',
$tmpdir,
'-f',
$ova_path,
$inner_file,
]);
# check for symlinks and other non regular files
if (-l $source_path || ! -f $source_path) {
die "extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n";
if (-l $source_path || !-f $source_path) {
die
"extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n";
}
# check potentially untrusted image file!
@ -69,7 +72,8 @@ sub extract_disk_from_import_file {
# create temporary 1M image that will get overwritten by the rename
# to reserve the filename and take care of locking
$target_volid = PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024);
$target_volid =
PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024);
$target_path = PVE::Storage::path($cfg, $target_volid);
print "renaming $source_path to $target_path\n";


@ -36,7 +36,7 @@ my @resources = (
{ id => 17, dtmf_name => 'Disk Drive' },
{ id => 18, dtmf_name => 'Tape Drive' },
{ id => 19, dtmf_name => 'Storage Extent' },
{ id => 20, dtmf_name => 'Other storage device', pve_type => 'sata'},
{ id => 20, dtmf_name => 'Other storage device', pve_type => 'sata' },
{ id => 21, dtmf_name => 'Serial port' },
{ id => 22, dtmf_name => 'Parallel port' },
{ id => 23, dtmf_name => 'USB Controller' },
@ -51,7 +51,7 @@ my @resources = (
{ id => 32, dtmf_name => 'Storage Volume' },
{ id => 33, dtmf_name => 'Ethernet Connection' },
{ id => 34, dtmf_name => 'DMTF reserved' },
{ id => 35, dtmf_name => 'Vendor Reserved'}
{ id => 35, dtmf_name => 'Vendor Reserved' },
);
# see https://schemas.dmtf.org/wbem/cim-html/2.55.0+/CIM_OperatingSystem.html
@ -120,9 +120,7 @@ sub get_ostype {
}
my $allowed_nic_models = [
'e1000',
'e1000e',
'vmxnet3',
'e1000', 'e1000e', 'vmxnet3',
];
sub find_by {
@ -163,7 +161,7 @@ sub try_parse_capacity_unit {
if ($unit_text =~ m/^\s*byte\s*\*\s*([0-9]+)\s*\^\s*([0-9]+)\s*$/) {
my $base = $1;
my $exp = $2;
return $base ** $exp;
return $base**$exp;
}
return undef;
@ -177,24 +175,31 @@ sub parse_ovf {
my $dom;
if ($isOva) {
my $raw = "";
PVE::Tools::run_command(['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'], outfunc => sub {
PVE::Tools::run_command(
['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'],
outfunc => sub {
my $line = shift;
$raw .= $line;
});
},
);
$dom = XML::LibXML->load_xml(string => $raw, no_blanks => 1);
} else {
$dom = XML::LibXML->load_xml(location => $ovf, no_blanks => 1);
}
# register the xml namespaces in a xpath context object
# 'ovf' is the default namespace so it will prepended to each xml element
my $xpc = XML::LibXML::XPathContext->new($dom);
$xpc->registerNs('ovf', 'http://schemas.dmtf.org/ovf/envelope/1');
$xpc->registerNs('vmw', 'http://www.vmware.com/schema/ovf');
$xpc->registerNs('rasd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData');
$xpc->registerNs('vssd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData');
$xpc->registerNs(
'rasd',
'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData',
);
$xpc->registerNs(
'vssd',
'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData',
);
# hash to save qm.conf parameters
my $qm;
@ -222,32 +227,39 @@ sub parse_ovf {
$ovf_name =~ s/\s+/-/g;
($qm->{name} = $ovf_name) =~ s/[^a-zA-Z0-9\-\.]//g;
} else {
warn "warning: unable to parse the VM name in this OVF manifest, generating a default value\n";
warn
"warning: unable to parse the VM name in this OVF manifest, generating a default value\n";
}
# middle level xpath
# element[child] search the elements which have this [child]
my $processor_id = dtmf_name_to_id('Processor');
my $xpath_find_vcpu_count = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity";
my $xpath_find_vcpu_count =
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity";
$qm->{'cores'} = $xpc->findvalue($xpath_find_vcpu_count);
my $memory_id = dtmf_name_to_id('Memory');
my $xpath_find_memory = ("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity");
my $xpath_find_memory = (
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity"
);
$qm->{'memory'} = $xpc->findvalue($xpath_find_memory);
# middle level xpath
# here we expect multiple results, so we do not read the element value with
# findvalue() but store multiple elements with findnodes()
my $disk_id = dtmf_name_to_id('Disk Drive');
my $xpath_find_disks = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]";
my $xpath_find_disks =
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]";
my @disk_items = $xpc->findnodes($xpath_find_disks);
my $xpath_find_ostype_id = "/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id";
my $xpath_find_ostype_id =
"/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id";
my $ostype_id = $xpc->findvalue($xpath_find_ostype_id);
$qm->{ostype} = get_ostype($ostype_id);
# vmware specific firmware config, seems to not be standardized in ovf ?
my $xpath_find_firmware = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value";
my $xpath_find_firmware =
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value";
my $firmware = $xpc->findvalue($xpath_find_firmware) || 'seabios';
$qm->{bios} = 'ovmf' if $firmware eq 'efi';
@ -290,12 +302,18 @@ sub parse_ovf {
# tricky xpath
# @ means we filter the result query based on a the value of an item attribute ( @ = attribute)
# @ needs to be escaped to prevent Perl double quote interpolation
my $xpath_find_fileref = sprintf("/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id);
my $xpath_find_capacity = sprintf("/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id);
my $xpath_find_capacity_unit = sprintf("/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id);
my $xpath_find_fileref = sprintf(
"/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id,
);
my $xpath_find_capacity = sprintf(
"/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id,
);
my $xpath_find_capacity_unit = sprintf(
"/ovf:Envelope/ovf:DiskSection/\
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id,
);
my $fileref = $xpc->findvalue($xpath_find_fileref);
my $capacity = $xpc->findvalue($xpath_find_capacity);
my $capacity_unit = $xpc->findvalue($xpath_find_capacity_unit);
@ -312,8 +330,10 @@ ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id);
# from Item, find owning Controller type
my $controller_id = $xpc->findvalue('rasd:Parent', $item_node);
my $xpath_find_parent_type = sprintf("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\
ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
my $xpath_find_parent_type = sprintf(
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\
ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id,
);
my $controller_type = $xpc->findvalue($xpath_find_parent_type);
if (!$controller_type) {
warn "invalid or missing controller: $controller_type, skipping\n";
@ -326,7 +346,8 @@ ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
my $pve_disk_address = id_to_pve($controller_type) . $adress_on_controller;
# from Disk Node, find corresponding filepath
my $xpath_find_filepath = sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref);
my $xpath_find_filepath =
sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref);
my $filepath = $xpc->findvalue($xpath_find_filepath);
if (!$filepath) {
warn "invalid file reference $fileref, skipping\n";
@ -335,13 +356,14 @@ ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
print "file path: $filepath\n" if $debug;
my $original_filepath = $filepath;
($filepath) = $filepath =~ m|^(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$|; # untaint & check no sub/parent dirs
die "referenced path '$original_filepath' is invalid\n" if !$filepath || $filepath eq "." || $filepath eq "..";
die "referenced path '$original_filepath' is invalid\n"
if !$filepath || $filepath eq "." || $filepath eq "..";
# resolve symlinks and relative path components
# and die if the diskimage is not somewhere under the $ovf path
my $ovf_dir = realpath(dirname(File::Spec->rel2abs($ovf)))
or die "could not get absolute path of $ovf: $!\n";
my $backing_file_path = realpath(join ('/', $ovf_dir, $filepath))
my $backing_file_path = realpath(join('/', $ovf_dir, $filepath))
or die "could not get absolute path of $filepath: $!\n";
if ($backing_file_path !~ /^\Q${ovf_dir}\E/) {
die "error parsing $filepath, are you using a symlink ?\n";
@ -374,7 +396,8 @@ ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
$qm->{boot} = "order=" . join(';', @$boot_order) if scalar(@$boot_order) > 0;
my $nic_id = dtmf_name_to_id('Ethernet Adapter');
my $xpath_find_nics = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]";
my $xpath_find_nics =
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]";
my @nic_items = $xpc->findnodes($xpath_find_nics);
my $net = {};
@ -383,12 +406,12 @@ ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
for my $item_node (@nic_items) {
my $model = $xpc->findvalue('rasd:ResourceSubType', $item_node);
$model = lc($model);
$model = 'e1000' if ! grep { $_ eq $model } @$allowed_nic_models;
$model = 'e1000' if !grep { $_ eq $model } @$allowed_nic_models;
$net->{"net${net_count}"} = { model => $model };
$net_count++;
}
return {qm => $qm, disks => \@disks, net => $net};
return { qm => $qm, disks => \@disks, net => $net };
}
1;


@ -68,8 +68,11 @@ PVE::Storage::BTRFSPlugin->register();
PVE::Storage::ESXiPlugin->register();
# load third-party plugins
if ( -d '/usr/share/perl5/PVE/Storage/Custom' ) {
dir_glob_foreach('/usr/share/perl5/PVE/Storage/Custom', '.*\.pm$', sub {
if (-d '/usr/share/perl5/PVE/Storage/Custom') {
dir_glob_foreach(
'/usr/share/perl5/PVE/Storage/Custom',
'.*\.pm$',
sub {
my ($file) = @_;
my $modname = 'PVE::Storage::Custom::' . $file;
$modname =~ s!\.pm$!!;
@ -79,11 +82,13 @@ if ( -d '/usr/share/perl5/PVE/Storage/Custom' ) {
require $file;
# Check perl interface:
die "not derived from PVE::Storage::Plugin\n" if !$modname->isa('PVE::Storage::Plugin');
die "not derived from PVE::Storage::Plugin\n"
if !$modname->isa('PVE::Storage::Plugin');
die "does not provide an api() method\n" if !$modname->can('api');
# Check storage API version and that file is really storage plugin.
my $version = $modname->api();
die "implements an API version newer than current ($version > " . APIVER . ")\n"
die "implements an API version newer than current ($version > "
. APIVER . ")\n"
if $version > APIVER;
my $min_version = (APIVER - APIAGE);
die "API version too old, please update the plugin ($version < $min_version)\n"
@ -93,13 +98,15 @@ if ( -d '/usr/share/perl5/PVE/Storage/Custom' ) {
$modname->register();
# If we got this far and the API version is not the same, make some noise:
warn "Plugin \"$modname\" is implementing an older storage API, an upgrade is recommended\n"
warn
"Plugin \"$modname\" is implementing an older storage API, an upgrade is recommended\n"
if $version != APIVER;
};
if ($@) {
warn "Error loading storage plugin \"$modname\": $@";
}
});
},
);
}
# initialize all plugins
@ -164,8 +171,7 @@ my $convert_maxfiles_to_prune_backups = sub {
$prune_backups = { 'keep-all' => 1 };
}
$scfg->{'prune-backups'} = PVE::JSONSchema::print_property_string(
$prune_backups,
'prune-backups'
$prune_backups, 'prune-backups',
);
}
};
@ -246,7 +252,7 @@ sub get_max_protected_backups {
sub storage_ids {
my ($cfg) = @_;
return keys %{$cfg->{ids}};
return keys %{ $cfg->{ids} };
}
sub file_size_info {
@ -287,12 +293,13 @@ sub update_volume_attribute {
my ($backup_type) = map { $_->{subtype} } grep { $_->{volid} eq $volid } $backups->@*;
my $protected_count = grep {
$_->{protected} && (!$backup_type || ($_->{subtype} && $_->{subtype} eq $backup_type))
$_->{protected}
&& (!$backup_type || ($_->{subtype} && $_->{subtype} eq $backup_type))
} $backups->@*;
if ($max_protected_backups <= $protected_count) {
die "The number of protected backups per guest is limited to $max_protected_backups ".
"on storage '$storeid'\n";
die "The number of protected backups per guest is limited to $max_protected_backups "
. "on storage '$storeid'\n";
}
}
@ -429,7 +436,8 @@ sub volume_has_feature {
if ($storeid) {
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
return $plugin->volume_has_feature($scfg, $feature, $storeid, $volname, $snap, $running, $opts);
return $plugin->volume_has_feature($scfg, $feature, $storeid, $volname, $snap, $running,
$opts);
} elsif ($volid =~ m|^(/.+)$| && -e $volid) {
return undef;
} else {
@ -553,7 +561,11 @@ sub check_volume_access {
if ($vtype eq 'iso' || $vtype eq 'vztmpl' || $vtype eq 'import') {
# require at least read access to storage, (custom) templates/ISOs could be sensitive
$rpcenv->check_any($user, "/storage/$sid", ['Datastore.AllocateSpace', 'Datastore.Audit']);
$rpcenv->check_any(
$user,
"/storage/$sid",
['Datastore.AllocateSpace', 'Datastore.Audit'],
);
} elsif (defined($ownervm) && defined($vmid) && ($ownervm == $vmid)) {
# we are owner - allow access
} elsif ($vtype eq 'backup' && $ownervm) {
@ -583,8 +595,7 @@ sub volume_is_base_and_used {
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
my ($vtype, $name, $vmid, undef, undef, $isBase, undef) =
$plugin->parse_volname($volname);
my ($vtype, $name, $vmid, undef, undef, $isBase, undef) = $plugin->parse_volname($volname);
if ($isBase) {
my $vollist = $plugin->list_images($storeid, $scfg);
@ -593,7 +604,7 @@ sub volume_is_base_and_used {
my $basename = undef;
my $basevmid = undef;
eval{
eval {
(undef, undef, undef, $basename, $basevmid) =
$plugin->parse_volname($tmpvolname);
};
@ -693,8 +704,8 @@ sub abs_filesystem_path {
my ($cfg, $volid, $allow_blockdev) = @_;
my $path;
if (parse_volume_id ($volid, 1)) {
activate_volumes($cfg, [ $volid ]);
if (parse_volume_id($volid, 1)) {
activate_volumes($cfg, [$volid]);
$path = PVE::Storage::path($cfg, $volid);
} else {
if (-f $volid || ($allow_blockdev && -b $volid)) {
@ -782,8 +793,10 @@ my $volume_export_prepare = sub {
my $cstream;
if (defined($ratelimit_bps)) {
$cstream = [ '/usr/bin/cstream', '-t', $ratelimit_bps ];
$logfunc->("using a bandwidth limit of $ratelimit_bps bytes per second for transferring '$volid'") if $logfunc;
$cstream = ['/usr/bin/cstream', '-t', $ratelimit_bps];
$logfunc->(
"using a bandwidth limit of $ratelimit_bps bytes per second for transferring '$volid'")
if $logfunc;
}
volume_snapshot($cfg, $volid, $snapshot) if $migration_snapshot;
@ -794,7 +807,7 @@ my $volume_export_prepare = sub {
activate_volumes($cfg, [$volid]);
}
return $cstream ? [ $send, $cstream ] : [ $send ];
return $cstream ? [$send, $cstream] : [$send];
};
sub storage_migrate {
@ -830,11 +843,19 @@ sub storage_migrate {
local $ENV{RSYNC_RSH} = PVE::Tools::cmd2string($ssh_base);
if (!defined($opts->{snapshot})) {
$opts->{migration_snapshot} = storage_migrate_snapshot($cfg, $storeid, $opts->{with_snapshots});
$opts->{migration_snapshot} =
storage_migrate_snapshot($cfg, $storeid, $opts->{with_snapshots});
$opts->{snapshot} = '__migration__' if $opts->{migration_snapshot};
}
my @formats = volume_transfer_formats($cfg, $volid, $target_volid, $opts->{snapshot}, $opts->{base_snapshot}, $opts->{with_snapshots});
my @formats = volume_transfer_formats(
$cfg,
$volid,
$target_volid,
$opts->{snapshot},
$opts->{base_snapshot},
$opts->{with_snapshots},
);
die "cannot migrate from storage type '$scfg->{type}' to '$tcfg->{type}'\n" if !@formats;
my $format = $formats[0];
@ -844,7 +865,8 @@ sub storage_migrate {
$import_fn = "tcp://$net";
}
my $recv = [ @$ssh, '--', $volume_import_prepare->($target_volid, $format, $import_fn, $opts)->@* ];
my $recv =
[@$ssh, '--', $volume_import_prepare->($target_volid, $format, $import_fn, $opts)->@*];
my $new_volid;
my $pattern = volume_imported_message(undef, 1);
@ -902,7 +924,7 @@ sub storage_migrate {
eval {
run_command(
$cmds,
output => '>&'.fileno($socket),
output => '>&' . fileno($socket),
errfunc => $match_volid_and_log,
);
};
@ -961,10 +983,15 @@ sub vdisk_clone {
activate_storage($cfg, $storeid);
# lock shared storage
return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
return $plugin->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
my $volname = $plugin->clone_image($scfg, $storeid, $volname, $vmid, $snap);
return "$storeid:$volname";
});
},
);
}
sub vdisk_create_base {
@ -979,10 +1006,15 @@ sub vdisk_create_base {
activate_storage($cfg, $storeid);
# lock shared storage
return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
return $plugin->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
my $volname = $plugin->create_base($storeid, $scfg, $volname);
return "$storeid:$volname";
});
},
);
}
sub map_volume {
@ -1031,14 +1063,20 @@ sub vdisk_alloc {
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
# lock shared storage
return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
my $old_umask = umask(umask|0037);
my $volname = eval { $plugin->alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size) };
return $plugin->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
my $old_umask = umask(umask | 0037);
my $volname =
eval { $plugin->alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size) };
my $err = $@;
umask $old_umask;
die $err if $err;
return "$storeid:$volname";
});
},
);
}
sub vdisk_free {
@ -1053,7 +1091,11 @@ sub vdisk_free {
my $cleanup_worker;
# lock shared storage
$plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
$plugin->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
# LVM-thin allows deletion of still referenced base volumes!
die "base volume '$volname' is still in use by linked clones\n"
if volume_is_base_and_used($cfg, $volid);
@ -1061,7 +1103,8 @@ sub vdisk_free {
my (undef, undef, undef, undef, undef, $isBase, $format) =
$plugin->parse_volname($volname);
$cleanup_worker = $plugin->free_image($storeid, $scfg, $volname, $isBase, $format);
});
},
);
return if !$cleanup_worker;
@ -1111,7 +1154,8 @@ sub vdisk_list {
my $scfg = $ids->{$sid};
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
$res->{$sid} = $plugin->list_images($sid, $scfg, $vmid, $vollist, $cache);
@{$res->{$sid}} = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @{$res->{$sid}} if $res->{$sid};
@{ $res->{$sid} } = sort { lc($a->{volid}) cmp lc($b->{volid}) } @{ $res->{$sid} }
if $res->{$sid};
}
return $res;
@ -1151,11 +1195,11 @@ sub volume_list {
my @ctypes = qw(rootdir images vztmpl iso backup snippets import);
my $cts = $content ? [ $content ] : [ @ctypes ];
my $cts = $content ? [$content] : [@ctypes];
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
$cts = [ grep { defined($scfg->{content}->{$_}) } @$cts ];
$cts = [grep { defined($scfg->{content}->{$_}) } @$cts];
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
@ -1163,7 +1207,7 @@ sub volume_list {
my $res = $plugin->list_volumes($storeid, $scfg, $vmid, $cts);
@$res = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @$res;
@$res = sort { lc($a->{volid}) cmp lc($b->{volid}) } @$res;
return $res;
}
@ -1178,7 +1222,7 @@ sub uevent_seqnum {
if ($line =~ m/^(\d+)$/) {
$seqnum = int($1);
}
close ($fh);
close($fh);
}
return $seqnum;
}
@ -1197,22 +1241,22 @@ sub activate_storage {
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
if ($scfg->{base}) {
my ($baseid, undef) = parse_volume_id ($scfg->{base});
my ($baseid, undef) = parse_volume_id($scfg->{base});
activate_storage($cfg, $baseid, $cache);
}
if (! eval { $plugin->check_connection($storeid, $scfg) }) {
if (!eval { $plugin->check_connection($storeid, $scfg) }) {
die "connection check for storage '$storeid' failed - $@\n" if $@;
die "storage '$storeid' is not online\n";
}
$plugin->activate_storage($storeid, $scfg, $cache);
my $newseq = uevent_seqnum ();
my $newseq = uevent_seqnum();
# only call udevsettle if there are events
if ($newseq > $cache->{uevent_seqnum}) {
system ("udevadm settle --timeout=30"); # ignore errors
system("udevadm settle --timeout=30"); # ignore errors
$cache->{uevent_seqnum} = $newseq;
}
@ -1232,7 +1276,7 @@ sub activate_storage_list {
sub deactivate_storage {
my ($cfg, $storeid) = @_;
my $scfg = storage_config ($cfg, $storeid);
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
my $cache = {};
@ -1276,9 +1320,7 @@ sub deactivate_volumes {
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
eval {
$plugin->deactivate_volume($storeid, $scfg, $volname, $snapname, $cache);
};
eval { $plugin->deactivate_volume($storeid, $scfg, $volname, $snapname, $cache); };
if (my $err = $@) {
warn $err;
push @errlist, $volid;
@ -1321,7 +1363,8 @@ sub storage_info {
avail => 0,
used => 0,
shared => $ids->{$storeid}->{shared} ? 1 : 0,
content => PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}),
content =>
PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}),
active => 0,
enabled => $storage_enabled ? 1 : 0,
};
@ -1383,21 +1426,24 @@ sub scan_nfs {
my ($server_in) = @_;
my $server;
if (!($server = resolv_server ($server_in))) {
if (!($server = resolv_server($server_in))) {
die "unable to resolve address for server '${server_in}'\n";
}
my $cmd = ['/sbin/showmount', '--no-headers', '--exports', $server];
my $res = {};
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
# note: how to handle white spaces in export path??
if ($line =~ m!^(/\S+)\s+(.+)$!) {
$res->{$1} = $2;
}
});
},
);
return $res;
}
@ -1418,10 +1464,11 @@ sub scan_cifs {
my $res = {};
my $err = '';
run_command($cmd,
run_command(
$cmd,
noerr => 1,
errfunc => sub {
$err .= "$_[0]\n"
$err .= "$_[0]\n";
},
outfunc => sub {
my $line = shift;
@ -1445,19 +1492,22 @@ sub scan_zfs {
my $cmd = ['zfs', 'list', '-t', 'filesystem', '-Hp', '-o', 'name,avail,used'];
my $res = [];
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
if ($line =~m/^(\S+)\s+(\S+)\s+(\S+)$/) {
if ($line =~ m/^(\S+)\s+(\S+)\s+(\S+)$/) {
my ($pool, $size_str, $used_str) = ($1, $2, $3);
my $size = $size_str + 0;
my $used = $used_str + 0;
# ignore subvolumes generated by our ZFSPoolPlugin
return if $pool =~ m!/subvol-\d+-[^/]+$!;
return if $pool =~ m!/basevol-\d+-[^/]+$!;
push @$res, { pool => $pool, size => $size, free => $size-$used };
push @$res, { pool => $pool, size => $size, free => $size - $used };
}
});
},
);
return $res;
}
@ -1478,7 +1528,6 @@ sub resolv_portal {
raise_param_exc({ portal => "unable to resolve portal address '$portal'" });
}
sub scan_iscsi {
my ($portal_in) = @_;
@ -1487,13 +1536,13 @@ sub scan_iscsi {
die "unable to parse/resolve portal address '${portal_in}'\n";
}
return PVE::Storage::ISCSIPlugin::iscsi_discovery(undef, [ $portal ]);
return PVE::Storage::ISCSIPlugin::iscsi_discovery(undef, [$portal]);
}
sub storage_default_format {
my ($cfg, $storeid) = @_;
my $scfg = storage_config ($cfg, $storeid);
my $scfg = storage_config($cfg, $storeid);
return PVE::Storage::Plugin::default_format($scfg);
}
@ -1501,7 +1550,7 @@ sub storage_default_format {
sub vgroup_is_used {
my ($cfg, $vgname) = @_;
foreach my $storeid (keys %{$cfg->{ids}}) {
foreach my $storeid (keys %{ $cfg->{ids} }) {
my $scfg = storage_config($cfg, $storeid);
if ($scfg->{type} eq 'lvm' && $scfg->{vgname} eq $vgname) {
return 1;
@ -1514,7 +1563,7 @@ sub vgroup_is_used {
sub target_is_used {
my ($cfg, $target) = @_;
foreach my $storeid (keys %{$cfg->{ids}}) {
foreach my $storeid (keys %{ $cfg->{ids} }) {
my $scfg = storage_config($cfg, $storeid);
if ($scfg->{type} eq 'iscsi' && $scfg->{target} eq $target) {
return 1;
@ -1527,7 +1576,7 @@ sub target_is_used {
sub volume_is_used {
my ($cfg, $volid) = @_;
foreach my $storeid (keys %{$cfg->{ids}}) {
foreach my $storeid (keys %{ $cfg->{ids} }) {
my $scfg = storage_config($cfg, $storeid);
if ($scfg->{base} && $scfg->{base} eq $volid) {
return 1;
@ -1540,7 +1589,7 @@ sub volume_is_used {
sub storage_is_used {
my ($cfg, $storeid) = @_;
foreach my $sid (keys %{$cfg->{ids}}) {
foreach my $sid (keys %{ $cfg->{ids} }) {
my $scfg = storage_config($cfg, $sid);
next if !$scfg->{base};
my ($st) = parse_volume_id($scfg->{base});
@ -1556,11 +1605,11 @@ sub foreach_volid {
return if !$list;
foreach my $sid (keys %$list) {
foreach my $info (@{$list->{$sid}}) {
foreach my $info (@{ $list->{$sid} }) {
my $volid = $info->{volid};
my ($sid1, $volname) = parse_volume_id($volid, 1);
if ($sid1 && $sid1 eq $sid) {
&$func ($volid, $sid, $info);
&$func($volid, $sid, $info);
} else {
warn "detected strange volid '$volid' in volume list for '$sid'\n";
}
@ -1630,9 +1679,11 @@ sub archive_info {
$info->{filename} = $filename;
$info->{type} = $type;
if ($volid =~ /^(vzdump-${type}-([1-9][0-9]{2,8})-(\d{4})_(\d{2})_(\d{2})-(\d{2})_(\d{2})_(\d{2}))\.${extension}$/) {
$info->{logfilename} = "$1".PVE::Storage::Plugin::LOG_EXT;
$info->{notesfilename} = "$filename".PVE::Storage::Plugin::NOTES_EXT;
if ($volid =~
/^(vzdump-${type}-([1-9][0-9]{2,8})-(\d{4})_(\d{2})_(\d{2})-(\d{2})_(\d{2})_(\d{2}))\.${extension}$/
) {
$info->{logfilename} = "$1" . PVE::Storage::Plugin::LOG_EXT;
$info->{notesfilename} = "$filename" . PVE::Storage::Plugin::NOTES_EXT;
$info->{vmid} = int($2);
$info->{ctime} = timelocal($8, $7, $6, $5, $4 - 1, $3);
$info->{is_std_name} = 1;
@ -1676,10 +1727,10 @@ sub archive_auxiliaries_remove {
sub extract_vzdump_config_tar {
my ($archive, $conf_re) = @_;
die "ERROR: file '$archive' does not exist\n" if ! -f $archive;
die "ERROR: file '$archive' does not exist\n" if !-f $archive;
my $pid = open(my $fh, '-|', 'tar', 'tf', $archive) ||
die "unable to open file '$archive'\n";
my $pid = open(my $fh, '-|', 'tar', 'tf', $archive)
|| die "unable to open file '$archive'\n";
my $file;
while (defined($file = <$fh>)) {
@ -1718,16 +1769,20 @@ sub extract_vzdump_config_vma {
my $decompressor = $info->{decompressor};
if ($comp) {
my $cmd = [ [@$decompressor, $archive], ["vma", "config", "-"] ];
my $cmd = [[@$decompressor, $archive], ["vma", "config", "-"]];
# lzop/zcat exits with 1 when the pipe is closed early by vma, detect this and ignore the exit code later
my $broken_pipe;
my $errstring;
my $err = sub {
my $output = shift;
if ($output =~ m/lzop: Broken pipe: <stdout>/ || $output =~ m/gzip: stdout: Broken pipe/ || $output =~ m/zstd: error 70 : Write error.*Broken pipe/) {
if (
$output =~ m/lzop: Broken pipe: <stdout>/
|| $output =~ m/gzip: stdout: Broken pipe/
|| $output =~ m/zstd: error 70 : Write error.*Broken pipe/
) {
$broken_pipe = 1;
} elsif (!defined ($errstring) && $output !~ m/^\s*$/) {
} elsif (!defined($errstring) && $output !~ m/^\s*$/) {
$errstring = "Failed to extract config from VMA archive: $output\n";
}
};
@ -1853,39 +1908,63 @@ sub prune_mark_backup_group {
return;
}
my $prune_list = [ sort { $b->{ctime} <=> $a->{ctime} } @{$backup_group} ];
my $prune_list = [sort { $b->{ctime} <=> $a->{ctime} } @{$backup_group}];
$prune_mark->($prune_list, $keep->{'keep-last'}, sub {
$prune_mark->(
$prune_list,
$keep->{'keep-last'},
sub {
my ($ctime) = @_;
return $ctime;
});
$prune_mark->($prune_list, $keep->{'keep-hourly'}, sub {
},
);
$prune_mark->(
$prune_list,
$keep->{'keep-hourly'},
sub {
my ($ctime) = @_;
my (undef, undef, $hour, $day, $month, $year) = localtime($ctime);
return "$hour/$day/$month/$year";
});
$prune_mark->($prune_list, $keep->{'keep-daily'}, sub {
},
);
$prune_mark->(
$prune_list,
$keep->{'keep-daily'},
sub {
my ($ctime) = @_;
my (undef, undef, undef, $day, $month, $year) = localtime($ctime);
return "$day/$month/$year";
});
$prune_mark->($prune_list, $keep->{'keep-weekly'}, sub {
},
);
$prune_mark->(
$prune_list,
$keep->{'keep-weekly'},
sub {
my ($ctime) = @_;
my ($sec, $min, $hour, $day, $month, $year) = localtime($ctime);
my $iso_week = int(strftime("%V", $sec, $min, $hour, $day, $month, $year));
my $iso_week_year = int(strftime("%G", $sec, $min, $hour, $day, $month, $year));
return "$iso_week/$iso_week_year";
});
$prune_mark->($prune_list, $keep->{'keep-monthly'}, sub {
},
);
$prune_mark->(
$prune_list,
$keep->{'keep-monthly'},
sub {
my ($ctime) = @_;
my (undef, undef, undef, undef, $month, $year) = localtime($ctime);
return "$month/$year";
});
$prune_mark->($prune_list, $keep->{'keep-yearly'}, sub {
},
);
$prune_mark->(
$prune_list,
$keep->{'keep-yearly'},
sub {
my ($ctime) = @_;
my $year = (localtime($ctime))[5];
return "$year";
});
},
);
foreach my $prune_entry (@{$prune_list}) {
$prune_entry->{mark} //= 'remove';
@ -1899,8 +1978,9 @@ sub volume_export : prototype($$$$$$$) {
die "cannot export volume '$volid'\n" if !$storeid;
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
return $plugin->volume_export($scfg, $storeid, $fh, $volname, $format,
$snapshot, $base_snapshot, $with_snapshots);
return $plugin->volume_export(
$scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub volume_import : prototype($$$$$$$$) {
@ -1930,9 +2010,9 @@ sub volume_export_formats : prototype($$$$$) {
return if !$storeid;
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
return $plugin->volume_export_formats($scfg, $storeid, $volname,
$snapshot, $base_snapshot,
$with_snapshots);
return $plugin->volume_export_formats(
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub volume_import_formats : prototype($$$$$) {
@ -1943,19 +2023,16 @@ sub volume_import_formats : prototype($$$$$) {
my $scfg = storage_config($cfg, $storeid);
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
return $plugin->volume_import_formats(
$scfg,
$storeid,
$volname,
$snapshot,
$base_snapshot,
$with_snapshots,
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub volume_transfer_formats {
my ($cfg, $src_volid, $dst_volid, $snapshot, $base_snapshot, $with_snapshots) = @_;
my @export_formats = volume_export_formats($cfg, $src_volid, $snapshot, $base_snapshot, $with_snapshots);
my @import_formats = volume_import_formats($cfg, $dst_volid, $snapshot, $base_snapshot, $with_snapshots);
my @export_formats =
volume_export_formats($cfg, $src_volid, $snapshot, $base_snapshot, $with_snapshots);
my @import_formats =
volume_import_formats($cfg, $dst_volid, $snapshot, $base_snapshot, $with_snapshots);
my %import_hash = map { $_ => 1 } @import_formats;
my @common = grep { $import_hash{$_} } @export_formats;
return @common;
@ -1987,7 +2064,8 @@ sub volume_import_start {
my $volid = "$storeid:$volname";
# find common import/export format, like volume_transfer_formats
my @import_formats = PVE::Storage::volume_import_formats($cfg, $volid, $opts->{snapshot}, undef, $with_snapshots);
my @import_formats = PVE::Storage::volume_import_formats($cfg, $volid, $opts->{snapshot}, undef,
$with_snapshots);
my @export_formats = PVE::Tools::split_list($opts->{export_formats});
my %import_hash = map { $_ => 1 } @import_formats;
my @common = grep { $import_hash{$_} } @export_formats;
@ -2025,7 +2103,7 @@ sub volume_import_start {
sub volume_export_start {
my ($cfg, $volid, $format, $log, $opts) = @_;
my $known_format = [ grep { $_ eq $format } $KNOWN_EXPORT_FORMATS->@* ];
my $known_format = [grep { $_ eq $format } $KNOWN_EXPORT_FORMATS->@*];
if (!$known_format->@*) {
die "Cannot export '$volid' using unknown export format '$format'\n";
}
@ -2053,7 +2131,7 @@ sub complete_storage {
my $cfg = PVE::Storage::config();
return $cmdname eq 'add' ? [] : [ PVE::Storage::storage_ids($cfg) ];
return $cmdname eq 'add' ? [] : [PVE::Storage::storage_ids($cfg)];
}
sub complete_storage_enabled {
@ -2062,7 +2140,7 @@ sub complete_storage_enabled {
my $res = [];
my $cfg = PVE::Storage::config();
foreach my $sid (keys %{$cfg->{ids}}) {
foreach my $sid (keys %{ $cfg->{ids} }) {
next if !storage_check_enabled($cfg, $sid, undef, 1);
push @$res, $sid;
}
@ -2083,7 +2161,7 @@ sub complete_volume {
my $storage_list = complete_storage_enabled();
if ($cvalue =~ m/^([^:]+):/) {
$storage_list = [ $1 ];
$storage_list = [$1];
} else {
if (scalar(@$storage_list) > 1) {
# only list storage IDs to avoid large listings
@ -2124,9 +2202,16 @@ sub rename_volume {
$target_vmid = ($plugin->parse_volname($source_volname))[3] if !$target_vmid;
return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
return $plugin->rename_volume($scfg, $storeid, $source_volname, $target_vmid, $target_volname);
});
return $plugin->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
return $plugin->rename_volume(
$scfg, $storeid, $source_volname, $target_vmid, $target_volname,
);
},
);
}
# Various io-heavy operations require io/bandwidth limits which can be
@ -2171,7 +2256,8 @@ sub get_bandwidth_limit {
# limits, therefore it also allows us to override them.
# Since we have most likely multiple storages to check, do a quick check on
# the general '/storage' path to see if we can skip the checks entirely:
return $override if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1);
return $override
if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1);
my %done;
foreach my $storage (@$storage_list) {
@ -2181,7 +2267,10 @@ sub get_bandwidth_limit {
$done{$storage} = 1;
# Otherwise we may still have individual /storage/$ID permissions:
if (!$rpcenv || !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1)) {
if (
!$rpcenv
|| !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1)
) {
# And if not: apply the limits.
my $storecfg = storage_config($config, $storage);
$apply_limit->($storecfg->{bwlimit});


@ -44,7 +44,7 @@ sub plugindata {
},
{ images => 1, rootdir => 1 },
],
format => [ { raw => 1, subvol => 1 }, 'raw', ],
format => [{ raw => 1, subvol => 1 }, 'raw'],
'sensitive-properties' => {},
};
}
@ -95,7 +95,8 @@ sub options {
# Reuse `DirPlugin`'s `check_config`. This simply checks for invalid paths.
sub check_config {
my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_;
return PVE::Storage::DirPlugin::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
return PVE::Storage::DirPlugin::check_config($self, $sectionId, $config, $create,
$skipSchemaCheck);
}
my sub getfsmagic($) {
@ -127,7 +128,7 @@ sub activate_storage {
my $mp = PVE::Storage::DirPlugin::parse_is_mountpoint($scfg);
if (defined($mp) && !PVE::Storage::DirPlugin::path_is_mounted($mp, $cache->{mountdata})) {
die "unable to activate storage '$storeid' - directory is expected to be a mount point but"
." is not mounted: '$mp'\n";
. " is not mounted: '$mp'\n";
}
assert_btrfs($path); # only assert this stuff now, ensures $path is there and better UX
@ -142,18 +143,14 @@ sub status {
sub get_volume_attribute {
my ($class, $scfg, $storeid, $volname, $attribute) = @_;
return PVE::Storage::DirPlugin::get_volume_attribute($class, $scfg, $storeid, $volname, $attribute);
return PVE::Storage::DirPlugin::get_volume_attribute($class, $scfg, $storeid, $volname,
$attribute);
}
sub update_volume_attribute {
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;
return PVE::Storage::DirPlugin::update_volume_attribute(
$class,
$scfg,
$storeid,
$volname,
$attribute,
$value,
$class, $scfg, $storeid, $volname, $attribute, $value,
);
}
@ -190,8 +187,7 @@ sub raw_file_to_subvol($) {
sub filesystem_path {
my ($class, $scfg, $volname, $snapname) = @_;
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname);
my $path = $class->get_subdir($scfg, $vtype);
@ -415,19 +411,22 @@ my sub foreach_snapshot_of_subvol : prototype($$) {
my $basename = basename($subvol);
my $dir = dirname($subvol);
dir_glob_foreach($dir, $BTRFS_SNAPSHOT_REGEX, sub {
dir_glob_foreach(
$dir,
$BTRFS_SNAPSHOT_REGEX,
sub {
my ($volume, $name, $snap_name) = ($1, $2, $3);
return if !path_is_subvolume("$dir/$volume");
return if $name ne $basename;
$code->($snap_name);
});
},
);
}
sub free_image {
my ($class, $storeid, $scfg, $volname, $isBase, $_format) = @_;
my ($vtype, undef, $vmid, undef, undef, undef, $format) =
$class->parse_volname($volname);
my ($vtype, undef, $vmid, undef, undef, undef, $format) = $class->parse_volname($volname);
if (!defined($format) || $vtype ne 'images' || ($format ne 'subvol' && $format ne 'raw')) {
return $class->SUPER::free_image($storeid, $scfg, $volname, $isBase, $_format);
@ -441,10 +440,13 @@ sub free_image {
}
my @snapshot_vols;
foreach_snapshot_of_subvol($subvol, sub {
foreach_snapshot_of_subvol(
$subvol,
sub {
my ($snap_name) = @_;
push @snapshot_vols, "$subvol\@$snap_name";
});
},
);
$class->btrfs_cmd(['subvolume', 'delete', '--', @snapshot_vols, $subvol]);
# try to cleanup directory to not clutter storage with empty $vmid dirs if
@ -514,7 +516,7 @@ sub volume_resize {
sub volume_snapshot {
my ($class, $scfg, $storeid, $volname, $snap) = @_;
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6];
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1, 2, 6];
if ($format ne 'subvol' && $format ne 'raw') {
return PVE::Storage::Plugin::volume_snapshot(@_);
}
@ -543,7 +545,7 @@ sub volume_rollback_is_possible {
sub volume_snapshot_rollback {
my ($class, $scfg, $storeid, $volname, $snap) = @_;
my ($name, $format) = ($class->parse_volname($volname))[1,6];
my ($name, $format) = ($class->parse_volname($volname))[1, 6];
if ($format ne 'subvol' && $format ne 'raw') {
return PVE::Storage::Plugin::volume_snapshot_rollback(@_);
@ -581,7 +583,7 @@ sub volume_snapshot_rollback {
sub volume_snapshot_delete {
my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6];
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1, 2, 6];
if ($format ne 'subvol' && $format ne 'raw') {
return PVE::Storage::Plugin::volume_snapshot_delete(@_);
@ -604,7 +606,7 @@ sub volume_has_feature {
my $features = {
snapshot => {
current => { qcow2 => 1, raw => 1, subvol => 1 },
snap => { qcow2 => 1, raw => 1, subvol => 1 }
snap => { qcow2 => 1, raw => 1, subvol => 1 },
},
clone => {
base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 },
@ -628,7 +630,8 @@ sub volume_has_feature {
},
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = $class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
$class->parse_volname($volname);
my $key = undef;
if ($snapname) {
@ -674,9 +677,8 @@ sub list_images {
$format = 'subvol';
} else {
$format = $ext;
($size, undef, $used, $parent, $ctime) = eval {
PVE::Storage::Plugin::file_size_info($fn, undef, $format);
};
($size, undef, $used, $parent, $ctime) =
eval { PVE::Storage::Plugin::file_size_info($fn, undef, $format); };
if (my $err = $@) {
die $err if $err !~ m/Image is not in \S+ format$/;
warn "image '$fn' is not in expected format '$format', querying as raw\n";
@ -688,12 +690,16 @@ sub list_images {
next if !defined($size);
if ($vollist) {
next if ! grep { $_ eq $volid } @$vollist;
next if !grep { $_ eq $volid } @$vollist;
}
my $info = {
volid => $volid, format => $format,
size => $size, vmid => $owner, used => $used, parent => $parent,
volid => $volid,
format => $format,
size => $size,
vmid => $owner,
used => $used,
parent => $parent,
};
$info->{ctime} = $ctime if $ctime;
@ -730,13 +736,7 @@ sub volume_import_formats {
# Same as export-formats, beware the parameter order:
return volume_export_formats(
$class,
$scfg,
$storeid,
$volname,
$snapshot,
$base_snapshot,
$with_snapshots,
$class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
@ -787,16 +787,20 @@ sub volume_export {
push @$cmd, (map { "$path\@$_" } ($with_snapshots // [])->@*);
push @$cmd, $path if !defined($base_snapshot);
} else {
foreach_snapshot_of_subvol($path, sub {
foreach_snapshot_of_subvol(
$path,
sub {
my ($snap_name) = @_;
# NOTE: if there is a $snapshot specified via the arguments, it is added last below.
push @$cmd, "$path\@$snap_name" if !(defined($snapshot) && $snap_name eq $snapshot);
});
push @$cmd, "$path\@$snap_name"
if !(defined($snapshot) && $snap_name eq $snapshot);
},
);
}
$path .= "\@$snapshot" if defined($snapshot);
push @$cmd, $path;
run_command($cmd, output => '>&'.fileno($fh));
run_command($cmd, output => '>&' . fileno($fh));
return;
}
@ -858,7 +862,10 @@ sub volume_import {
my $dh = IO::Dir->new($tmppath)
or die "failed to open temporary receive directory '$tmppath' - $!\n";
eval {
run_command(['btrfs', '-q', 'receive', '-e', '--', $tmppath], input => '<&'.fileno($fh));
run_command(
['btrfs', '-q', 'receive', '-e', '--', $tmppath],
input => '<&' . fileno($fh),
);
# Analyze the received subvolumes;
my ($diskname, $found_snapshot, @snapshots);
@ -891,38 +898,39 @@ sub volume_import {
# Rotate the disk into place, first the current state:
# Note that read-only subvolumes cannot be moved into different directories, but for the
# "current" state we also want a writable copy, so start with that:
$class->btrfs_cmd(['property', 'set', '-f', "$tmppath/$diskname\@$snapshot", 'ro', 'false']);
$class->btrfs_cmd(
['property', 'set', '-f', "$tmppath/$diskname\@$snapshot", 'ro', 'false']);
PVE::Tools::renameat2(
-1,
"$tmppath/$diskname\@$snapshot",
-1,
$destination,
&PVE::Tools::RENAME_NOREPLACE,
) or die "failed to move received snapshot '$tmppath/$diskname\@$snapshot'"
)
or die "failed to move received snapshot '$tmppath/$diskname\@$snapshot'"
. " into place at '$destination' - $!\n";
# Now recreate the actual snapshot:
$class->btrfs_cmd([
'subvolume',
'snapshot',
'-r',
'--',
$destination,
"$destination\@$snapshot",
'subvolume', 'snapshot', '-r', '--', $destination, "$destination\@$snapshot",
]);
# Now go through the remaining snapshots (if any)
foreach my $snap (@snapshots) {
$class->btrfs_cmd(['property', 'set', '-f', "$tmppath/$diskname\@$snap", 'ro', 'false']);
$class->btrfs_cmd(
['property', 'set', '-f', "$tmppath/$diskname\@$snap", 'ro', 'false']);
PVE::Tools::renameat2(
-1,
"$tmppath/$diskname\@$snap",
-1,
"$destination\@$snap",
&PVE::Tools::RENAME_NOREPLACE,
) or die "failed to move received snapshot '$tmppath/$diskname\@$snap'"
)
or die "failed to move received snapshot '$tmppath/$diskname\@$snap'"
. " into place at '$destination\@$snap' - $!\n";
eval { $class->btrfs_cmd(['property', 'set', "$destination\@$snap", 'ro', 'true']) };
eval {
$class->btrfs_cmd(['property', 'set', "$destination\@$snap", 'ro', 'true']);
};
warn "failed to make $destination\@$snap read-only - $!\n" if $@;
}
};
@ -938,10 +946,11 @@ sub volume_import {
eval { $class->btrfs_cmd(['subvolume', 'delete', '--', "$tmppath/$entry"]) };
warn $@ if $@;
}
$dh->close; undef $dh;
$dh->close;
undef $dh;
}
if (!rmdir($tmppath)) {
warn "failed to remove temporary directory '$tmppath' - $!\n"
warn "failed to remove temporary directory '$tmppath' - $!\n";
}
};
warn $@ if $@;
@ -961,7 +970,9 @@ sub rename_volume {
my $format = ($class->parse_volname($source_volname))[6];
if ($format ne 'raw' && $format ne 'subvol') {
return $class->SUPER::rename_volume($scfg, $storeid, $source_volname, $target_vmid, $target_volname);
return $class->SUPER::rename_volume(
$scfg, $storeid, $source_volname, $target_vmid, $target_volname,
);
}
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format, 1)
@ -978,8 +989,8 @@ sub rename_volume {
my $new_path = "${basedir}/${target_dir}";
die "target volume '${target_volname}' already exists\n" if -e $new_path;
rename $old_path, $new_path ||
die "rename '$old_path' to '$new_path' failed - $!\n";
rename $old_path, $new_path
|| die "rename '$old_path' to '$new_path' failed - $!\n";
return "${storeid}:$target_volname";
}


@ -16,7 +16,7 @@ use base qw(PVE::Storage::Plugin);
sub cifs_is_mounted : prototype($$) {
my ($scfg, $mountdata) = @_;
my ($mountpoint, $server, $share) = $scfg->@{'path', 'server', 'share'};
my ($mountpoint, $server, $share) = $scfg->@{ 'path', 'server', 'share' };
my $subdir = $scfg->{subdir} // '';
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
@ -24,9 +24,9 @@ sub cifs_is_mounted : prototype($$) {
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
return $mountpoint if grep {
$_->[2] =~ /^cifs/ &&
$_->[0] =~ m|^\Q$source\E/?$| &&
$_->[1] eq $mountpoint
$_->[2] =~ /^cifs/
&& $_->[0] =~ m|^\Q$source\E/?$|
&& $_->[1] eq $mountpoint
} @$mountdata;
return undef;
}
@ -69,7 +69,7 @@ sub get_cred_file {
sub cifs_mount : prototype($$$$$) {
my ($scfg, $storeid, $smbver, $user, $domain) = @_;
my ($mountpoint, $server, $share, $options) = $scfg->@{'path', 'server', 'share', 'options'};
my ($mountpoint, $server, $share, $options) = $scfg->@{ 'path', 'server', 'share', 'options' };
my $subdir = $scfg->{subdir} // '';
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
@ -98,9 +98,19 @@ sub type {
sub plugindata {
return {
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1,
backup => 1, snippets => 1, import => 1}, { images => 1 }],
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
content => [
{
images => 1,
rootdir => 1,
vztmpl => 1,
iso => 1,
backup => 1,
snippets => 1,
import => 1,
},
{ images => 1 },
],
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
'sensitive-properties' => { password => 1 },
};
}
@ -123,8 +133,9 @@ sub properties {
maxLength => 256,
},
smbversion => {
description => "SMB protocol version. 'default' if not set, negotiates the highest SMB2+"
." version supported by both the client and server.",
description =>
"SMB protocol version. 'default' if not set, negotiates the highest SMB2+"
. " version supported by both the client and server.",
type => 'string',
default => 'default',
enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'],
@ -148,9 +159,9 @@ sub options {
content => { optional => 1 },
format => { optional => 1 },
username => { optional => 1 },
password => { optional => 1},
domain => { optional => 1},
smbversion => { optional => 1},
password => { optional => 1 },
domain => { optional => 1 },
smbversion => { optional => 1 },
mkdir => { optional => 1 },
'create-base-path' => { optional => 1 },
'create-subdirs' => { optional => 1 },
@ -160,7 +171,6 @@ sub options {
};
}
sub check_config {
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
@ -235,11 +245,10 @@ sub activate_storage {
$class->config_aware_base_mkdir($scfg, $path);
die "unable to activate storage '$storeid' - " .
"directory '$path' does not exist\n" if ! -d $path;
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
if !-d $path;
cifs_mount($scfg, $storeid, $scfg->{smbversion},
$scfg->{username}, $scfg->{domain});
cifs_mount($scfg, $storeid, $scfg->{smbversion}, $scfg->{username}, $scfg->{domain});
}
$class->SUPER::activate_storage($storeid, $scfg, $cache);
@ -262,7 +271,7 @@ sub deactivate_storage {
sub check_connection {
my ($class, $storeid, $scfg) = @_;
my $servicename = '//'.$scfg->{server}.'/'.$scfg->{share};
my $servicename = '//' . $scfg->{server} . '/' . $scfg->{share};
my $cmd = ['/usr/bin/smbclient', $servicename, '-d', '0'];
@ -275,18 +284,21 @@ sub check_connection {
push @$cmd, '-U', $scfg->{username}, '-A', $cred_file;
push @$cmd, '-W', $scfg->{domain} if $scfg->{domain};
} else {
push @$cmd, '-U', 'Guest','-N';
push @$cmd, '-U', 'Guest', '-N';
}
push @$cmd, '-c', 'echo 1 0';
my $out_str;
my $out = sub { $out_str .= shift };
eval { run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub {}) };
eval {
run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub { });
};
if (my $err = $@) {
die "$out_str\n" if defined($out_str) &&
($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/);
die "$out_str\n"
if defined($out_str)
&& ($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/);
return 0;
}


@ -27,9 +27,9 @@ sub cephfs_is_mounted {
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
return $mountpoint if grep {
$_->[2] =~ m#^ceph|fuse\.ceph-fuse# &&
$_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$# &&
$_->[1] eq $mountpoint
$_->[2] =~ m#^ceph|fuse\.ceph-fuse#
&& $_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$#
&& $_->[1] eq $mountpoint
} @$mountdata;
warn "A filesystem is already mounted on $mountpoint\n"
@ -42,11 +42,11 @@ sub cephfs_is_mounted {
sub systemd_netmount {
my ($where, $type, $what, $opts) = @_;
# don't do default deps, systemd v241 generator produces ordering deps on both
# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev
# option. Over three corners this gets us an ordering cycle on shutdown, which
# may make shutdown hang if the random cycle breaking hits the "wrong" unit to
# delete.
# don't do default deps, systemd v241 generator produces ordering deps on both
# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev
# option. Over three corners this gets us an ordering cycle on shutdown, which
# may make shutdown hang if the random cycle breaking hits the "wrong" unit to
# delete.
my $unit = <<"EOF";
[Unit]
Description=${where}
@ -116,8 +116,8 @@ sub type {
sub plugindata {
return {
content => [ { vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
{ backup => 1 }],
content =>
[{ vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, { backup => 1 }],
'sensitive-properties' => { keyring => 1 },
};
}
@ -130,7 +130,8 @@ sub properties {
},
'fs-name' => {
description => "The Ceph filesystem name.",
type => 'string', format => 'pve-configid',
type => 'string',
format => 'pve-configid',
},
};
}
@ -139,7 +140,7 @@ sub options {
return {
path => { fixed => 1 },
'content-dirs' => { optional => 1 },
monhost => { optional => 1},
monhost => { optional => 1 },
nodes => { optional => 1 },
subdir => { optional => 1 },
disable => { optional => 1 },
@ -219,8 +220,8 @@ sub activate_storage {
$class->config_aware_base_mkdir($scfg, $path);
die "unable to activate storage '$storeid' - " .
"directory '$path' does not exist\n" if ! -d $path;
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
if !-d $path;
cephfs_mount($scfg, $storeid);
}


@ -50,11 +50,14 @@ Possible formats a guest image can have.
# Those formats should either be allowed here or support for them should be phased out (at least in
# the storage layer). Can still be added again in the future, should any plugin provider request it.
PVE::JSONSchema::register_standard_option('pve-storage-image-format', {
PVE::JSONSchema::register_standard_option(
'pve-storage-image-format',
{
type => 'string',
enum => ['raw', 'qcow2', 'subvol', 'vmdk'],
description => "Format of the image.",
});
},
);
=pod


@ -24,9 +24,20 @@ sub type {
sub plugindata {
return {
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, none => 1, import => 1 },
{ images => 1, rootdir => 1 }],
format => [ { raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 } , 'raw' ],
content => [
{
images => 1,
rootdir => 1,
vztmpl => 1,
iso => 1,
backup => 1,
snippets => 1,
none => 1,
import => 1,
},
{ images => 1, rootdir => 1 },
],
format => [{ raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 }, 'raw'],
'sensitive-properties' => {},
};
}
@ -35,11 +46,13 @@ sub properties {
return {
path => {
description => "File system path.",
type => 'string', format => 'pve-storage-path',
type => 'string',
format => 'pve-storage-path',
},
mkdir => {
description => "Create the directory if it doesn't exist and populate it with default sub-dirs."
." NOTE: Deprecated, use the 'create-base-path' and 'create-subdirs' options instead.",
description =>
"Create the directory if it doesn't exist and populate it with default sub-dirs."
. " NOTE: Deprecated, use the 'create-base-path' and 'create-subdirs' options instead.",
type => 'boolean',
default => 'yes',
},
@ -54,10 +67,9 @@ sub properties {
default => 'yes',
},
is_mountpoint => {
description =>
"Assume the given path is an externally managed mountpoint " .
"and consider the storage offline if it is not mounted. ".
"Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.",
description => "Assume the given path is an externally managed mountpoint "
. "and consider the storage offline if it is not mounted. "
. "Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.",
type => 'string',
default => 'no',
},
@ -201,7 +213,8 @@ sub update_volume_attribute {
or die "unable to create protection file '$protection_path' - $!\n";
close($fh);
} else {
unlink $protection_path or $! == ENOENT
unlink $protection_path
or $! == ENOENT
or die "could not delete protection file '$protection_path' - $!\n";
}
@ -224,7 +237,6 @@ sub status {
return $class->SUPER::status($storeid, $scfg, $cache);
}
sub activate_storage {
my ($class, $storeid, $scfg, $cache) = @_;
@ -232,8 +244,8 @@ sub activate_storage {
my $mp = parse_is_mountpoint($scfg);
if (defined($mp) && !path_is_mounted($mp, $cache->{mountdata})) {
die "unable to activate storage '$storeid' - " .
"directory is expected to be a mount point but is not mounted: '$mp'\n";
die "unable to activate storage '$storeid' - "
. "directory is expected to be a mount point but is not mounted: '$mp'\n";
}
$class->config_aware_base_mkdir($scfg, $path);
@ -242,7 +254,8 @@ sub activate_storage {
sub check_config {
my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_;
my $opts = PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
my $opts =
PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
return $opts if !$create;
if ($opts->{path} !~ m|^/[-/a-zA-Z0-9_.@]+$|) {
die "illegal path for directory storage: $opts->{path}\n";
@ -278,7 +291,7 @@ sub get_import_metadata {
if ($isOva) {
$volid = "$storeid:$volname/$path";
} else {
$volid = "$storeid:import/$path",
$volid = "$storeid:import/$path",;
}
$disks->{$id} = {
volid => $volid,


@ -29,8 +29,8 @@ sub type {
sub plugindata {
return {
content => [ { import => 1 }, { import => 1 }],
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
content => [{ import => 1 }, { import => 1 }],
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
'sensitive-properties' => { password => 1 },
};
}
@ -38,7 +38,8 @@ sub plugindata {
sub properties {
return {
'skip-cert-verification' => {
description => 'Disable TLS certificate verification, only enable on fully trusted networks!',
description =>
'Disable TLS certificate verification, only enable on fully trusted networks!',
type => 'boolean',
default => 'false',
},
@ -54,8 +55,8 @@ sub options {
# FIXME: bwlimit => { optional => 1 },
server => {},
username => {},
password => { optional => 1},
'skip-cert-verification' => { optional => 1},
password => { optional => 1 },
'skip-cert-verification' => { optional => 1 },
port => { optional => 1 },
};
}
@ -241,7 +242,7 @@ sub esxi_mount : prototype($$$;$) {
print {$wr} "ERROR: $err";
}
POSIX::_exit(1);
};
}
undef $wr;
my $result = do { local $/ = undef; <$rd> };
@ -261,7 +262,7 @@ sub esxi_unmount : prototype($$$) {
my $scope = "${scope_name_base}.scope";
my $mount_dir = mount_dir($storeid);
my %silence_std_outs = (outfunc => sub {}, errfunc => sub {});
my %silence_std_outs = (outfunc => sub { }, errfunc => sub { });
eval { run_command(['/bin/systemctl', 'reset-failed', $scope], %silence_std_outs) };
eval { run_command(['/bin/systemctl', 'stop', $scope], %silence_std_outs) };
run_command(['/bin/umount', $mount_dir]);
@ -291,11 +292,7 @@ sub get_import_metadata : prototype($$$$$) {
my $manifest = $class->get_manifest($storeid, $scfg, 0);
my $contents = file_get_contents($vmx_path);
my $vmx = PVE::Storage::ESXiPlugin::VMX->parse(
$storeid,
$scfg,
$volname,
$contents,
$manifest,
$storeid, $scfg, $volname, $contents, $manifest,
);
return $vmx->get_create_args();
}
@ -306,12 +303,13 @@ sub query_vmdk_size : prototype($;$) {
my $json = eval {
my $json = '';
run_command(['/usr/bin/qemu-img', 'info', '--output=json', $filename],
run_command(
['/usr/bin/qemu-img', 'info', '--output=json', $filename],
timeout => $timeout,
outfunc => sub { $json .= $_[0]; },
errfunc => sub { warn "$_[0]\n"; }
errfunc => sub { warn "$_[0]\n"; },
);
from_json($json)
from_json($json);
};
warn $@ if $@;
@ -447,7 +445,8 @@ sub list_volumes {
my $vm = $vms->{$vm_name};
my $ds_name = $vm->{config}->{datastore};
my $path = $vm->{config}->{path};
push @$res, {
push @$res,
{
content => 'import',
format => 'vmx',
name => $vm_name,
@ -507,7 +506,8 @@ sub volume_export_formats {
}
sub volume_export {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
= @_;
# FIXME: maybe we can support raw+size via `qemu-img dd`?
@ -521,7 +521,18 @@ sub volume_import_formats {
}
sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;
die "importing not supported for $class\n";
}
@ -554,6 +565,7 @@ sub volume_snapshot_delete {
die "deleting snapshots is not supported for $class\n";
}
sub volume_snapshot_info {
my ($class, $scfg, $storeid, $volname) = @_;
@ -664,7 +676,7 @@ sub config_path_for_vm {
}
die "failed to resolve path for vm '$vm' "
."($dc_name, $cfg->{datastore}, $cfg->{path})\n";
. "($dc_name, $cfg->{datastore}, $cfg->{path})\n";
}
die "no such vm '$vm'\n";
@ -978,14 +990,15 @@ sub smbios1_uuid {
# vmware stores space separated bytes and has 1 dash in the middle...
$uuid =~ s/[^0-9a-fA-f]//g;
if ($uuid =~ /^
if (
$uuid =~ /^
([0-9a-fA-F]{8})
([0-9a-fA-F]{4})
([0-9a-fA-F]{4})
([0-9a-fA-F]{4})
([0-9a-fA-F]{12})
$/x)
{
$/x
) {
return "$1-$2-$3-$4-$5";
}
return;
@ -1052,7 +1065,7 @@ sub get_create_args {
$create_net->{"net$id"} = $param;
});
my %counts = ( scsi => 0, sata => 0, ide => 0 );
my %counts = (scsi => 0, sata => 0, ide => 0);
my $boot_order = '';
@ -1108,7 +1121,7 @@ sub get_create_args {
}
$boot_order .= ';' if length($boot_order);
$boot_order .= $bus.$count;
$boot_order .= $bus . $count;
};
$self->for_each_disk($add_disk);
if (@nvmes) {
@ -1157,7 +1170,7 @@ sub get_create_args {
++$serid;
});
$warn->('guest-is-running') if defined($vminfo) && ($vminfo->{power}//'') ne 'poweredOff';
$warn->('guest-is-running') if defined($vminfo) && ($vminfo->{power} // '') ne 'poweredOff';
return {
type => 'vm',


@ -26,7 +26,7 @@ my $get_active_server = sub {
return $defaultserver;
}
my $serverlist = [ $defaultserver ];
my $serverlist = [$defaultserver];
push @$serverlist, $scfg->{server2} if $scfg->{server2};
my $ctime = time();
@ -54,7 +54,12 @@ my $get_active_server = sub {
my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];
run_command($cmd, errmsg => "glusterfs error", errfunc => sub {}, outfunc => $parser);
run_command(
$cmd,
errmsg => "glusterfs error",
errfunc => sub { },
outfunc => $parser,
);
}
$server_test_results->{$server} = { time => time(), active => $status };
@ -72,9 +77,9 @@ sub glusterfs_is_mounted {
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
return $mountpoint if grep {
$_->[2] eq 'fuse.glusterfs' &&
$_->[0] =~ /^\S+:\Q$volume\E$/ &&
$_->[1] eq $mountpoint
$_->[2] eq 'fuse.glusterfs'
&& $_->[0] =~ /^\S+:\Q$volume\E$/
&& $_->[1] eq $mountpoint
} @$mountdata;
return undef;
}
@ -97,9 +102,11 @@ sub type {
sub plugindata {
return {
content => [ { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1},
{ images => 1 }],
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
content => [
{ images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
{ images => 1 },
],
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
'sensitive-properties' => {},
};
}
@ -112,7 +119,8 @@ sub properties {
},
server2 => {
description => "Backup volfile server IP or DNS name.",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
requires => 'server',
},
transport => {
@ -145,7 +153,6 @@ sub options {
};
}
sub check_config {
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
@ -169,8 +176,7 @@ sub parse_name_dir {
sub path {
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname);
# Note: qcow2/qed has internal snapshot, so path is always
# the same (with or without snapshot => same file).
@ -232,8 +238,17 @@ sub clone_image {
my $glustervolume = $scfg->{volume};
my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";
my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename",
'-F', $format, '-f', 'qcow2', $volumepath];
my $cmd = [
'/usr/bin/qemu-img',
'create',
'-b',
"../$basevmid/$basename",
'-F',
$format,
'-f',
'qcow2',
$volumepath,
];
run_command($cmd, errmsg => "unable to create image");
@ -307,8 +322,8 @@ sub activate_storage {
if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
$class->config_aware_base_mkdir($scfg, $path);
die "unable to activate storage '$storeid' - " .
"directory '$path' does not exist\n" if ! -d $path;
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
if !-d $path;
my $server = &$get_active_server($scfg, 1);


@ -18,20 +18,25 @@ sub iscsi_ls {
my ($scfg) = @_;
my $portal = $scfg->{portal};
my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://'.$portal ];
my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://' . $portal];
my $list = {};
my %unittobytes = (
"k" => 1024,
"M" => 1024*1024,
"G" => 1024*1024*1024,
"T" => 1024*1024*1024*1024
"M" => 1024 * 1024,
"G" => 1024 * 1024 * 1024,
"T" => 1024 * 1024 * 1024 * 1024,
);
eval {
run_command($cmd, errmsg => "iscsi error", errfunc => sub {}, outfunc => sub {
run_command(
$cmd,
errmsg => "iscsi error",
errfunc => sub { },
outfunc => sub {
my $line = shift;
$line = trim($line);
if( $line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/ ) {
my $image = "lun".$1;
if ($line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/
) {
my $image = "lun" . $1;
my $size = $3;
my $unit = $4;
@ -41,7 +46,8 @@ sub iscsi_ls {
format => 'raw',
};
}
});
},
);
};
my $err = $@;
@ -58,7 +64,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, none => 1}, { images => 1 }],
content => [{ images => 1, none => 1 }, { images => 1 }],
select_existing => 1,
'sensitive-properties' => {},
};
@ -68,9 +74,9 @@ sub options {
return {
portal => { fixed => 1 },
target => { fixed => 1 },
nodes => { optional => 1},
disable => { optional => 1},
content => { optional => 1},
nodes => { optional => 1 },
disable => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
};
}
@ -80,7 +86,6 @@ sub options {
sub parse_volname {
my ($class, $volname) = @_;
if ($volname =~ m/^lun(\d+)$/) {
return ('images', $1, undef, undef, undef, undef, 'raw');
}
@ -164,7 +169,7 @@ sub status {
my $free = 0;
my $used = 0;
my $active = 1;
return ($total,$free,$used,$active);
return ($total, $free, $used, $active);
return undef;
}
@ -228,16 +233,15 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
copy => { current => 1},
copy => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
if($snapname){
if ($snapname) {
$key = 'snap';
}else{
} else {
$key = $isBase ? 'base' : 'current';
}
return 1 if $features->{$feature}->{$key};
@ -290,7 +294,7 @@ sub volume_export {
PVE::Storage::Plugin::write_common_header($fh, $size);
run_command(
['qemu-img', 'dd', 'bs=64k', "if=$file", '-f', 'raw', '-O', 'raw'],
output => '>&'.fileno($fh),
output => '>&' . fileno($fh),
);
return;
}


@ -9,7 +9,8 @@ use IO::File;
use PVE::JSONSchema qw(get_standard_option);
use PVE::Storage::Plugin;
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE);
use PVE::Tools
qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE);
use base qw(PVE::Storage::Plugin);
@ -41,15 +42,21 @@ sub iscsi_session_list {
my $res = {};
eval {
run_command($cmd, errmsg => 'iscsi session scan failed', outfunc => sub {
run_command(
$cmd,
errmsg => 'iscsi session scan failed',
outfunc => sub {
my $line = shift;
# example: tcp: [1] 192.168.122.252:3260,1 iqn.2003-01.org.linux-iscsi.proxmox-nfs.x8664:sn.00567885ba8f (non-flash)
if ($line =~ m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/) {
if ($line =~
m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/
) {
my ($session_id, $portal, $target) = ($1, $2, $3);
# there can be several sessions per target (multipath)
push @{$res->{$target}}, { session_id => $session_id, portal => $portal };
push @{ $res->{$target} }, { session_id => $session_id, portal => $portal };
}
});
},
);
};
if (my $err = $@) {
die $err if $err !~ m/: No active sessions.$/i;
@ -95,7 +102,9 @@ sub iscsi_portals {
my $res = [];
my $cmd = [$ISCSIADM, '--mode', 'node'];
eval {
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
if ($line =~ $ISCSI_TARGET_RE) {
@ -104,14 +113,15 @@ sub iscsi_portals {
push @{$res}, $portal;
}
}
});
},
);
};
my $err = $@;
warn $err if $err;
if ($err || !scalar(@$res)) {
return [ $portal_in ];
return [$portal_in];
} else {
return $res;
}
@ -128,16 +138,19 @@ sub iscsi_discovery {
my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal];
eval {
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
if ($line =~ $ISCSI_TARGET_RE) {
my ($portal, $target) = ($1, $2);
# one target can have more than one portal (multipath)
# and sendtargets should return all of them in single call
push @{$res->{$target}}, $portal;
push @{ $res->{$target} }, $portal;
}
});
},
);
};
# In case of multipath we can stop after receiving targets from any available portal
@ -159,11 +172,16 @@ sub iscsi_login {
eval {
my $cmd = [
$ISCSIADM,
'--mode', 'node',
'--targetname', $target,
'--op', 'update',
'--name', 'node.session.initial_login_retry_max',
'--value', '0',
'--mode',
'node',
'--targetname',
$target,
'--op',
'update',
'--name',
'node.session.initial_login_retry_max',
'--value',
'0',
];
run_command($cmd);
};
@ -204,7 +222,9 @@ sub iscsi_session_rescan {
foreach my $session (@$session_list) {
my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session->{session_id}, '--rescan'];
eval { run_command($cmd, outfunc => sub {}); };
eval {
run_command($cmd, outfunc => sub { });
};
warn $@ if $@;
}
}
@ -220,11 +240,11 @@ sub load_stable_scsi_paths {
# exclude filenames with part in name (same disk but partitions)
# use only filenames with scsi(with multipath i have the same device
# with dm-uuid-mpath , dm-name and scsi in name)
if($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) {
if ($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) {
my $path = "$stabledir/$tmp";
my $bdevdest = readlink($path);
if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) {
$stable_paths->{$1}=$tmp;
$stable_paths->{$1} = $tmp;
}
}
}
@ -241,7 +261,10 @@ sub iscsi_device_list {
my $stable_paths = load_stable_scsi_paths();
dir_glob_foreach($dirname, 'session(\d+)', sub {
dir_glob_foreach(
$dirname,
'session(\d+)',
sub {
my ($ent, $session) = @_;
my $target = file_read_firstline("$dirname/$ent/targetname");
@ -250,7 +273,10 @@ sub iscsi_device_list {
my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*');
return if !defined($host);
dir_glob_foreach("/sys/bus/scsi/devices", "$host:" . '(\d+):(\d+):(\d+)', sub {
dir_glob_foreach(
"/sys/bus/scsi/devices",
"$host:" . '(\d+):(\d+):(\d+)',
sub {
my ($tmp, $channel, $id, $lun) = @_;
my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type");
@ -258,15 +284,18 @@ sub iscsi_device_list {
my $bdev;
if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)');
(undef, $bdev) =
dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)');
} else {
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)');
(undef, $bdev) =
dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)');
}
return if !$bdev;
#check multipath
if (-d "/sys/block/$bdev/holders") {
my $multipathdev = dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*');
my $multipathdev =
dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*');
$bdev = $multipathdev if $multipathdev;
}
@ -288,9 +317,11 @@ sub iscsi_device_list {
};
#print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n";
});
},
);
});
},
);
return $res;
}
@ -303,7 +334,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, none => 1}, { images => 1 }],
content => [{ images => 1, none => 1 }, { images => 1 }],
select_existing => 1,
'sensitive-properties' => {},
};
@ -317,7 +348,8 @@ sub properties {
},
portal => {
description => "iSCSI portal (IP or DNS name with optional port).",
type => 'string', format => 'pve-storage-portal-dns',
type => 'string',
format => 'pve-storage-portal-dns',
},
};
}
@ -326,9 +358,9 @@ sub options {
return {
portal => { fixed => 1 },
target => { fixed => 1 },
nodes => { optional => 1},
disable => { optional => 1},
content => { optional => 1},
nodes => { optional => 1 },
disable => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
};
}
@ -456,7 +488,7 @@ sub activate_storage {
if (!$do_login) {
# We should check that sessions for all portals are available
my $session_portals = [ map { $_->{portal} } (@$sessions) ];
my $session_portals = [map { $_->{portal} } (@$sessions)];
for my $portal (@$portals) {
if (!grep(/^\Q$portal\E$/, @$session_portals)) {
@ -514,15 +546,15 @@ my $udev_query_path = sub {
my $device_path;
my $cmd = [
'udevadm',
'info',
'--query=path',
$dev,
'udevadm', 'info', '--query=path', $dev,
];
eval {
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
$device_path = shift;
});
},
);
};
die "failed to query device path for '$dev': $@\n" if $@;
@ -540,7 +572,10 @@ $resolve_virtual_devices = sub {
my $resolved = [];
if ($dev =~ m!^/devices/virtual/block/!) {
dir_glob_foreach("/sys/$dev/slaves", '([^.].+)', sub {
dir_glob_foreach(
"/sys/$dev/slaves",
'([^.].+)',
sub {
my ($slave) = @_;
# don't check devices multiple times
@ -554,7 +589,8 @@ $resolve_virtual_devices = sub {
my $nested_resolved = $resolve_virtual_devices->($path, $visited);
push @$resolved, @$nested_resolved;
});
},
);
} else {
push @$resolved, $dev;
}
@ -570,7 +606,7 @@ sub activate_volume {
die "failed to get realpath for '$path': $!\n" if !$real_path;
# in case $path does not exist or is not a symlink, check if the returned
# $real_path is a block device
die "resolved realpath '$real_path' is not a block device\n" if ! -b $real_path;
die "resolved realpath '$real_path' is not a block device\n" if !-b $real_path;
my $device_path = $udev_query_path->($real_path);
my $resolved_paths = $resolve_virtual_devices->($device_path);
@ -601,14 +637,13 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
copy => { current => 1},
copy => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
if ($snapname){
if ($snapname) {
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
@ -647,13 +682,16 @@ sub volume_export {
my $file = $class->filesystem_path($scfg, $volname, $snapshot);
my $size;
run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub {
run_command(
['/sbin/blockdev', '--getsize64', $file],
outfunc => sub {
my ($line) = @_;
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
$size = int($1);
});
},
);
PVE::Storage::Plugin::write_common_header($fh, $size);
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh));
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . fileno($fh));
return;
}


@ -32,19 +32,34 @@ sub lvm_pv_info {
my $has_label = 0;
my $cmd = ['/usr/bin/file', '-L', '-s', $device];
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
$has_label = 1 if $line =~ m/LVM2/;
});
},
);
return undef if !$has_label;
$cmd = ['/sbin/pvs', '--separator', ':', '--noheadings', '--units', 'k',
'--unbuffered', '--nosuffix', '--options',
'pv_name,pv_size,vg_name,pv_uuid', $device];
$cmd = [
'/sbin/pvs',
'--separator',
':',
'--noheadings',
'--units',
'k',
'--unbuffered',
'--nosuffix',
'--options',
'pv_name,pv_size,vg_name,pv_uuid',
$device,
];
my $pvinfo;
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
$line = trim($line);
@ -60,7 +75,8 @@ sub lvm_pv_info {
vgname => $vgname,
uuid => $uuid,
};
});
},
);
return $pvinfo;
}
@ -96,7 +112,12 @@ sub lvm_create_volume_group {
$cmd = ['/sbin/vgcreate', $vgname, $device];
# push @$cmd, '-c', 'y' if $shared; # we do not use this yet
run_command($cmd, errmsg => "vgcreate $vgname $device error", errfunc => $ignore_no_medium_warnings, outfunc => $ignore_no_medium_warnings);
run_command(
$cmd,
errmsg => "vgcreate $vgname $device error",
errfunc => $ignore_no_medium_warnings,
outfunc => $ignore_no_medium_warnings,
);
}
sub lvm_destroy_volume_group {
@ -113,8 +134,17 @@ sub lvm_destroy_volume_group {
sub lvm_vgs {
my ($includepvs) = @_;
my $cmd = ['/sbin/vgs', '--separator', ':', '--noheadings', '--units', 'b',
'--unbuffered', '--nosuffix', '--options'];
my $cmd = [
'/sbin/vgs',
'--separator',
':',
'--noheadings',
'--units',
'b',
'--unbuffered',
'--nosuffix',
'--options',
];
my $cols = [qw(vg_name vg_size vg_free lv_count)];
@ -126,20 +156,24 @@ sub lvm_vgs {
my $vgs = {};
eval {
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
$line = trim($line);
my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) = split (':', $line);
my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) =
split(':', $line);
$vgs->{$name} //= {
size => int ($size),
free => int ($free),
lvcount => int($lvcount)
size => int($size),
free => int($free),
lvcount => int($lvcount),
};
if (defined($pvname) && defined($pvsize) && defined($pvfree)) {
push @{$vgs->{$name}->{pvs}}, {
push @{ $vgs->{$name}->{pvs} },
{
name => $pvname,
size => int($pvsize),
free => int($pvfree),
@ -161,24 +195,48 @@ sub lvm_vgs {
sub lvm_list_volumes {
my ($vgname) = @_;
my $option_list = 'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time';
my $option_list =
'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time';
my $cmd = [
'/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
'--unbuffered', '--nosuffix',
'--config', 'report/time_format="%s"',
'--options', $option_list,
'/sbin/lvs',
'--separator',
':',
'--noheadings',
'--units',
'b',
'--unbuffered',
'--nosuffix',
'--config',
'report/time_format="%s"',
'--options',
$option_list,
];
push @$cmd, $vgname if $vgname;
my $lvs = {};
run_command($cmd, outfunc => sub {
run_command(
$cmd,
outfunc => sub {
my $line = shift;
$line = trim($line);
my ($vg_name, $lv_name, $lv_size, $lv_attr, $pool_lv, $data_percent, $meta_percent, $snap_percent, $uuid, $tags, $meta_size, $ctime) = split(':', $line);
my (
$vg_name,
$lv_name,
$lv_size,
$lv_attr,
$pool_lv,
$data_percent,
$meta_percent,
$snap_percent,
$uuid,
$tags,
$meta_size,
$ctime,
) = split(':', $line);
return if !$vg_name;
return if !$lv_name;
@ -198,8 +256,8 @@ sub lvm_list_volumes {
$meta_percent ||= 0;
$snap_percent ||= 0;
$d->{metadata_size} = int($meta_size);
$d->{metadata_used} = int(($meta_percent * $meta_size)/100);
$d->{used} = int(($data_percent * $lv_size)/100);
$d->{metadata_used} = int(($meta_percent * $meta_size) / 100);
$d->{used} = int(($data_percent * $lv_size) / 100);
}
$lvs->{$vg_name}->{$lv_name} = $d;
},
@ -217,7 +275,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, { images => 1 }],
content => [{ images => 1, rootdir => 1 }, { images => 1 }],
'sensitive-properties' => {},
};
}
@ -226,11 +284,13 @@ sub properties {
return {
vgname => {
description => "Volume group name.",
type => 'string', format => 'pve-storage-vgname',
type => 'string',
format => 'pve-storage-vgname',
},
base => {
description => "Base volume. This volume is automatically activated.",
type => 'string', format => 'pve-volume-id',
type => 'string',
format => 'pve-volume-id',
},
saferemove => {
description => "Zero-out data when removing LVs.",
@ -243,7 +303,7 @@ sub properties {
tagged_only => {
description => "Only use logical volumes tagged with 'pve-vm-ID'.",
type => 'boolean',
}
},
};
}
@ -271,7 +331,7 @@ sub on_add_hook {
my ($baseid, $volname) = PVE::Storage::parse_volume_id($base);
my $cfg = PVE::Storage::config();
my $basecfg = PVE::Storage::storage_config ($cfg, $baseid, 1);
my $basecfg = PVE::Storage::storage_config($cfg, $baseid, 1);
die "base storage ID '$baseid' does not exist\n" if !$basecfg;
# we only support iscsi for now
@ -303,7 +363,7 @@ sub parse_volname {
sub filesystem_path {
my ($class, $scfg, $volname, $snapname) = @_;
die "lvm snapshot is not implemented"if defined($snapname);
die "lvm snapshot is not implemented" if defined($snapname);
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
@ -333,7 +393,7 @@ sub find_free_diskname {
my $lvs = lvm_list_volumes($vg);
my $disk_list = [ keys %{$lvs->{$vg}} ];
my $disk_list = [keys %{ $lvs->{$vg} }];
return PVE::Storage::Plugin::get_next_vm_diskname($disk_list, $storeid, $vmid, undef, $scfg);
}
@ -375,7 +435,7 @@ sub alloc_image {
my $vg = $scfg->{vgname};
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
die "no such volume group '$vg'\n" if !defined($vgs->{$vg});
my $free = int($vgs->{$vg}->{free});
@ -408,20 +468,36 @@ sub free_image {
my $cmd = [
'/usr/bin/cstream',
'-i', '/dev/zero',
'-o', "/dev/$vg/del-$volname",
'-T', '10',
'-v', '1',
'-b', '1048576',
'-t', "$throughput"
'-i',
'/dev/zero',
'-o',
"/dev/$vg/del-$volname",
'-T',
'10',
'-v',
'1',
'-b',
'1048576',
'-t',
"$throughput",
];
eval { run_command($cmd, errmsg => "zero out finished (note: 'No space left on device' is ok here)"); };
eval {
run_command(
$cmd,
errmsg => "zero out finished (note: 'No space left on device' is ok here)",
);
};
warn $@ if $@;
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
$class->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"];
run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error");
});
},
);
print "successfully removed volume $volname ($vg/del-$volname)\n";
};
@ -482,8 +558,12 @@ sub list_images {
next if defined($vmid) && ($owner ne $vmid);
}
push @$res, {
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
push @$res,
{
volid => $volid,
format => 'raw',
size => $info->{lv_size},
vmid => $owner,
ctime => $info->{ctime},
};
}
@ -513,11 +593,16 @@ sub activate_storage {
# In LVM2, vgscans take place automatically;
# this is just to be sure
if ($cache->{vgs} && !$cache->{vgscaned} &&
!$cache->{vgs}->{$scfg->{vgname}}) {
if (
$cache->{vgs}
&& !$cache->{vgscaned}
&& !$cache->{vgs}->{ $scfg->{vgname} }
) {
$cache->{vgscaned} = 1;
my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes'];
eval { run_command($cmd, outfunc => sub {}); };
eval {
run_command($cmd, outfunc => sub { });
};
warn $@ if $@;
}
@ -549,7 +634,7 @@ sub deactivate_volume {
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
my $path = $class->path($scfg, $volname, $storeid, $snapname);
return if ! -b $path;
return if !-b $path;
my $cmd = ['/sbin/lvchange', '-aln', $path];
run_command($cmd, errmsg => "can't deactivate LV '$path'");
@ -558,14 +643,19 @@ sub deactivate_volume {
sub volume_resize {
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
$size = ($size/1024/1024) . "M";
$size = ($size / 1024 / 1024) . "M";
my $path = $class->path($scfg, $volname);
my $cmd = ['/sbin/lvextend', '-L', $size, $path];
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
$class->cluster_lock_storage(
$storeid,
$scfg->{shared},
undef,
sub {
run_command($cmd, errmsg => "error resizing volume '$path'");
});
},
);
return 1;
}
@ -574,14 +664,29 @@ sub volume_size_info {
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
my $path = $class->filesystem_path($scfg, $volname);
my $cmd = ['/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
'--unbuffered', '--nosuffix', '--options', 'lv_size', $path];
my $cmd = [
'/sbin/lvs',
'--separator',
':',
'--noheadings',
'--units',
'b',
'--unbuffered',
'--nosuffix',
'--options',
'lv_size',
$path,
];
my $size;
run_command($cmd, timeout => $timeout, errmsg => "can't get size of '$path'",
run_command(
$cmd,
timeout => $timeout,
errmsg => "can't get size of '$path'",
outfunc => sub {
$size = int(shift);
});
},
);
return wantarray ? ($size, 'raw', 0, undef) : $size;
}
@ -607,17 +712,16 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
copy => { base => 1, current => 1},
rename => {current => 1},
copy => { base => 1, current => 1 },
rename => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
if($snapname){
if ($snapname) {
$key = 'snap';
}else{
} else {
$key = $isBase ? 'base' : 'current';
}
return 1 if $features->{$feature}->{$key};
@ -628,11 +732,14 @@ sub volume_has_feature {
sub volume_export_formats {
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
return () if defined($snapshot); # lvm-thin only
return volume_import_formats($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
return volume_import_formats(
$class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub volume_export {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
= @_;
die "volume export format $format not available for $class\n"
if $format ne 'raw+size';
die "cannot export volumes together with their snapshots in $class\n"
@ -642,13 +749,16 @@ sub volume_export {
my $file = $class->path($scfg, $volname, $storeid);
my $size;
# should be faster than querying LVM, also checks for the device file's availability
run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub {
run_command(
['/sbin/blockdev', '--getsize64', $file],
outfunc => sub {
my ($line) = @_;
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
$size = int($1);
});
},
);
PVE::Storage::Plugin::write_common_header($fh, $size);
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh));
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . fileno($fh));
}
sub volume_import_formats {
@ -659,7 +769,18 @@ sub volume_import_formats {
}
sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;
die "volume import format $format not available for $class\n"
if $format ne 'raw+size';
die "cannot import volumes together with their snapshots in $class\n"
@ -713,21 +834,14 @@ sub volume_import {
sub volume_import_write {
my ($class, $input_fh, $output_file) = @_;
run_command(['dd', "of=$output_file", 'bs=64k'],
input => '<&'.fileno($input_fh));
run_command(['dd', "of=$output_file", 'bs=64k'], input => '<&' . fileno($input_fh));
}
sub rename_volume {
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
if !$target_volname;


@ -83,7 +83,15 @@ sub run_lun_command {
$target = 'root@' . $scfg->{portal};
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $lunmethod, @params];
my $cmd = [
@ssh_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
$target,
$luncmd,
$lunmethod,
@params,
];
run_command($cmd, outfunc => $output, timeout => $timeout);


@ -59,25 +59,31 @@ my $execute_command = sub {
if ($exec eq 'scp') {
$target = 'root@[' . $scfg->{portal} . ']';
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", '--', $method, "$target:$params[0]"];
$cmd = [
@scp_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
'--',
$method,
"$target:$params[0]",
];
} else {
$target = 'root@' . $scfg->{portal};
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method, @params];
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method,
@params];
}
eval {
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
};
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
if ($@) {
$res = {
result => 0,
msg => $err,
}
};
} else {
$res = {
result => 1,
msg => $msg,
}
};
}
return $res;
@ -104,10 +110,9 @@ my $read_config = sub {
$target = 'root@' . $scfg->{portal};
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE];
eval {
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
};
my $cmd =
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE];
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
if ($@) {
die $err if ($err !~ /No such file or directory/);
die "No configuration found. Install iet on $scfg->{portal}" if $msg eq '';
@ -141,7 +146,7 @@ my $parser = sub {
foreach (@cfgfile) {
$line++;
if ($_ =~ /^\s*Target\s*([\w\-\:\.]+)\s*$/) {
if ($1 eq $scfg->{target} && ! $cfg_target) {
if ($1 eq $scfg->{target} && !$cfg_target) {
# start collect info
die "$line: Parse error [$_]" if $SETTINGS;
$SETTINGS->{target} = $1;
@ -157,7 +162,7 @@ my $parser = sub {
} else {
if ($cfg_target) {
$SETTINGS->{text} .= "$_\n";
next if ($_ =~ /^\s*#/ || ! $_);
next if ($_ =~ /^\s*#/ || !$_);
my $option = $_;
if ($_ =~ /^(\w+)\s*#/) {
$option = $1;
@ -176,7 +181,7 @@ my $parser = sub {
foreach (@lun) {
my @lun_opt = split '=', $_;
die "$line: Parse error [$option]" unless (scalar(@lun_opt) == 2);
$conf->{$lun_opt[0]} = $lun_opt[1];
$conf->{ $lun_opt[0] } = $lun_opt[1];
}
if ($conf->{Path} && $conf->{Path} =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
$conf->{include} = 1;
@ -184,7 +189,7 @@ my $parser = sub {
$conf->{include} = 0;
}
$conf->{lun} = $num;
push @{$SETTINGS->{luns}}, $conf;
push @{ $SETTINGS->{luns} }, $conf;
} else {
die "$line: Parse error [$option]";
}
@ -202,14 +207,19 @@ my $update_config = sub {
my $config = '';
while ((my $option, my $value) = each(%$SETTINGS)) {
next if ($option eq 'include' || $option eq 'luns' || $option eq 'Path' || $option eq 'text' || $option eq 'used');
next
if ($option eq 'include'
|| $option eq 'luns'
|| $option eq 'Path'
|| $option eq 'text'
|| $option eq 'used');
if ($option eq 'target') {
$config = "\n\nTarget " . $SETTINGS->{target} . "\n" . $config;
} else {
$config .= "\t$option\t\t\t$value\n";
}
}
foreach my $lun (@{$SETTINGS->{luns}}) {
foreach my $lun (@{ $SETTINGS->{luns} }) {
my $lun_opt = '';
while ((my $option, my $value) = each(%$lun)) {
next if ($option eq 'include' || $option eq 'lun' || $option eq 'Path');
@ -260,12 +270,12 @@ my $get_lu_name = sub {
my $used = ();
my $i;
if (! exists $SETTINGS->{used}) {
if (!exists $SETTINGS->{used}) {
for ($i = 0; $i < $MAX_LUNS; $i++) {
$used->{$i} = 0;
}
foreach my $lun (@{$SETTINGS->{luns}}) {
$used->{$lun->{lun}} = 1;
foreach my $lun (@{ $SETTINGS->{luns} }) {
$used->{ $lun->{lun} } = 1;
}
$SETTINGS->{used} = $used;
}
@ -282,14 +292,14 @@ my $get_lu_name = sub {
my $init_lu_name = sub {
my $used = ();
if (! exists($SETTINGS->{used})) {
if (!exists($SETTINGS->{used})) {
for (my $i = 0; $i < $MAX_LUNS; $i++) {
$used->{$i} = 0;
}
$SETTINGS->{used} = $used;
}
foreach my $lun (@{$SETTINGS->{luns}}) {
$SETTINGS->{used}->{$lun->{lun}} = 1;
foreach my $lun (@{ $SETTINGS->{luns} }) {
$SETTINGS->{used}->{ $lun->{lun} } = 1;
}
};
@ -297,7 +307,7 @@ my $free_lu_name = sub {
my ($lu_name) = @_;
my $new;
foreach my $lun (@{$SETTINGS->{luns}}) {
foreach my $lun (@{ $SETTINGS->{luns} }) {
if ($lun->{lun} != $lu_name) {
push @$new, $lun;
}
@ -310,7 +320,8 @@ my $free_lu_name = sub {
my $make_lun = sub {
my ($scfg, $path) = @_;
die 'Maximum number of LUNs per target is 16384' if scalar @{$SETTINGS->{luns}} >= $MAX_LUNS;
die 'Maximum number of LUNs per target is 16384'
if scalar @{ $SETTINGS->{luns} } >= $MAX_LUNS;
my $lun = $get_lu_name->();
my $conf = {
@ -319,7 +330,7 @@ my $make_lun = sub {
Type => 'blockio',
include => 1,
};
push @{$SETTINGS->{luns}}, $conf;
push @{ $SETTINGS->{luns} }, $conf;
return $conf;
};
@ -329,7 +340,7 @@ my $list_view = sub {
my $lun = undef;
my $object = $params[0];
foreach my $lun (@{$SETTINGS->{luns}}) {
foreach my $lun (@{ $SETTINGS->{luns} }) {
next unless $lun->{include} == 1;
if ($lun->{Path} =~ /^$object$/) {
return $lun->{lun} if (defined($lun->{lun}));
@ -345,7 +356,7 @@ my $list_lun = sub {
my $name = undef;
my $object = $params[0];
foreach my $lun (@{$SETTINGS->{luns}}) {
foreach my $lun (@{ $SETTINGS->{luns} }) {
next unless $lun->{include} == 1;
if ($lun->{Path} =~ /^$object$/) {
return $lun->{Path};
@ -381,12 +392,12 @@ my $create_lun = sub {
my $delete_lun = sub {
my ($scfg, $timeout, $method, @params) = @_;
my $res = {msg => undef};
my $res = { msg => undef };
my $path = $params[0];
my $tid = $get_target_tid->($scfg);
foreach my $lun (@{$SETTINGS->{luns}}) {
foreach my $lun (@{ $SETTINGS->{luns} }) {
if ($lun->{Path} eq $path) {
@params = ('--op', 'delete', "--tid=$tid", "--lun=$lun->{lun}");
$res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
@ -417,7 +428,7 @@ my $modify_lun = sub {
my $path = $params[1];
my $tid = $get_target_tid->($scfg);
foreach my $cfg (@{$SETTINGS->{luns}}) {
foreach my $cfg (@{ $SETTINGS->{luns} }) {
if ($cfg->{Path} eq $path) {
$lun = $cfg;
last;


@ -83,7 +83,8 @@ my $read_config = sub {
my $daemon = 0;
foreach my $config (@CONFIG_FILES) {
$err = undef;
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config];
my $cmd =
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config];
eval {
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
};
@ -124,11 +125,11 @@ my $parse_size = sub {
if ($unit eq 'KB') {
$size *= 1024;
} elsif ($unit eq 'MB') {
$size *= 1024*1024;
$size *= 1024 * 1024;
} elsif ($unit eq 'GB') {
$size *= 1024*1024*1024;
$size *= 1024 * 1024 * 1024;
} elsif ($unit eq 'TB') {
$size *= 1024*1024*1024*1024;
$size *= 1024 * 1024 * 1024 * 1024;
}
if ($reminder) {
$size = ceil($size);
@ -151,9 +152,9 @@ my $size_with_unit = sub {
if ($size =~ m/^\d+$/) {
++$n and $size /= 1024 until $size < 1024;
if ($size =~ /\./) {
return sprintf "%.2f%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
return sprintf "%.2f%s", $size, (qw[bytes KB MB GB TB])[$n];
} else {
return sprintf "%d%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
return sprintf "%d%s", $size, (qw[bytes KB MB GB TB])[$n];
}
}
die "$size: Not a number";
@ -170,7 +171,7 @@ my $lun_dumper = sub {
$config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n";
$config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n";
foreach my $conf (@{$SETTINGS->{$lun}->{luns}}) {
foreach my $conf (@{ $SETTINGS->{$lun}->{luns} }) {
$config .= "$conf->{lun} Storage " . $conf->{Storage};
$config .= ' ' . $size_with_unit->($conf->{Size}) . "\n";
foreach ($conf->{options}) {
@ -189,11 +190,11 @@ my $get_lu_name = sub {
my $used = ();
my $i;
if (! exists $SETTINGS->{$target}->{used}) {
if (!exists $SETTINGS->{$target}->{used}) {
for ($i = 0; $i < $MAX_LUNS; $i++) {
$used->{$i} = 0;
}
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) {
$lun->{lun} =~ /^LUN(\d+)$/;
$used->{$1} = 1;
}
@ -213,13 +214,13 @@ my $init_lu_name = sub {
my ($target) = @_;
my $used = ();
if (! exists($SETTINGS->{$target}->{used})) {
if (!exists($SETTINGS->{$target}->{used})) {
for (my $i = 0; $i < $MAX_LUNS; $i++) {
$used->{$i} = 0;
}
$SETTINGS->{$target}->{used} = $used;
}
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) {
$lun->{lun} =~ /^LUN(\d+)$/;
$SETTINGS->{$target}->{used}->{$1} = 1;
}
@ -236,7 +237,8 @@ my $make_lun = sub {
my ($scfg, $path) = @_;
my $target = $SETTINGS->{current};
die 'Maximum number of LUNs per target is 63' if scalar @{$SETTINGS->{$target}->{luns}} >= $MAX_LUNS;
die 'Maximum number of LUNs per target is 63'
if scalar @{ $SETTINGS->{$target}->{luns} } >= $MAX_LUNS;
my @options = ();
my $lun = $get_lu_name->($target);
@ -249,7 +251,7 @@ my $make_lun = sub {
Size => 'AUTO',
options => @options,
};
push @{$SETTINGS->{$target}->{luns}}, $conf;
push @{ $SETTINGS->{$target}->{luns} }, $conf;
return $conf->{lun};
};
@ -290,7 +292,7 @@ my $parser = sub {
if ($arg2 =~ /^Storage\s*(.+)/i) {
$SETTINGS->{$lun}->{$arg1}->{storage} = $1;
} elsif ($arg2 =~ /^Option\s*(.+)/i) {
push @{$SETTINGS->{$lun}->{$arg1}->{options}}, $1;
push @{ $SETTINGS->{$lun}->{$arg1}->{options} }, $1;
} else {
$SETTINGS->{$lun}->{$arg1} = $arg2;
}
@ -307,10 +309,10 @@ my $parser = sub {
my $base = get_base;
for (my $i = 1; $i <= $max; $i++) {
my $target = $SETTINGS->{nodebase}.':'.$SETTINGS->{"LogicalUnit$i"}->{TargetName};
my $target = $SETTINGS->{nodebase} . ':' . $SETTINGS->{"LogicalUnit$i"}->{TargetName};
if ($target eq $scfg->{target}) {
my $lu = ();
while ((my $key, my $val) = each(%{$SETTINGS->{"LogicalUnit$i"}})) {
while ((my $key, my $val) = each(%{ $SETTINGS->{"LogicalUnit$i"} })) {
if ($key =~ /^LUN\d+/) {
$val->{storage} =~ /^([\w\/\-]+)\s+(\w+)/;
my $storage = $1;
@ -318,7 +320,7 @@ my $parser = sub {
my $conf = undef;
my @options = ();
if ($val->{options}) {
@options = @{$val->{options}};
@options = @{ $val->{options} };
}
if ($storage =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
$conf = {
@ -326,7 +328,7 @@ my $parser = sub {
Storage => $storage,
Size => $size,
options => @options,
}
};
}
push @$lu, $conf if $conf;
delete $SETTINGS->{"LogicalUnit$i"}->{$key};
@ -351,7 +353,7 @@ my $list_lun = sub {
my $object = $params[0];
for my $key (keys %$SETTINGS) {
next unless $key =~ /^LogicalUnit\d+$/;
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) {
if ($lun->{Storage} =~ /^$object$/) {
return $lun->{Storage};
}
@ -399,7 +401,7 @@ my $delete_lun = sub {
my $target = $SETTINGS->{current};
my $luns = ();
foreach my $conf (@{$SETTINGS->{$target}->{luns}}) {
foreach my $conf (@{ $SETTINGS->{$target}->{luns} }) {
if ($conf->{Storage} =~ /^$params[0]$/) {
$free_lu_name->($target, $conf->{lun});
} else {
@ -448,7 +450,7 @@ my $add_view = sub {
params => \@params,
};
} else {
@params = ('-HUP', '`cat '. "$SETTINGS->{pidfile}`");
@params = ('-HUP', '`cat ' . "$SETTINGS->{pidfile}`");
$cmdmap = {
cmd => 'ssh',
method => 'kill',
@ -479,7 +481,7 @@ my $list_view = sub {
my $object = $params[0];
for my $key (keys %$SETTINGS) {
next unless $key =~ /^LogicalUnit\d+$/;
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) {
if ($lun->{Storage} =~ /^$object$/) {
if ($lun->{lun} =~ /^LUN(\d+)/) {
return $1;
@ -531,18 +533,31 @@ sub run_lun_command {
$parser->($scfg) unless $SETTINGS;
my $cmdmap = $get_lun_cmd_map->($method);
if ($method eq 'add_view') {
$is_add_view = 1 ;
$is_add_view = 1;
$timeout = 15;
}
if (ref $cmdmap->{cmd} eq 'CODE') {
$res = $cmdmap->{cmd}->($scfg, $timeout, $method, @params);
if (ref $res) {
$method = $res->{method};
@params = @{$res->{params}};
@params = @{ $res->{params} };
if ($res->{cmd} eq 'scp') {
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $method, "$target:$params[0]"];
$cmd = [
@scp_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
$method,
"$target:$params[0]",
];
} else {
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $method, @params];
$cmd = [
@ssh_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
$target,
$method,
@params,
];
}
} else {
return $res;
@ -550,12 +565,18 @@ sub run_lun_command {
} else {
$luncmd = $cmdmap->{cmd};
$method = $cmdmap->{method};
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $method, @params];
$cmd = [
@ssh_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
$target,
$luncmd,
$method,
@params,
];
}
eval {
run_command($cmd, outfunc => $output, timeout => $timeout);
};
eval { run_command($cmd, outfunc => $output, timeout => $timeout); };
if ($@ && $is_add_view) {
my $err = $@;
if ($OLD_CONFIG) {
@ -565,15 +586,11 @@ sub run_lun_command {
print $fh $OLD_CONFIG;
close $fh;
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $file, $CONFIG_FILE];
eval {
run_command($cmd, outfunc => $output, timeout => $timeout);
};
eval { run_command($cmd, outfunc => $output, timeout => $timeout); };
$err1 = $@ if $@;
unlink $file;
die "$err\n$err1" if $err1;
eval {
run_lun_command($scfg, undef, 'add_view', 'restart');
};
eval { run_lun_command($scfg, undef, 'add_view', 'restart'); };
die "$err\n$@" if ($@);
}
die $err;

View File

@ -30,7 +30,7 @@ sub get_base;
# config file location differs from distro to distro
my @CONFIG_FILES = (
'/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al
'/etc/target/saveconfig.json' , # ArchLinux, CentOS
'/etc/target/saveconfig.json', # ArchLinux, CentOS
);
my $BACKSTORE = '/backstores/block';
@ -58,21 +58,27 @@ my $execute_remote_command = sub {
my $errfunc = sub { $err .= "$_[0]\n" };
$target = 'root@' . $scfg->{portal};
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $remote_command, @params];
$cmd = [
@ssh_cmd,
'-i',
"$id_rsa_path/$scfg->{portal}_id_rsa",
$target,
'--',
$remote_command,
@params,
];
eval {
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
};
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
if ($@) {
$res = {
result => 0,
msg => $err,
}
};
} else {
$res = {
result => 1,
msg => $msg,
}
};
}
return $res;
@ -96,7 +102,8 @@ my $read_config = sub {
$target = 'root@' . $scfg->{portal};
foreach my $oneFile (@CONFIG_FILES) {
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile];
my $cmd =
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile];
eval {
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
};
@ -139,21 +146,22 @@ my $parser = sub {
if ($tpg =~ /^tpg(\d+)$/) {
$tpg_tag = $1;
} else {
die "Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n";
die
"Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n";
}
my $config = $get_config->($scfg);
my $jsonconfig = JSON->new->utf8->decode($config);
my $haveTarget = 0;
foreach my $target (@{$jsonconfig->{targets}}) {
foreach my $target (@{ $jsonconfig->{targets} }) {
# only interested in iSCSI targets
next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target});
# find correct TPG
foreach my $tpg (@{$target->{tpgs}}) {
foreach my $tpg (@{ $target->{tpgs} }) {
if ($tpg->{tag} == $tpg_tag) {
my $res = [];
foreach my $lun (@{$tpg->{luns}}) {
foreach my $lun (@{ $tpg->{luns} }) {
my ($idx, $storage_object);
if ($lun->{index} =~ /^(\d+)$/) {
$idx = $1;
@ -194,7 +202,7 @@ my $free_lu_name = sub {
my $new = [];
my $target = $get_target_settings->($scfg);
foreach my $lun (@{$target->{luns}}) {
foreach my $lun (@{ $target->{luns} }) {
if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") {
push @$new, $lun;
}
@ -213,7 +221,7 @@ my $register_lun = sub {
is_new => 1,
};
my $target = $get_target_settings->($scfg);
push @{$target->{luns}}, $conf;
push @{ $target->{luns} }, $conf;
return $conf;
};
@ -228,7 +236,7 @@ my $extract_volname = sub {
$volname = $1;
my $prefix = $get_backstore_prefix->($scfg);
my $target = $get_target_settings->($scfg);
foreach my $lun (@{$target->{luns}}) {
foreach my $lun (@{ $target->{luns} }) {
# If we have a lun with the pool prefix matching this vol, then return this one
# like pool-pve-vm-100-disk-0
# Else, just fallback to the old name scheme which is vm-100-disk-0
@ -252,7 +260,7 @@ my $list_view = sub {
return undef if !defined($volname); # nothing to search for..
foreach my $lun (@{$target->{luns}}) {
foreach my $lun (@{ $target->{luns} }) {
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
return $lun->{index};
}
@ -269,7 +277,7 @@ my $list_lun = sub {
my $volname = $extract_volname->($scfg, $object);
my $target = $get_target_settings->($scfg);
foreach my $lun (@{$target->{luns}}) {
foreach my $lun (@{ $target->{luns} }) {
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
return $object;
}
@ -294,18 +302,18 @@ my $create_lun = sub {
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
# step 1: create backstore for device
my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device" );
my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device");
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
die $res->{msg} if !$res->{result};
# step 2: enable unmap support on the backstore
@cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1' );
@cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1');
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
die $res->{msg} if !$res->{result};
# step 3: register lun with target
# targetcli /iscsi/iqn.2018-04.at.bestsolution.somehost:target/tpg1/luns/ create /backstores/block/foobar
@cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname" );
@cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname");
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
die $res->{msg} if !$res->{result};
@ -330,7 +338,7 @@ my $create_lun = sub {
my $delete_lun = sub {
my ($scfg, $timeout, $method, @params) = @_;
my $res = {msg => undef};
my $res = { msg => undef };
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
@ -338,11 +346,11 @@ my $delete_lun = sub {
my $volname = $extract_volname->($scfg, $path);
my $target = $get_target_settings->($scfg);
foreach my $lun (@{$target->{luns}}) {
foreach my $lun (@{ $target->{luns} }) {
next if $lun->{storage_object} ne "$BACKSTORE/$volname";
# step 1: delete the lun
my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}" );
my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}");
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
do {
die $res->{msg};


@ -30,7 +30,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, { images => 1, rootdir => 1}],
content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }],
'sensitive-properties' => {},
};
}
@ -39,7 +39,8 @@ sub properties {
return {
thinpool => {
description => "LVM thin pool LV name.",
type => 'string', format => 'pve-storage-vgname',
type => 'string',
format => 'pve-storage-vgname',
},
};
}
@ -77,7 +78,7 @@ sub filesystem_path {
my $vg = $scfg->{vgname};
my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname": "/dev/$vg/$name";
my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname" : "/dev/$vg/$name";
return wantarray ? ($path, $vmid, $vtype) : $path;
}
@ -94,13 +95,21 @@ sub alloc_image {
my $vg = $scfg->{vgname};
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
die "no such volume group '$vg'\n" if !defined($vgs->{$vg});
$name = $class->find_free_diskname($storeid, $scfg, $vmid)
if !$name;
my $cmd = ['/sbin/lvcreate', '-aly', '-V', "${size}k", '--name', $name,
'--thinpool', "$vg/$scfg->{thinpool}" ];
my $cmd = [
'/sbin/lvcreate',
'-aly',
'-V',
"${size}k",
'--name',
$name,
'--thinpool',
"$vg/$scfg->{thinpool}",
];
run_command($cmd, errmsg => "lvcreate '$vg/$name' error");
@ -114,7 +123,7 @@ sub free_image {
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
if (my $dat = $lvs->{$scfg->{vgname}}) {
if (my $dat = $lvs->{ $scfg->{vgname} }) {
# remove all volume snapshots first
foreach my $lv (keys %$dat) {
@ -164,8 +173,12 @@ sub list_images {
next if defined($vmid) && ($owner ne $vmid);
}
push @$res, {
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
push @$res,
{
volid => $volid,
format => 'raw',
size => $info->{lv_size},
vmid => $owner,
ctime => $info->{ctime},
};
}
@ -181,7 +194,7 @@ sub list_thinpools {
my $thinpools = [];
foreach my $vg (keys %$lvs) {
foreach my $lvname (keys %{$lvs->{$vg}}) {
foreach my $lvname (keys %{ $lvs->{$vg} }) {
next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't';
my $lv = $lvs->{$vg}->{$lvname};
$lv->{lv} = $lvname;
@ -198,9 +211,9 @@ sub status {
my $lvs = $cache->{lvs} ||= PVE::Storage::LVMPlugin::lvm_list_volumes();
return if !$lvs->{$scfg->{vgname}};
return if !$lvs->{ $scfg->{vgname} };
my $info = $lvs->{$scfg->{vgname}}->{$scfg->{thinpool}};
my $info = $lvs->{ $scfg->{vgname} }->{ $scfg->{thinpool} };
return if !$info || $info->{lv_type} ne 't' || !$info->{lv_size};
@ -221,7 +234,10 @@ my $activate_lv = sub {
return if $lvs->{$vg}->{$lv}->{lv_state} eq 'a';
run_command(['lvchange', '-ay', '-K', "$vg/$lv"], errmsg => "activating LV '$vg/$lv' failed");
run_command(
['lvchange', '-ay', '-K', "$vg/$lv"],
errmsg => "activating LV '$vg/$lv' failed",
);
$lvs->{$vg}->{$lv}->{lv_state} = 'a'; # update cache
@ -271,8 +287,7 @@ sub clone_image {
if ($snap) {
$lv = "$vg/snap_${volname}_$snap";
} else {
my ($vtype, undef, undef, undef, undef, $isBase, $format) =
$class->parse_volname($volname);
my ($vtype, undef, undef, undef, undef, $isBase, $format) = $class->parse_volname($volname);
die "clone_image only works on base images\n" if !$isBase;
@ -290,8 +305,7 @@ sub clone_image {
sub create_base {
my ($class, $storeid, $scfg, $volname) = @_;
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
die "create_base not possible with base image\n" if $isBase;
@ -363,20 +377,19 @@ sub volume_has_feature {
my $features = {
snapshot => { current => 1 },
clone => { base => 1, snap => 1},
template => { current => 1},
copy => { base => 1, current => 1, snap => 1},
sparseinit => { base => 1, current => 1},
rename => {current => 1},
clone => { base => 1, snap => 1 },
template => { current => 1 },
copy => { base => 1, current => 1, snap => 1 },
sparseinit => { base => 1, current => 1 },
rename => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
if($snapname){
if ($snapname) {
$key = 'snap';
}else{
} else {
$key = $isBase ? 'base' : 'current';
}
return 1 if $features->{$feature}->{$key};
@ -385,7 +398,18 @@ sub volume_has_feature {
}
sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) =
$class->parse_volname($volname);
@ -400,7 +424,7 @@ sub volume_import {
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename
$allow_rename,
);
} else {
my $tempname;
@ -425,9 +449,9 @@ sub volume_import {
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename
$allow_rename,
);
($storeid,my $newname) = PVE::Storage::parse_volume_id($newvolid);
($storeid, my $newname) = PVE::Storage::parse_volume_id($newvolid);
$volname = $class->create_base($storeid, $scfg, $newname);
}
@ -438,8 +462,10 @@ sub volume_import {
# used in LVMPlugin->volume_import
sub volume_import_write {
my ($class, $input_fh, $output_file) = @_;
run_command(['dd', "of=$output_file", 'conv=sparse', 'bs=64k'],
input => '<&'.fileno($input_fh));
run_command(
['dd', "of=$output_file", 'conv=sparse', 'bs=64k'],
input => '<&' . fileno($input_fh),
);
}
1;


@ -24,9 +24,9 @@ sub nfs_is_mounted {
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
return $mountpoint if grep {
$_->[2] =~ /^nfs/ &&
$_->[0] =~ m|^\Q$source\E/?$| &&
$_->[1] eq $mountpoint
$_->[2] =~ /^nfs/
&& $_->[0] =~ m|^\Q$source\E/?$|
&& $_->[1] eq $mountpoint
} @$mountdata;
return undef;
}
@ -53,9 +53,19 @@ sub type {
sub plugindata {
return {
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
{ images => 1 }],
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
content => [
{
images => 1,
rootdir => 1,
vztmpl => 1,
iso => 1,
backup => 1,
snippets => 1,
import => 1,
},
{ images => 1 },
],
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
'sensitive-properties' => {},
};
}
@ -64,11 +74,13 @@ sub properties {
return {
export => {
description => "NFS export path.",
type => 'string', format => 'pve-storage-path',
type => 'string',
format => 'pve-storage-path',
},
server => {
description => "Server IP or DNS name.",
type => 'string', format => 'pve-storage-server',
type => 'string',
format => 'pve-storage-server',
},
};
}
@ -95,7 +107,6 @@ sub options {
};
}
sub check_config {
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
@ -135,8 +146,8 @@ sub activate_storage {
# NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline
$class->config_aware_base_mkdir($scfg, $path);
die "unable to activate storage '$storeid' - " .
"directory '$path' does not exist\n" if ! -d $path;
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
if !-d $path;
nfs_mount($server, $export, $path, $scfg->{options});
}
@ -184,7 +195,9 @@ sub check_connection {
$cmd = ['/sbin/showmount', '--no-headers', '--exports', $server];
}
eval { run_command($cmd, timeout => 10, outfunc => sub {}, errfunc => sub {}) };
eval {
run_command($cmd, timeout => 10, outfunc => sub { }, errfunc => sub { });
};
if (my $err = $@) {
if ($is_v4) {
my $port = 2049;


@ -29,7 +29,7 @@ sub type {
sub plugindata {
return {
content => [ {backup => 1, none => 1}, { backup => 1 }],
content => [{ backup => 1, none => 1 }, { backup => 1 }],
'sensitive-properties' => {
'encryption-key' => 1,
'master-pubkey' => 1,
@ -47,11 +47,13 @@ sub properties {
# openssl s_client -connect <host>:8007 2>&1 |openssl x509 -fingerprint -sha256
fingerprint => get_standard_option('fingerprint-sha256'),
'encryption-key' => {
description => "Encryption key. Use 'autogen' to generate one automatically without passphrase.",
description =>
"Encryption key. Use 'autogen' to generate one automatically without passphrase.",
type => 'string',
},
'master-pubkey' => {
description => "Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.",
description =>
"Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.",
type => 'string',
},
};
@ -63,9 +65,9 @@ sub options {
datastore => { fixed => 1 },
namespace => { optional => 1 },
port => { optional => 1 },
nodes => { optional => 1},
disable => { optional => 1},
content => { optional => 1},
nodes => { optional => 1 },
disable => { optional => 1 },
content => { optional => 1 },
username => { optional => 1 },
password => { optional => 1 },
'encryption-key' => { optional => 1 },
@ -244,7 +246,7 @@ my sub api_param_from_volname : prototype($$$) {
my @tm = (POSIX::strptime($timestr, "%FT%TZ"));
# expect sec, min, hour, mday, mon, year
die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0..5];
die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0 .. 5];
my $btime;
{
@ -283,7 +285,7 @@ my sub do_raw_client_cmd {
my $client_exe = '/usr/bin/proxmox-backup-client';
die "executable not found '$client_exe'! Proxmox backup client not installed?\n"
if ! -x $client_exe;
if !-x $client_exe;
my $repo = PVE::PBSClient::get_repository($scfg);
@ -303,13 +305,13 @@ my sub do_raw_client_cmd {
// die "failed to get file descriptor flags: $!\n";
fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC)
or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n";
push @$cmd, '--crypt-mode=encrypt', '--keyfd='.fileno($keyfd);
push @$cmd, '--crypt-mode=encrypt', '--keyfd=' . fileno($keyfd);
if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) {
my $flags = fcntl($master_fd, F_GETFD, 0)
// die "failed to get file descriptor flags: $!\n";
fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC)
or die "failed to remove FD_CLOEXEC from master public key file descriptor\n";
push @$cmd, '--master-pubkey-fd='.fileno($master_fd);
push @$cmd, '--master-pubkey-fd=' . fileno($master_fd);
}
} else {
push @$cmd, '--crypt-mode=none';
@ -357,12 +359,15 @@ sub run_client_cmd {
my $outfunc = sub { $json_str .= "$_[0]\n" };
$param = [] if !defined($param);
$param = [ $param ] if !ref($param);
$param = [$param] if !ref($param);
$param = [@$param, '--output-format=json'] if !$no_output;
do_raw_client_cmd($scfg, $storeid, $client_cmd, $param,
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
do_raw_client_cmd(
$scfg, $storeid, $client_cmd, $param,
outfunc => $outfunc,
errmsg => 'proxmox-backup-client failed',
);
return undef if $no_output;
@ -390,8 +395,11 @@ sub extract_vzdump_config {
die "unable to extract configuration for backup format '$format'\n";
}
do_raw_client_cmd($scfg, $storeid, 'restore', [ $name, $config_name, '-' ],
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
do_raw_client_cmd(
$scfg, $storeid, 'restore', [$name, $config_name, '-'],
outfunc => $outfunc,
errmsg => 'proxmox-backup-client failed',
);
return $config;
}
@ -445,7 +453,7 @@ sub prune_backups {
$logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'")
if !$dryrun;
eval {
my $res = run_client_cmd($scfg, $storeid, 'prune', [ $backup_group, @param ]);
my $res = run_client_cmd($scfg, $storeid, 'prune', [$backup_group, @param]);
foreach my $backup (@{$res}) {
die "result from proxmox-backup-client is not as expected\n"
@ -462,7 +470,8 @@ sub prune_backups {
my $mark = $backup->{keep} ? 'keep' : 'remove';
$mark = 'protected' if $backup->{protected};
push @{$prune_list}, {
push @{$prune_list},
{
ctime => $ctime,
mark => $mark,
type => $type eq 'vm' ? 'qemu' : 'lxc',
@ -596,7 +605,9 @@ sub on_delete_hook {
sub parse_volname {
my ($class, $volname) = @_;
if ($volname =~ m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!) {
if ($volname =~
m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!
) {
my $btype = $1;
my $bid = $2;
my $btime = $3;
@ -657,12 +668,11 @@ sub free_image {
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
run_client_cmd($scfg, $storeid, "forget", [ $name ], 1);
run_client_cmd($scfg, $storeid, "forget", [$name], 1);
return;
}
sub list_images {
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
@ -706,7 +716,7 @@ my sub pbs_api_connect {
}
if (my $fp = $scfg->{fingerprint}) {
$params->{cached_fingerprints}->{uc($fp)} = 1;
$params->{cached_fingerprints}->{ uc($fp) } = 1;
}
my $conn = PVE::APIClient::LWP->new(
@ -862,7 +872,7 @@ sub get_volume_notes {
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my $data = run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "show", $name ]);
my $data = run_client_cmd($scfg, $storeid, "snapshot", ["notes", "show", $name]);
return $data->{notes};
}
@ -874,7 +884,7 @@ sub update_volume_notes {
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "update", $name, $notes ], 1);
run_client_cmd($scfg, $storeid, "snapshot", ["notes", "update", $name, $notes], 1);
return undef;
}
@ -936,7 +946,7 @@ sub volume_size_info {
my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my $data = run_client_cmd($scfg, $storeid, "files", [ $name ]);
my $data = run_client_cmd($scfg, $storeid, "files", [$name]);
my $size = 0;
foreach my $info (@$data) {


@ -35,16 +35,7 @@ our @COMMON_TAR_FLAGS = qw(
);
our @SHARED_STORAGE = (
'iscsi',
'nfs',
'cifs',
'rbd',
'cephfs',
'iscsidirect',
'glusterfs',
'zfs',
'drbd',
'pbs',
'iscsi', 'nfs', 'cifs', 'rbd', 'cephfs', 'iscsidirect', 'glusterfs', 'zfs', 'drbd', 'pbs',
);
our $QCOW2_PREALLOCATION = {
@ -62,13 +53,16 @@ our $RAW_PREALLOCATION = {
our $MAX_VOLUMES_PER_GUEST = 1024;
cfs_register_file ('storage.cfg',
cfs_register_file(
'storage.cfg',
sub { __PACKAGE__->parse_config(@_); },
sub { __PACKAGE__->write_config(@_); });
sub { __PACKAGE__->write_config(@_); },
);
my %prune_option = (
optional => 1,
type => 'integer', minimum => '0',
type => 'integer',
minimum => '0',
format_description => 'N',
);
@ -79,36 +73,36 @@ our $prune_backups_format = {
optional => 1,
},
'keep-last' => {
%prune_option,
description => 'Keep the last <N> backups.',
%prune_option, description => 'Keep the last <N> backups.',
},
'keep-hourly' => {
%prune_option,
description => 'Keep backups for the last <N> different hours. If there is more' .
'than one backup for a single hour, only the latest one is kept.'
description => 'Keep backups for the last <N> different hours. If there is more'
. 'than one backup for a single hour, only the latest one is kept.',
},
'keep-daily' => {
%prune_option,
description => 'Keep backups for the last <N> different days. If there is more' .
'than one backup for a single day, only the latest one is kept.'
description => 'Keep backups for the last <N> different days. If there is more'
. 'than one backup for a single day, only the latest one is kept.',
},
'keep-weekly' => {
%prune_option,
description => 'Keep backups for the last <N> different weeks. If there is more' .
'than one backup for a single week, only the latest one is kept.'
description => 'Keep backups for the last <N> different weeks. If there is more'
. 'than one backup for a single week, only the latest one is kept.',
},
'keep-monthly' => {
%prune_option,
description => 'Keep backups for the last <N> different months. If there is more' .
'than one backup for a single month, only the latest one is kept.'
description => 'Keep backups for the last <N> different months. If there is more'
. 'than one backup for a single month, only the latest one is kept.',
},
'keep-yearly' => {
%prune_option,
description => 'Keep backups for the last <N> different years. If there is more' .
'than one backup for a single year, only the latest one is kept.'
description => 'Keep backups for the last <N> different years. If there is more'
. 'than one backup for a single year, only the latest one is kept.',
},
};
PVE::JSONSchema::register_format('prune-backups', $prune_backups_format, \&validate_prune_backups);
sub validate_prune_backups {
my ($prune_backups) = @_;
@ -124,30 +118,39 @@ sub validate_prune_backups {
return $res;
}
register_standard_option('prune-backups', {
description => "The retention options with shorter intervals are processed first " .
"with --keep-last being the very first one. Each option covers a " .
"specific period of time. We say that backups within this period " .
"are covered by this option. The next option does not take care " .
"of already covered backups and only considers older backups.",
register_standard_option(
'prune-backups',
{
description => "The retention options with shorter intervals are processed first "
. "with --keep-last being the very first one. Each option covers a "
. "specific period of time. We say that backups within this period "
. "are covered by this option. The next option does not take care "
. "of already covered backups and only considers older backups.",
optional => 1,
type => 'string',
format => 'prune-backups',
});
},
);
my $defaultData = {
propertyList => {
type => { description => "Storage type." },
storage => get_standard_option('pve-storage-id',
{ completion => \&PVE::Storage::complete_storage }),
nodes => get_standard_option('pve-node-list', {
storage => get_standard_option(
'pve-storage-id',
{ completion => \&PVE::Storage::complete_storage },
),
nodes => get_standard_option(
'pve-node-list',
{
description => "List of nodes for which the storage configuration applies.",
optional => 1,
}),
},
),
content => {
description => "Allowed content types.\n\nNOTE: the value " .
"'rootdir' is used for Containers, and value 'images' for VMs.\n",
type => 'string', format => 'pve-storage-content-list',
description => "Allowed content types.\n\nNOTE: the value "
. "'rootdir' is used for Containers, and value 'images' for VMs.\n",
type => 'string',
format => 'pve-storage-content-list',
optional => 1,
completion => \&PVE::Storage::complete_content_type,
},
@ -157,47 +160,56 @@ my $defaultData = {
optional => 1,
},
maxfiles => {
description => "Deprecated: use 'prune-backups' instead. " .
"Maximal number of backup files per VM. Use '0' for unlimited.",
description => "Deprecated: use 'prune-backups' instead. "
. "Maximal number of backup files per VM. Use '0' for unlimited.",
type => 'integer',
minimum => 0,
optional => 1,
},
'prune-backups' => get_standard_option('prune-backups'),
'max-protected-backups' => {
description => "Maximal number of protected backups per guest. Use '-1' for unlimited.",
description =>
"Maximal number of protected backups per guest. Use '-1' for unlimited.",
type => 'integer',
minimum => -1,
optional => 1,
default => "Unlimited for users with Datastore.Allocate privilege, 5 for other users",
default =>
"Unlimited for users with Datastore.Allocate privilege, 5 for other users",
},
shared => {
description => "Indicate that this is a single storage with the same contents on all "
."nodes (or all listed in the 'nodes' option). It will not make the contents of a "
."local storage automatically accessible to other nodes, it just marks an already "
."shared storage as such!",
description =>
"Indicate that this is a single storage with the same contents on all "
. "nodes (or all listed in the 'nodes' option). It will not make the contents of a "
. "local storage automatically accessible to other nodes, it just marks an already "
. "shared storage as such!",
type => 'boolean',
optional => 1,
},
subdir => {
description => "Subdir to mount.",
type => 'string', format => 'pve-storage-path',
type => 'string',
format => 'pve-storage-path',
optional => 1,
},
format => get_standard_option('pve-storage-image-format', {
format => get_standard_option(
'pve-storage-image-format',
{
description => "Default image format.",
optional => 1,
}),
},
),
preallocation => {
description => "Preallocation mode for raw and qcow2 images. " .
"Using 'metadata' on raw images results in preallocation=off.",
type => 'string', enum => ['off', 'metadata', 'falloc', 'full'],
description => "Preallocation mode for raw and qcow2 images. "
. "Using 'metadata' on raw images results in preallocation=off.",
type => 'string',
enum => ['off', 'metadata', 'falloc', 'full'],
default => 'metadata',
optional => 1,
},
'content-dirs' => {
description => "Overrides for default content type directories.",
type => "string", format => "pve-dir-override-list",
type => "string",
format => "pve-dir-override-list",
optional => 1,
},
options => {
@ -207,9 +219,10 @@ my $defaultData = {
optional => 1,
},
port => {
description => "Use this port to connect to the storage instead of the default one (for"
." example, with PBS or ESXi). For NFS and CIFS, use the 'options' option to"
." configure the port via the mount options.",
description =>
"Use this port to connect to the storage instead of the default one (for"
. " example, with PBS or ESXi). For NFS and CIFS, use the 'options' option to"
. " configure the port via the mount options.",
type => 'integer',
minimum => 1,
maximum => 65535,
@ -274,17 +287,18 @@ sub default_format {
my $def = $defaultData->{plugindata}->{$type};
my $def_format = 'raw';
my $valid_formats = [ $def_format ];
my $valid_formats = [$def_format];
if (defined($def->{format})) {
$def_format = $scfg->{format} || $def->{format}->[1];
$valid_formats = [ sort keys %{$def->{format}->[0]} ];
$valid_formats = [sort keys %{ $def->{format}->[0] }];
}
return wantarray ? ($def_format, $valid_formats) : $def_format;
}
PVE::JSONSchema::register_format('pve-storage-path', \&verify_path);
sub verify_path {
my ($path, $noerr) = @_;
@ -298,12 +312,14 @@ sub verify_path {
}
PVE::JSONSchema::register_format('pve-storage-server', \&verify_server);
sub verify_server {
my ($server, $noerr) = @_;
if (!(PVE::JSONSchema::pve_verify_ip($server, 1) ||
PVE::JSONSchema::pve_verify_dns_name($server, 1)))
{
if (!(
PVE::JSONSchema::pve_verify_ip($server, 1)
|| PVE::JSONSchema::pve_verify_dns_name($server, 1)
)) {
return undef if $noerr;
die "value does not look like a valid server name or IP address\n";
}
@ -311,6 +327,7 @@ sub verify_server {
}
PVE::JSONSchema::register_format('pve-storage-vgname', \&parse_lvm_name);
sub parse_lvm_name {
my ($name, $noerr) = @_;
@ -336,6 +353,7 @@ sub parse_lvm_name {
#}
PVE::JSONSchema::register_format('pve-storage-portal-dns', \&verify_portal_dns);
sub verify_portal_dns {
my ($portal, $noerr) = @_;
@ -348,6 +366,7 @@ sub verify_portal_dns {
}
PVE::JSONSchema::register_format('pve-storage-content', \&verify_content);
sub verify_content {
my ($ct, $noerr) = @_;
@ -368,6 +387,7 @@ sub verify_content {
# TODO PVE 9 - remove after doing a versioned breaks for pve-guest-common, which was using this
# format.
PVE::JSONSchema::register_format('pve-storage-format', \&verify_format);
sub verify_format {
my ($fmt, $noerr) = @_;
@ -380,6 +400,7 @@ sub verify_format {
}
PVE::JSONSchema::register_format('pve-storage-options', \&verify_options);
sub verify_options {
my ($value, $noerr) = @_;
@ -393,6 +414,7 @@ sub verify_options {
}
PVE::JSONSchema::register_format('pve-volume-id', \&parse_volume_id);
sub parse_volume_id {
my ($volid, $noerr) = @_;
@ -404,6 +426,7 @@ sub parse_volume_id {
}
PVE::JSONSchema::register_format('pve-dir-override', \&verify_dir_override);
sub verify_dir_override {
my ($value, $noerr) = @_;
@ -411,7 +434,10 @@ sub verify_dir_override {
my ($content_type, $relative_path) = ($1, $2);
if (verify_content($content_type, $noerr)) {
# linux has 4k max-path, but limit total length to lower as its concat'd for full path
if (length($relative_path) < 1023 && !(grep { length($_) >= 255 } split('/', $relative_path))) {
if (
length($relative_path) < 1023
&& !(grep { length($_) >= 255 } split('/', $relative_path))
) {
return $value;
}
}
@ -457,7 +483,7 @@ sub decode_value {
$res->{$c} = 1;
}
if ($res->{none} && scalar (keys %$res) > 1) {
if ($res->{none} && scalar(keys %$res) > 1) {
die "unable to combine 'none' with other content types\n";
}
@ -536,8 +562,11 @@ sub parse_config {
# make sure we have a reasonable 'local:' storage
# we want 'local' to be always the same 'type' (on all cluster nodes)
if (!$ids->{local} || $ids->{local}->{type} ne 'dir' ||
($ids->{local}->{path} && $ids->{local}->{path} ne '/var/lib/vz')) {
if (
!$ids->{local}
|| $ids->{local}->{type} ne 'dir'
|| ($ids->{local}->{path} && $ids->{local}->{path} ne '/var/lib/vz')
) {
$ids->{local} = {
type => 'dir',
priority => 0, # force first entry
@ -690,11 +719,15 @@ sub parse_volname {
return ('backup', $fn, undef, undef, undef, undef, 'raw');
} elsif ($volname =~ m!^snippets/([^/]+)$!) {
return ('snippets', $1, undef, undef, undef, undef, 'raw');
} elsif ($volname =~ m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+\.ova\/${PVE::Storage::OVA_CONTENT_RE_1})$!) {
} elsif ($volname =~
m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+\.ova\/${PVE::Storage::OVA_CONTENT_RE_1})$!
) {
my $packed_image = $1;
my $format = $2;
return ('import', $packed_image, undef, undef, undef, undef, "ova+$format");
} elsif ($volname =~ m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!) {
} elsif ($volname =~
m!^import/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!
) {
return ('import', $1, undef, undef, undef, undef, $2);
}
@ -731,8 +764,7 @@ sub get_subdir {
sub filesystem_path {
my ($class, $scfg, $volname, $snapname) = @_;
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname);
# Note: qcow2/qed has internal snapshot, so path is always
# the same (with or without snapshot => same file).
@ -778,15 +810,17 @@ sub create_base {
my $newname = $name;
$newname =~ s/^vm-/base-/;
my $newvolname = $basename ? "$basevmid/$basename/$vmid/$newname" :
"$vmid/$newname";
my $newvolname =
$basename
? "$basevmid/$basename/$vmid/$newname"
: "$vmid/$newname";
my $newpath = $class->filesystem_path($scfg, $newvolname);
die "file '$newpath' already exists\n" if -f $newpath;
rename($path, $newpath) ||
die "rename '$path' to '$newpath' failed - $!\n";
rename($path, $newpath)
|| die "rename '$path' to '$newpath' failed - $!\n";
# We try to protect base volume
@ -805,9 +839,9 @@ my $get_vm_disk_number = sub {
my $disk_regex = qr/(vm|base)-$vmid-disk-(\d+)$suffix/;
my $type = $scfg->{type};
my $def = { %{$defaultData->{plugindata}->{$type}} };
my $def = { %{ $defaultData->{plugindata}->{$type} } };
my $valid = $def->{format}[0];
my $valid = $def->{format}->[0];
if ($valid->{subvol}) {
$disk_regex = qr/(vm|base|subvol|basevol)-$vmid-disk-(\d+)/;
}
@ -838,7 +872,7 @@ sub get_next_vm_diskname {
}
}
die "unable to allocate an image name for VM $vmid in storage '$storeid'\n"
die "unable to allocate an image name for VM $vmid in storage '$storeid'\n";
}
sub find_free_diskname {
@ -846,7 +880,7 @@ sub find_free_diskname {
my $disks = $class->list_images($storeid, $scfg, $vmid);
my $disk_list = [ map { $_->{volid} } @$disks ];
my $disk_list = [map { $_->{volid} } @$disks];
return get_next_vm_diskname($disk_list, $storeid, $vmid, $fmt, $scfg, $add_fmt_suffix);
}
@ -885,8 +919,17 @@ sub clone_image {
eval {
local $CWD = $imagedir;
my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename",
'-F', $format, '-f', 'qcow2', $path];
my $cmd = [
'/usr/bin/qemu-img',
'create',
'-b',
"../$basevmid/$basename",
'-F',
$format,
'-f',
'qcow2',
$path,
];
run_command($cmd);
};
@ -999,7 +1042,7 @@ sub file_size_info {
# TODO PVE 9 remove
if (defined($file_format) && ($file_format eq '1' || $file_format eq '0')) {
warn "file_size_info: detected call with legacy parameter order: \$untrusted before"
." \$file_format\n";
. " \$file_format\n";
$untrusted = $file_format;
$file_format = undef;
}
@ -1040,7 +1083,8 @@ sub file_size_info {
# TODO PVE 9 - consider upgrading to "die" if an unsupported format is passed in after
# evaluating breakage potential.
if ($file_format && !grep { $_ eq $file_format } @checked_qemu_img_formats) {
warn "file_size_info: '$filename': falling back to 'raw' from unknown format '$file_format'\n";
warn
"file_size_info: '$filename': falling back to 'raw' from unknown format '$file_format'\n";
$file_format = 'raw';
}
my $cmd = ['/usr/bin/qemu-img', 'info', '--output=json', $filename];
@ -1049,10 +1093,11 @@ sub file_size_info {
my $json = '';
my $err_output = '';
eval {
run_command($cmd,
run_command(
$cmd,
timeout => $timeout,
outfunc => sub { $json .= shift },
errfunc => sub { $err_output .= shift . "\n"},
errfunc => sub { $err_output .= shift . "\n" },
);
};
warn $@ if $@;
@ -1084,7 +1129,8 @@ sub file_size_info {
}
}
my ($size, $format, $used, $parent) = $info->@{qw(virtual-size format actual-size backing-filename)};
my ($size, $format, $used, $parent) =
$info->@{qw(virtual-size format actual-size backing-filename)};
die "backing file not allowed for untrusted image '$filename'!\n" if $untrusted && $parent;
@ -1175,7 +1221,7 @@ sub volume_resize {
my $format = ($class->parse_volname($volname))[6];
my $cmd = ['/usr/bin/qemu-img', 'resize', '-f', $format, $path , $size];
my $cmd = ['/usr/bin/qemu-img', 'resize', '-f', $format, $path, $size];
run_command($cmd, timeout => 10);
@ -1189,7 +1235,7 @@ sub volume_snapshot {
my $path = $class->filesystem_path($scfg, $volname);
my $cmd = ['/usr/bin/qemu-img', 'snapshot','-c', $snap, $path];
my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-c', $snap, $path];
run_command($cmd);
@ -1212,7 +1258,7 @@ sub volume_snapshot_rollback {
my $path = $class->filesystem_path($scfg, $volname);
my $cmd = ['/usr/bin/qemu-img', 'snapshot','-a', $snap, $path];
my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-a', $snap, $path];
run_command($cmd);
@ -1230,7 +1276,7 @@ sub volume_snapshot_delete {
$class->deactivate_volume($storeid, $scfg, $volname, $snap, {});
my $cmd = ['/usr/bin/qemu-img', 'snapshot','-d', $snap, $path];
my $cmd = ['/usr/bin/qemu-img', 'snapshot', '-d', $snap, $path];
run_command($cmd);
@ -1241,6 +1287,7 @@ sub volume_snapshot_needs_fsfreeze {
return 0;
}
sub storage_can_replicate {
my ($class, $scfg, $storeid, $format) = @_;
@ -1271,14 +1318,14 @@ sub volume_has_feature {
current => { qcow2 => 1, raw => 1, vmdk => 1 },
},
rename => {
current => {qcow2 => 1, raw => 1, vmdk => 1},
current => { qcow2 => 1, raw => 1, vmdk => 1 },
},
};
if ($feature eq 'clone') {
if (
defined($opts->{valid_target_formats})
&& !(grep { $_ eq 'qcow2' } @{$opts->{valid_target_formats}})
&& !(grep { $_ eq 'qcow2' } @{ $opts->{valid_target_formats} })
) {
return 0; # clone_image creates a qcow2 volume
}
@ -1286,13 +1333,13 @@ sub volume_has_feature {
return 0 if $class->can('api') && $class->api() < 10;
}
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = $class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
$class->parse_volname($volname);
my $key = undef;
if($snapname){
if ($snapname) {
$key = 'snap';
}else{
} else {
$key = $isBase ? 'base' : 'current';
}
@ -1307,7 +1354,7 @@ sub list_images {
my $imagedir = $class->get_subdir($scfg, 'images');
my ($defFmt, $vaidFmts) = default_format($scfg);
my $fmts = join ('|', @$vaidFmts);
my $fmts = join('|', @$vaidFmts);
my $res = [];
@ -1322,9 +1369,7 @@ sub list_images {
next if !$vollist && defined($vmid) && ($owner ne $vmid);
my ($size, undef, $used, $parent, $ctime) = eval {
file_size_info($fn, undef, $format);
};
my ($size, undef, $used, $parent, $ctime) = eval { file_size_info($fn, undef, $format); };
if (my $err = $@) {
die $err if $err !~ m/Image is not in \S+ format$/;
warn "image '$fn' is not in expected format '$format', querying as raw\n";
@ -1347,8 +1392,12 @@ sub list_images {
}
my $info = {
volid => $volid, format => $format,
size => $size, vmid => $owner, used => $used, parent => $parent
volid => $volid,
format => $format,
size => $size,
vmid => $owner,
used => $used,
parent => $parent,
};
$info->{ctime} = $ctime if $ctime;
@ -1402,21 +1451,24 @@ my $get_subdir_files = sub {
$info->{vmid} = $vmid // $1;
}
my $notes_fn = $original.NOTES_EXT;
my $notes_fn = $original . NOTES_EXT;
if (-f $notes_fn) {
my $notes = PVE::Tools::file_read_firstline($notes_fn);
$info->{notes} = eval { decode('UTF-8', $notes, 1) } // $notes if defined($notes);
$info->{notes} = eval { decode('UTF-8', $notes, 1) } // $notes
if defined($notes);
}
$info->{protected} = 1 if -e PVE::Storage::protection_file_path($original);
} elsif ($tt eq 'snippets') {
$info = {
volid => "$sid:snippets/". basename($fn),
volid => "$sid:snippets/" . basename($fn),
format => 'snippet',
};
} elsif ($tt eq 'import') {
next if $fn !~ m!/(${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!i;
next
if $fn !~
m!/(${PVE::Storage::SAFE_CHAR_CLASS_RE}+$PVE::Storage::IMPORT_EXT_RE_1)$!i;
$info = { volid => "$sid:import/$1", format => "$2" };
}
@ -1447,7 +1499,7 @@ sub list_volumes {
if ($type eq 'iso' && !defined($vmid)) {
$data = $get_subdir_files->($storeid, $path, 'iso');
} elsif ($type eq 'vztmpl'&& !defined($vmid)) {
} elsif ($type eq 'vztmpl' && !defined($vmid)) {
$data = $get_subdir_files->($storeid, $path, 'vztmpl');
} elsif ($type eq 'backup') {
$data = $get_subdir_files->($storeid, $path, 'backup', $vmid);
@ -1462,7 +1514,7 @@ sub list_volumes {
foreach my $item (@$data) {
if ($type eq 'images' || $type eq 'rootdir') {
my $vminfo = $vmlist->{ids}->{$item->{vmid}};
my $vminfo = $vmlist->{ids}->{ $item->{vmid} };
my $vmtype;
if (defined($vminfo)) {
$vmtype = $vminfo->{type};
@ -1518,9 +1570,9 @@ sub activate_storage {
# this path test may hang indefinitely on unresponsive mounts
my $timeout = 2;
if (! PVE::Tools::run_fork_with_timeout($timeout, sub {-d $path})) {
die "unable to activate storage '$storeid' - " .
"directory '$path' does not exist or is unreachable\n";
if (!PVE::Tools::run_fork_with_timeout($timeout, sub { -d $path })) {
die "unable to activate storage '$storeid' - "
. "directory '$path' does not exist or is unreachable\n";
}
# TODO: mkdir is basically deprecated since 8.0, but we don't warn here until 8.4 or 9.0, as we
@ -1586,9 +1638,9 @@ sub activate_volume {
# check is volume exists
if ($scfg->{path}) {
die "volume '$storeid:$volname' does not exist\n" if ! -e $path;
die "volume '$storeid:$volname' does not exist\n" if !-e $path;
} else {
die "volume '$storeid:$volname' does not exist\n" if ! -b $path;
die "volume '$storeid:$volname' does not exist\n" if !-b $path;
}
}
@ -1637,7 +1689,7 @@ sub prune_backups {
$prune_entry->{ctime} = $archive_info->{ctime};
my $group = "$backup_type/$backup_vmid";
push @{$backup_groups->{$group}}, $prune_entry;
push @{ $backup_groups->{$group} }, $prune_entry;
} else {
# ignore backups that don't use the standard naming scheme
$prune_entry->{mark} = 'renamed';
@ -1720,7 +1772,8 @@ sub read_common_header($) {
# Export a volume into a file handle as a stream of desired format.
sub volume_export {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
= @_;
my $err_msg = "volume export format $format not available for $class\n";
if ($scfg->{path} && !defined($snapshot) && !defined($base_snapshot)) {
@ -1732,23 +1785,42 @@ sub volume_export {
die $err_msg if $with_snapshots || $file_format eq 'subvol';
write_common_header($fh, $size);
if ($file_format eq 'raw') {
run_command(['dd', "if=$file", "bs=4k", "status=progress"], output => '>&'.fileno($fh));
run_command(
['dd', "if=$file", "bs=4k", "status=progress"],
output => '>&' . fileno($fh),
);
} else {
run_command(['qemu-img', 'convert', '-f', $file_format, '-O', 'raw', $file, '/dev/stdout'],
output => '>&'.fileno($fh));
run_command(
[
'qemu-img',
'convert',
'-f',
$file_format,
'-O',
'raw',
$file,
'/dev/stdout',
],
output => '>&' . fileno($fh),
);
}
return;
} elsif ($format =~ /^(qcow2|vmdk)\+size$/) {
my $data_format = $1;
die $err_msg if !$with_snapshots || $file_format ne $data_format;
write_common_header($fh, $size);
run_command(['dd', "if=$file", "bs=4k", "status=progress"], output => '>&'.fileno($fh));
run_command(
['dd', "if=$file", "bs=4k", "status=progress"],
output => '>&' . fileno($fh),
);
return;
} elsif ($format eq 'tar+size') {
die $err_msg if $file_format ne 'subvol';
write_common_header($fh, $size);
run_command(['tar', @COMMON_TAR_FLAGS, '-cf', '-', '-C', $file, '.'],
output => '>&'.fileno($fh));
run_command(
['tar', @COMMON_TAR_FLAGS, '-cf', '-', '-C', $file, '.'],
output => '>&' . fileno($fh),
);
return;
}
}
@ -1761,7 +1833,7 @@ sub volume_export_formats {
my $format = ($class->parse_volname($volname))[6];
if ($with_snapshots) {
return ($format.'+size') if ($format eq 'qcow2' || $format eq 'vmdk');
return ($format . '+size') if ($format eq 'qcow2' || $format eq 'vmdk');
return ();
}
return ('tar+size') if $format eq 'subvol';
@ -1772,7 +1844,18 @@ sub volume_export_formats {
# Import data from a stream, creating a new or replacing or adding to an existing volume.
sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;
die "volume import format '$format' not available for $class\n"
if $format !~ /^(raw|tar|qcow2|vmdk)\+size$/;
@ -1813,11 +1896,15 @@ sub volume_import {
my ($file) = $class->path($scfg, $volname, $storeid)
or die "internal error: failed to get path to newly allocated volume $volname\n";
if ($data_format eq 'raw' || $data_format eq 'qcow2' || $data_format eq 'vmdk') {
run_command(['dd', "of=$file", 'conv=sparse', 'bs=64k'],
input => '<&'.fileno($fh));
run_command(
['dd', "of=$file", 'conv=sparse', 'bs=64k'],
input => '<&' . fileno($fh),
);
} elsif ($data_format eq 'tar') {
run_command(['tar', @COMMON_TAR_FLAGS, '-C', $file, '-xf', '-'],
input => '<&'.fileno($fh));
run_command(
['tar', @COMMON_TAR_FLAGS, '-C', $file, '-xf', '-'],
input => '<&' . fileno($fh),
);
} else {
die "volume import format '$format' not available for $class";
}
@ -1836,7 +1923,7 @@ sub volume_import_formats {
if ($scfg->{path} && !defined($base_snapshot)) {
my $format = ($class->parse_volname($volname))[6];
if ($with_snapshots) {
return ($format.'+size') if ($format eq 'qcow2' || $format eq 'vmdk');
return ($format . '+size') if ($format eq 'qcow2' || $format eq 'vmdk');
return ();
}
return ('tar+size') if $format eq 'subvol';
@ -1851,13 +1938,7 @@ sub rename_volume {
die "no path found\n" if !$scfg->{path};
my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format, 1)
@ -1874,8 +1955,8 @@ sub rename_volume {
my $base = $base_name ? "${base_vmid}/${base_name}/" : '';
rename($old_path, $new_path) ||
die "rename '$old_path' to '$new_path' failed - $!\n";
rename($old_path, $new_path)
|| die "rename '$old_path' to '$new_path' failed - $!\n";
return "${storeid}:${base}${target_vmid}/${target_volname}";
}

View File

@ -10,7 +10,7 @@ use Net::IP;
use POSIX qw(ceil);
use PVE::CephConfig;
use PVE::Cluster qw(cfs_read_file);;
use PVE::Cluster qw(cfs_read_file);
use PVE::JSONSchema qw(get_standard_option);
use PVE::ProcFSTools;
use PVE::RADOS;
@ -47,7 +47,7 @@ my sub get_rbd_path {
$path .= "/$scfg->{namespace}" if defined($scfg->{namespace});
$path .= "/$volume" if defined($volume);
return $path;
};
}
my sub get_rbd_dev_path {
my ($scfg, $storeid, $volume) = @_;
@ -106,7 +106,8 @@ my $rbd_cmd = sub {
}
push @$cmd, '-c', $cmd_option->{ceph_conf} if ($cmd_option->{ceph_conf});
push @$cmd, '-m', $cmd_option->{mon_host} if ($cmd_option->{mon_host});
push @$cmd, '--auth_supported', $cmd_option->{auth_supported} if ($cmd_option->{auth_supported});
push @$cmd, '--auth_supported', $cmd_option->{auth_supported}
if ($cmd_option->{auth_supported});
push @$cmd, '-n', "client.$cmd_option->{userid}" if ($cmd_option->{userid});
push @$cmd, '--keyring', $cmd_option->{keyring} if ($cmd_option->{keyring});
@ -141,14 +142,16 @@ my $krbd_feature_update = sub {
my $active_features = { map { $_ => 1 } @$active_features_list };
my $to_disable = join(',', grep { $active_features->{$_} } @disable);
my $to_enable = join(',', grep { !$active_features->{$_} } @enable );
my $to_enable = join(',', grep { !$active_features->{$_} } @enable);
if ($to_disable) {
print "disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n";
print
"disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n";
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable);
run_rbd_command(
$cmd,
errmsg => "could not disable krbd-incompatible image features '$to_disable' for rbd image: $name",
errmsg =>
"could not disable krbd-incompatible image features '$to_disable' for rbd image: $name",
);
}
if ($to_enable) {
@ -157,7 +160,8 @@ my $krbd_feature_update = sub {
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable);
run_rbd_command(
$cmd,
errmsg => "could not enable krbd-compatible image features '$to_enable' for rbd image: $name",
errmsg =>
"could not enable krbd-compatible image features '$to_enable' for rbd image: $name",
);
};
warn "$@" if $@;
@ -174,7 +178,9 @@ sub run_rbd_command {
# at least 1 child(ren) in pool cephstor1
$args{errfunc} = sub {
my $line = shift;
if ($line =~ m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/) {
if ($line =~
m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/
) {
$lasterr = "$1\n";
} else {
$lasterr = $line;
@ -200,7 +206,7 @@ sub rbd_ls {
my $parser = sub { $raw .= shift };
my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '-l', '--format', 'json');
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);
my $result;
if ($raw eq '') {
@ -225,7 +231,7 @@ sub rbd_ls {
name => $image,
size => $el->{size},
parent => $get_parent_image_name->($el->{parent}),
vmid => $owner
vmid => $owner,
};
}
@ -238,7 +244,12 @@ sub rbd_ls_snap {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'ls', $name, '--format', 'json');
my $raw = '';
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; });
run_rbd_command(
$cmd,
errmsg => "rbd error",
errfunc => sub { },
outfunc => sub { $raw .= shift; },
);
my $list;
if ($raw =~ m/^(\[.*\])$/s) { # untaint
@ -279,7 +290,7 @@ sub rbd_volume_info {
my $raw = '';
my $parser = sub { $raw .= shift };
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);
my $volume;
if ($raw eq '') {
@ -291,7 +302,8 @@ sub rbd_volume_info {
}
$volume->{parent} = $get_parent_image_name->($volume->{parent});
$volume->{protected} = defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef;
$volume->{protected} =
defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef;
return $volume->@{qw(size parent format protected features)};
}
@ -305,7 +317,7 @@ sub rbd_volume_du {
my $raw = '';
my $parser = sub { $raw .= shift };
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);
my $volume;
if ($raw eq '') {
@ -341,7 +353,11 @@ my sub rbd_volume_exists {
my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '--format', 'json');
my $raw = '';
run_rbd_command(
$cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; });
$cmd,
errmsg => "rbd error",
errfunc => sub { },
outfunc => sub { $raw .= shift; },
);
my $list;
if ($raw =~ m/^(\[.*\])$/s) { # untaint
@ -366,7 +382,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, { images => 1 }],
content => [{ images => 1, rootdir => 1 }, { images => 1 }],
'sensitive-properties' => { keyring => 1 },
};
}
@ -375,7 +391,8 @@ sub properties {
return {
monhost => {
description => "IP addresses of monitors (for external clusters).",
type => 'string', format => 'pve-storage-portal-dns-list',
type => 'string',
format => 'pve-storage-portal-dns-list',
},
pool => {
description => "Pool.",
@ -413,7 +430,7 @@ sub options {
return {
nodes => { optional => 1 },
disable => { optional => 1 },
monhost => { optional => 1},
monhost => { optional => 1 },
pool => { optional => 1 },
'data-pool' => { optional => 1 },
namespace => { optional => 1 },
@ -470,7 +487,7 @@ sub path {
my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;
if ($scfg->{krbd}) {
my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name);
@ -508,7 +525,7 @@ sub find_free_diskname {
};
eval {
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);
};
my $err = $@;
@ -522,8 +539,7 @@ sub create_base {
my $snap = '__base__';
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
die "create_base not possible with base image\n" if $isBase;
@ -533,7 +549,7 @@ sub create_base {
die "rbd image must be at format V2" if $format ne "2";
die "volname '$volname' contains wrong information about parent $parent $basename\n"
if $basename && (!$parent || $parent ne $basename."@".$snap);
if $basename && (!$parent || $parent ne $basename . "@" . $snap);
my $newname = $name;
$newname =~ s/^vm-/base-/;
@ -541,9 +557,7 @@ sub create_base {
my $newvolname = $basename ? "$basename/$newname" : "$newname";
my $cmd = $rbd_cmd->(
$scfg,
$storeid,
'rename',
$scfg, $storeid, 'rename',
get_rbd_path($scfg, $name),
get_rbd_path($scfg, $newname),
);
@ -558,7 +572,7 @@ sub create_base {
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $newname, $snap);
if (!$protected){
if (!$protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error");
}
@ -573,8 +587,7 @@ sub clone_image {
my $snap = '__base__';
$snap = $snapname if length $snapname;
my ($vtype, $basename, $basevmid, undef, undef, $isBase) =
$class->parse_volname($volname);
my ($vtype, $basename, $basevmid, undef, undef, $isBase) = $class->parse_volname($volname);
die "$volname is not a base image and snapname is not provided\n"
if !$isBase && !length($snapname);
@ -584,7 +597,8 @@ sub clone_image {
warn "clone $volname: $basename snapname $snap to $name\n";
if (length($snapname)) {
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $volname, $snapname);
my (undef, undef, undef, $protected) =
rbd_volume_info($scfg, $storeid, $volname, $snapname);
if (!$protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname);
@ -596,8 +610,7 @@ sub clone_image {
$newvol = $name if length($snapname);
my @options = (
get_rbd_path($scfg, $basename),
'--snap', $snap,
get_rbd_path($scfg, $basename), '--snap', $snap,
);
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};
@ -610,15 +623,13 @@ sub clone_image {
sub alloc_image {
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
if $name && $name !~ m/^vm-$vmid-/;
$name = $class->find_free_diskname($storeid, $scfg, $vmid) if !$name;
my @options = (
'--image-format' , 2,
'--size', int(($size + 1023) / 1024),
'--image-format', 2, '--size', int(($size + 1023) / 1024),
);
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};
@ -631,9 +642,7 @@ sub alloc_image {
sub free_image {
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
my ($vtype, $name, $vmid, undef, undef, undef) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, undef, undef, undef) = $class->parse_volname($volname);
my $snaps = rbd_ls_snap($scfg, $storeid, $name);
foreach my $snap (keys %$snaps) {
@ -663,7 +672,7 @@ sub list_images {
my $res = [];
for my $image (sort keys %$dat) {
my $info = $dat->{$image};
my ($volname, $parent, $owner) = $info->@{'name', 'parent', 'vmid'};
my ($volname, $parent, $owner) = $info->@{ 'name', 'parent', 'vmid' };
if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) {
$info->{volid} = "$storeid:$1/$volname";
@ -675,7 +684,7 @@ sub list_images {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined ($vmid) && ($owner ne $vmid);
next if defined($vmid) && ($owner ne $vmid);
}
$info->{format} = 'raw';
@ -694,7 +703,7 @@ sub status {
my $pool = $scfg->{'data-pool'} // $scfg->{pool} // 'rbd';
my ($d) = grep { $_->{name} eq $pool } @{$df->{pools}};
my ($d) = grep { $_->{name} eq $pool } @{ $df->{pools} };
if (!defined($d)) {
warn "could not get usage stats for pool '$pool'\n";
@ -727,7 +736,7 @@ sub map_volume {
my ($vtype, $img_name, $vmid) = $class->parse_volname($volname);
my $name = $img_name;
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;
my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);
@ -746,7 +755,7 @@ sub unmap_volume {
my ($class, $storeid, $scfg, $volname, $snapname) = @_;
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;
my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);
@ -790,7 +799,8 @@ sub volume_resize {
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
my $cmd = $rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size/1024/1024)), $name);
my $cmd =
$rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size / 1024 / 1024)), $name);
run_rbd_command($cmd, errmsg => "rbd resize '$volname' error");
return undef;
}
@ -822,7 +832,7 @@ sub volume_snapshot_delete {
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $name, $snap);
if ($protected){
if ($protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
}
@ -842,18 +852,18 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1, snap => 1},
template => { current => 1},
copy => { base => 1, current => 1, snap => 1},
sparseinit => { base => 1, current => 1},
rename => {current => 1},
snapshot => { current => 1, snap => 1 },
clone => { base => 1, snap => 1 },
template => { current => 1 },
copy => { base => 1, current => 1, snap => 1 },
sparseinit => { base => 1, current => 1 },
rename => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
if ($snapname){
if ($snapname) {
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
@ -867,7 +877,8 @@ sub volume_export_formats {
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
return $class->volume_import_formats(
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub volume_export {
@ -893,7 +904,7 @@ sub volume_export {
run_rbd_command(
$cmd,
errmsg => 'could not export image',
output => '>&'.fileno($fh),
output => '>&' . fileno($fh),
);
return;
@ -942,7 +953,7 @@ sub volume_import {
run_rbd_command(
$cmd,
errmsg => 'could not import image',
input => '<&'.fileno($fh),
input => '<&' . fileno($fh),
);
};
if (my $err = $@) {
@ -961,13 +972,7 @@ sub rename_volume {
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
if !$target_volname;

View File

@ -14,7 +14,6 @@ use PVE::Storage::LunCmd::Istgt;
use PVE::Storage::LunCmd::Iet;
use PVE::Storage::LunCmd::LIO;
my @ssh_opts = ('-o', 'BatchMode=yes');
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
my $id_rsa_path = '/etc/pve/priv/zfs';
@ -54,14 +53,15 @@ my $zfs_get_base = sub {
sub zfs_request {
my ($class, $scfg, $timeout, $method, @params) = @_;
$timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 10
$timeout = PVE::RPCEnvironment->is_worker() ? 60 * 60 : 10
if !$timeout;
my $msg = '';
if ($lun_cmds->{$method}) {
if ($scfg->{iscsiprovider} eq 'comstar') {
$msg = PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params);
$msg =
PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params);
} elsif ($scfg->{iscsiprovider} eq 'istgt') {
$msg = PVE::Storage::LunCmd::Istgt::run_lun_command($scfg, $timeout, $method, @params);
} elsif ($scfg->{iscsiprovider} eq 'iet') {
@ -174,7 +174,7 @@ sub type {
sub plugindata {
return {
content => [ {images => 1}, { images => 1 }],
content => [{ images => 1 }, { images => 1 }],
'sensitive-properties' => {},
};
}
@ -252,8 +252,7 @@ sub create_base {
my $snap = '__base__';
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
die "create_base not possible with base image\n" if $isBase;
@ -370,14 +369,13 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1},
template => { current => 1},
copy => { base => 1, current => 1},
snapshot => { current => 1, snap => 1 },
clone => { base => 1 },
template => { current => 1 },
copy => { base => 1, current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;

View File

@ -20,8 +20,8 @@ sub type {
sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, {images => 1 , rootdir => 1}],
format => [ { raw => 1, subvol => 1 } , 'raw' ],
content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }],
format => [{ raw => 1, subvol => 1 }, 'raw'],
'sensitive-properties' => {},
};
}
@ -38,7 +38,8 @@ sub properties {
},
mountpoint => {
description => "mount point",
type => 'string', format => 'pve-storage-path',
type => 'string',
format => 'pve-storage-path',
},
};
}
@ -129,8 +130,8 @@ sub on_add_hook {
if (defined($cfg_mountpoint)) {
if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) {
warn "warning for $storeid - mountpoint: $cfg_mountpoint " .
"does not match current mount point: $mountpoint\n";
warn "warning for $storeid - mountpoint: $cfg_mountpoint "
. "does not match current mount point: $mountpoint\n";
}
} else {
$scfg->{mountpoint} = $mountpoint;
@ -180,8 +181,8 @@ sub zfs_request {
my $output = sub { $msg .= "$_[0]\n" };
if (PVE::RPCEnvironment->is_worker()) {
$timeout = 60*60 if !$timeout;
$timeout = 60*5 if $timeout < 60*5;
$timeout = 60 * 60 if !$timeout;
$timeout = 60 * 5 if $timeout < 60 * 5;
} else {
$timeout = 10 if !$timeout;
}
@ -194,7 +195,7 @@ sub zfs_request {
sub zfs_wait_for_zvol_link {
my ($class, $scfg, $volname, $timeout) = @_;
my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*5 : 10;
my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60 * 5 : 10;
$timeout = $default_timeout if !defined($timeout);
my ($devname, undef, undef) = $class->path($scfg, $volname);
@ -223,7 +224,7 @@ sub alloc_image {
$class->zfs_create_zvol($scfg, $volname, $size);
$class->zfs_wait_for_zvol_link($scfg, $volname);
} elsif ( $fmt eq 'subvol') {
} elsif ($fmt eq 'subvol') {
die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
if $volname && $volname !~ m/^subvol-$vmid-/;
@ -275,7 +276,7 @@ sub list_images {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined ($vmid) && ($owner ne $vmid);
next if defined($vmid) && ($owner ne $vmid);
}
push @$res, $info;
@ -286,8 +287,8 @@ sub list_images {
sub zfs_get_properties {
my ($class, $scfg, $properties, $dataset, $timeout) = @_;
my $result = $class->zfs_request($scfg, $timeout, 'get', '-o', 'value',
'-Hp', $properties, $dataset);
my $result =
$class->zfs_request($scfg, $timeout, 'get', '-o', 'value', '-Hp', $properties, $dataset);
my @values = split /\n/, $result;
return wantarray ? @values : $values[0];
}
@ -300,11 +301,11 @@ sub zfs_get_pool_stats {
my @lines = $class->zfs_get_properties($scfg, 'available,used', $scfg->{pool});
if($lines[0] =~ /^(\d+)$/) {
if ($lines[0] =~ /^(\d+)$/) {
$available = $1;
}
if($lines[1] =~ /^(\d+)$/) {
if ($lines[1] =~ /^(\d+)$/) {
$used = $1;
}
@ -336,8 +337,8 @@ sub zfs_create_subvol {
my $dataset = "$scfg->{pool}/$volname";
my $quota = $size ? "${size}k" : "none";
my $cmd = ['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa',
'-o', "refquota=${quota}", $dataset];
my $cmd =
['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa', '-o', "refquota=${quota}", $dataset];
$class->zfs_request($scfg, undef, @$cmd);
}
@ -391,7 +392,7 @@ sub zfs_list_zvol {
foreach my $zvol (@$zvols) {
my $name = $zvol->{name};
my $parent = $zvol->{origin};
if($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/){
if ($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/) {
$parent = $1;
}
@ -447,11 +448,11 @@ sub status {
sub volume_size_info {
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
my (undef, $vname, undef, $parent, undef, undef, $format) =
$class->parse_volname($volname);
my (undef, $vname, undef, $parent, undef, undef, $format) = $class->parse_volname($volname);
my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';
my ($size, $used) = $class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname");
my ($size, $used) =
$class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname");
$used = ($used =~ /^(\d+)$/) ? $1 : 0;
@ -639,11 +640,27 @@ sub clone_image {
my $name = $class->find_free_diskname($storeid, $scfg, $vmid, $format);
if ($format eq 'subvol') {
my $size = $class->zfs_request($scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename");
my $size = $class->zfs_request(
$scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename",
);
chomp($size);
$class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name", '-o', "refquota=$size");
$class->zfs_request(
$scfg,
undef,
'clone',
"$scfg->{pool}/$basename\@$snap",
"$scfg->{pool}/$name",
'-o',
"refquota=$size",
);
} else {
$class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name");
$class->zfs_request(
$scfg,
undef,
'clone',
"$scfg->{pool}/$basename\@$snap",
"$scfg->{pool}/$name",
);
}
return "$basename/$name";
@ -660,7 +677,7 @@ sub create_base {
die "create_base not possible with base image\n" if $isBase;
my $newname = $name;
if ( $format eq 'subvol' ) {
if ($format eq 'subvol') {
$newname =~ s/^subvol-/basevol-/;
} else {
$newname =~ s/^vm-/base-/;
@ -679,10 +696,9 @@ sub create_base {
sub volume_resize {
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
my $new_size = int($size/1024);
my $new_size = int($size / 1024);
my (undef, $vname, undef, undef, undef, undef, $format) =
$class->parse_volname($volname);
my (undef, $vname, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';
@ -709,17 +725,16 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1},
template => { current => 1},
copy => { base => 1, current => 1},
sparseinit => { base => 1, current => 1},
replicate => { base => 1, current => 1},
rename => {current => 1},
snapshot => { current => 1, snap => 1 },
clone => { base => 1 },
template => { current => 1 },
copy => { base => 1, current => 1 },
sparseinit => { base => 1, current => 1 },
replicate => { base => 1, current => 1 },
rename => { current => 1 },
};
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
my $key = undef;
@ -735,7 +750,8 @@ sub volume_has_feature {
}
sub volume_export {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
= @_;
die "unsupported export stream format for $class: $format\n"
if $format ne 'zfs';
@ -776,7 +792,18 @@ sub volume_export_formats {
}
sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;
die "unsupported import stream format for $class: $format\n"
if $format ne 'zfs';
@ -790,8 +817,11 @@ sub volume_import {
my $zfspath = "$scfg->{pool}/$dataset";
my $suffix = defined($base_snapshot) ? "\@$base_snapshot" : '';
my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $zfspath.$suffix],
noerr => 1, quiet => 1);
my $exists = 0 == run_command(
['zfs', 'get', '-H', 'name', $zfspath . $suffix],
noerr => 1,
quiet => 1,
);
if (defined($base_snapshot)) {
die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists;
} elsif ($exists) {
@ -817,20 +847,16 @@ sub volume_import {
sub volume_import_formats {
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
return $class->volume_export_formats($scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
return $class->volume_export_formats(
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}
sub rename_volume {
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
if !$target_volname;
@ -839,8 +865,11 @@ sub rename_volume {
my $source_zfspath = "${pool}/${source_image}";
my $target_zfspath = "${pool}/${target_volname}";
my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $target_zfspath],
noerr => 1, quiet => 1);
my $exists = 0 == run_command(
['zfs', 'get', '-H', 'name', $target_zfspath],
noerr => 1,
quiet => 1,
);
die "target volume '${target_volname}' already exists\n" if $exists;
$class->zfs_request($scfg, 5, 'rename', ${source_zfspath}, ${target_zfspath});

View File

@ -9,7 +9,6 @@ use Test::More;
use PVE::CephConfig;
# An array of test cases.
# Each test case is comprised of the following keys:
# description => to identify a single test
@ -91,8 +90,8 @@ my $tests = [
EOF
},
{
description => 'single section, section header ' .
'with preceding whitespace and comment',
description => 'single section, section header '
. 'with preceding whitespace and comment',
expected_cfg => {
foo => {
bar => 'baz',
@ -263,8 +262,7 @@ my $tests = [
EOF
},
{
description => 'single section, keys with quoted values, '
. 'comments after values',
description => 'single section, keys with quoted values, ' . 'comments after values',
expected_cfg => {
foo => {
bar => 'baz',
@ -525,8 +523,7 @@ my $tests = [
EOF
},
{
description => 'single section, key-value pairs with ' .
'continued lines and comments',
description => 'single section, key-value pairs with ' . 'continued lines and comments',
expected_cfg => {
foo => {
bar => 'baz continued baz',
@ -548,8 +545,8 @@ my $tests = [
EOF
},
{
description => 'single section, key-value pairs with ' .
'escaped comment literals in values',
description => 'single section, key-value pairs with '
. 'escaped comment literals in values',
expected_cfg => {
foo => {
bar => 'baz#escaped',
@ -563,8 +560,8 @@ my $tests = [
EOF
},
{
description => 'single section, key-value pairs with ' .
'continued lines and escaped comment literals in values',
description => 'single section, key-value pairs with '
. 'continued lines and escaped comment literals in values',
expected_cfg => {
foo => {
bar => 'baz#escaped',
@ -771,8 +768,7 @@ sub test_write_ceph_config {
sub main {
my $test_subs = [
\&test_parse_ceph_config,
\&test_write_ceph_config,
\&test_parse_ceph_config, \&test_write_ceph_config,
];
plan(tests => scalar($tests->@*) * scalar($test_subs->@*));
@ -781,11 +777,11 @@ sub main {
for my $test_sub ($test_subs->@*) {
eval {
# suppress warnings here to make output less noisy for certain tests
local $SIG{__WARN__} = sub {};
local $SIG{__WARN__} = sub { };
$test_sub->($case);
};
warn "$@\n" if $@;
};
}
}
done_testing();

View File

@ -26,14 +26,14 @@ my $tests = [
archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz".$NOTES_EXT,
'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60*60*24 * (365*1100 + 267),
'ctime' => 60 * 60 * 24 * (365 * 1100 + 267),
'is_std_name' => 1,
},
},
@ -42,14 +42,14 @@ my $tests = [
archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz".$NOTES_EXT,
'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60*60*2 + 30,
'ctime' => 60 * 60 * 2 + 30,
'is_std_name' => 1,
},
},
@ -58,8 +58,8 @@ my $tests = [
archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
@ -74,8 +74,8 @@ my $tests = [
archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'openvz',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
@ -90,8 +90,8 @@ my $tests = [
archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'qemu',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
@ -132,9 +132,9 @@ my $decompressor = {
};
my $bkp_suffix = {
qemu => [ 'vma', $decompressor->{vma}, ],
lxc => [ 'tar', $decompressor->{tar}, ],
openvz => [ 'tar', $decompressor->{tar}, ],
qemu => ['vma', $decompressor->{vma}],
lxc => ['tar', $decompressor->{tar}],
openvz => ['tar', $decompressor->{tar}],
};
# create more test cases for backup files matches
@ -143,13 +143,14 @@ for my $virt (sort keys %$bkp_suffix) {
my $archive_name = "vzdump-$virt-$vmid-2020_03_30-21_12_40";
for my $suffix (sort keys %$decomp) {
push @$tests, {
push @$tests,
{
description => "Backup archive, $virt, $format.$suffix",
archive => "backup/$archive_name.$format.$suffix",
expected => {
'filename' => "$archive_name.$format.$suffix",
'logfilename' => $archive_name.$LOG_EXT,
'notesfilename'=> "$archive_name.$format.$suffix".$NOTES_EXT,
'logfilename' => $archive_name . $LOG_EXT,
'notesfilename' => "$archive_name.$format.$suffix" . $NOTES_EXT,
'type' => "$virt",
'format' => "$format",
'decompressor' => $decomp->{$suffix},
@ -162,13 +163,12 @@ for my $virt (sort keys %$bkp_suffix) {
}
}
# add compression formats to test failed matches
my $non_bkp_suffix = {
'openvz' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ],
'lxc' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ],
'qemu' => [ 'vma.xz', 'vms.gz', 'vmx.zst', '', ],
'none' => [ 'tar.gz', ],
'openvz' => ['zip', 'tgz.lzo', 'zip.gz', ''],
'lxc' => ['zip', 'tgz.lzo', 'zip.gz', ''],
'qemu' => ['vma.xz', 'vms.gz', 'vmx.zst', ''],
'none' => ['tar.gz'],
};
# create tests for failed matches
@ -176,7 +176,8 @@ for my $virt (sort keys %$non_bkp_suffix) {
my $suffix = $non_bkp_suffix->{$virt};
for my $s (@$suffix) {
my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s";
push @$tests, {
push @$tests,
{
description => "Failed match: Backup archive, $virt, $s",
archive => $archive,
expected => "ERROR: couldn't determine archive info from '$archive'\n",
@ -184,7 +185,6 @@ for my $virt (sort keys %$non_bkp_suffix) {
}
}
plan tests => scalar @$tests;
for my $tt (@$tests) {

View File

@ -107,7 +107,7 @@ sub mocked_dir_glob_foreach {
my $lines = [];
# read lines in from file
if ($dir =~ m{^/sys/block$} ) {
if ($dir =~ m{^/sys/block$}) {
@$lines = split(/\n/, read_test_file('disklist'));
} elsif ($dir =~ m{^/sys/block/([^/]+)}) {
@$lines = split(/\n/, read_test_file('partlist'));
@ -125,7 +125,7 @@ sub mocked_parse_proc_mounts {
my $mounts = [];
foreach my $line(split(/\n/, $text)) {
foreach my $line (split(/\n/, $text)) {
push @$mounts, [split(/\s+/, $line)];
}
@ -139,7 +139,7 @@ sub read_test_file {
print "file '$testcasedir/$filename' not found\n";
return '';
}
open (my $fh, '<', "disk_tests/$testcasedir/$filename")
open(my $fh, '<', "disk_tests/$testcasedir/$filename")
or die "Cannot open disk_tests/$testcasedir/$filename: $!";
my $output = <$fh> // '';
@ -152,7 +152,6 @@ sub read_test_file {
return $output;
}
sub test_disk_list {
my ($testdir) = @_;
subtest "Test '$testdir'" => sub {
@ -161,9 +160,7 @@ sub test_disk_list {
my $disks;
my $expected_disk_list;
eval {
$disks = PVE::Diskmanage::get_disks();
};
eval { $disks = PVE::Diskmanage::get_disks(); };
warn $@ if $@;
$expected_disk_list = decode_json(read_test_file('disklist_expected.json'));
@ -194,20 +191,25 @@ sub test_disk_list {
warn $@ if $@;
$testcount++;
print Dumper $disk_tmp if $print;
is_deeply($disk_tmp->{$disk}, $expected_disk_list->{$disk}, "disk $disk should be the same");
is_deeply(
$disk_tmp->{$disk},
$expected_disk_list->{$disk},
"disk $disk should be the same",
);
# test wrong parameter
eval {
PVE::Diskmanage::get_disks( { test => 1 } );
};
eval { PVE::Diskmanage::get_disks({ test => 1 }); };
my $err = $@;
$testcount++;
is_deeply($err, "disks is not a string or array reference\n", "error message should be the same");
is_deeply(
$err,
"disks is not a string or array reference\n",
"error message should be the same",
);
}
# test multi disk parameter
$disks = PVE::Diskmanage::get_disks( [ keys %$disks ] );
$disks = PVE::Diskmanage::get_disks([keys %$disks]);
$testcount++;
is_deeply($disks, $expected_disk_list, 'disk list should be the same');
@ -235,24 +237,26 @@ $diskmanage_module->mock('is_iscsi' => \&mocked_is_iscsi);
print("\tMocked is_iscsi\n");
$diskmanage_module->mock('assert_blockdev' => sub { return 1; });
print("\tMocked assert_blockdev\n");
$diskmanage_module->mock('dir_is_empty' => sub {
$diskmanage_module->mock(
'dir_is_empty' => sub {
# all partitions have a holder dir
my $val = shift;
if ($val =~ m|^/sys/block/.+/.+/|) {
return 0;
}
return 1;
});
},
);
print("\tMocked dir_is_empty\n");
$diskmanage_module->mock('check_bin' => sub { return 1; });
print("\tMocked check_bin\n");
my $tools_module= Test::MockModule->new('PVE::ProcFSTools', no_auto => 1);
my $tools_module = Test::MockModule->new('PVE::ProcFSTools', no_auto => 1);
$tools_module->mock('parse_proc_mounts' => \&mocked_parse_proc_mounts);
print("\tMocked parse_proc_mounts\n");
print("Done Setting up Mocking\n\n");
print("Beginning Tests:\n\n");
opendir (my $dh, 'disk_tests')
opendir(my $dh, 'disk_tests')
or die "Cannot open disk_tests: $!";
while (readdir $dh) {


@ -19,50 +19,40 @@ my $tests = [
volname => '1234/vm-1234-disk-0.raw',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.raw",
'1234',
'images'
"$path/images/1234/vm-1234-disk-0.raw", '1234', 'images',
],
},
{
volname => '1234/vm-1234-disk-0.raw',
snapname => 'my_snap',
expected => "can't snapshot this image format\n"
expected => "can't snapshot this image format\n",
},
{
volname => '1234/vm-1234-disk-0.qcow2',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2",
'1234',
'images'
"$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images',
],
},
{
volname => '1234/vm-1234-disk-0.qcow2',
snapname => 'my_snap',
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2",
'1234',
'images'
"$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images',
],
},
{
volname => 'iso/my-awesome-proxmox.iso',
snapname => undef,
expected => [
"$path/template/iso/my-awesome-proxmox.iso",
undef,
'iso'
"$path/template/iso/my-awesome-proxmox.iso", undef, 'iso',
],
},
{
volname => "backup/vzdump-qemu-1234-2020_03_30-21_12_40.vma",
snapname => undef,
expected => [
"$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma",
1234,
'backup'
"$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma", 1234, 'backup',
],
},
];
@ -76,9 +66,7 @@ foreach my $tt (@$tests) {
my $scfg = { path => $path };
my $got;
eval {
$got = [ PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname) ];
};
eval { $got = [PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname)]; };
$got = $@ if $@;
is_deeply($got, $expected, "wantarray: filesystem_path for $volname")


@ -17,21 +17,26 @@ my $vtype_subdirs = PVE::Storage::Plugin::get_vtype_subdirs();
# [2] => expected return from get_subdir
my $tests = [
# failed matches
[ $scfg_with_path, 'none', "unknown vtype 'none'\n" ],
[ {}, 'iso', "storage definition has no path\n" ],
[$scfg_with_path, 'none', "unknown vtype 'none'\n"],
[{}, 'iso', "storage definition has no path\n"],
];
# creates additional positive tests
foreach my $type (keys %$vtype_subdirs) {
my $path = "$scfg_with_path->{path}/$vtype_subdirs->{$type}";
push @$tests, [ $scfg_with_path, $type, $path ];
push @$tests, [$scfg_with_path, $type, $path];
}
# creates additional tests for overrides
foreach my $type (keys %$vtype_subdirs) {
my $override = "${type}_override";
my $scfg_with_override = { path => '/some/path', 'content-dirs' => { $type => $override } };
push @$tests, [ $scfg_with_override, $type, "$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}" ];
push @$tests,
[
$scfg_with_override,
$type,
"$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}",
];
}
plan tests => scalar @$tests;
@ -43,7 +48,7 @@ foreach my $tt (@$tests) {
eval { $got = PVE::Storage::Plugin->get_subdir($scfg, $type) };
$got = $@ if $@;
is ($got, $expected, "get_subdir for $type") || diag(explain($got));
is($got, $expected, "get_subdir for $type") || diag(explain($got));
}
done_testing();


@ -56,8 +56,8 @@ my $mocked_vmlist = {
'node' => 'x42',
'type' => 'qemu',
'version' => 6,
}
}
},
},
};
my $storage_dir = File::Temp->newdir();
@ -257,8 +257,7 @@ my @tests = (
"$storage_dir/images/16114/vm-16114-disk-1.qcow2",
],
parent => [
"../9004/base-9004-disk-0.qcow2",
"../9004/base-9004-disk-1.qcow2",
"../9004/base-9004-disk-0.qcow2", "../9004/base-9004-disk-1.qcow2",
],
expected => [
{
@ -444,7 +443,7 @@ my @tests = (
'used' => DEFAULT_USED,
'vmid' => '1234',
'volid' => 'local:1234/vm-1234-disk-0.qcow2',
}
},
],
},
{
@ -466,7 +465,6 @@ my @tests = (
},
);
# provide static vmlist for tests
my $mock_cluster = Test::MockModule->new('PVE::Cluster', no_auto => 1);
$mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; });
@ -474,7 +472,8 @@ $mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; });
# populate is File::stat's method to fill all information from CORE::stat into
# an blessed array.
my $mock_stat = Test::MockModule->new('File::stat', no_auto => 1);
$mock_stat->redefine(populate => sub {
$mock_stat->redefine(
populate => sub {
my (@st) = @_;
$st[7] = DEFAULT_SIZE;
$st[10] = DEFAULT_CTIME;
@ -482,18 +481,22 @@ $mock_stat->redefine(populate => sub {
my $result = $mock_stat->original('populate')->(@st);
return $result;
});
},
);
# override info provided by qemu-img in file_size_info
my $mock_fsi = Test::MockModule->new('PVE::Storage::Plugin', no_auto => 1);
$mock_fsi->redefine(file_size_info => sub {
my ($size, $format, $used, $parent, $ctime) = $mock_fsi->original('file_size_info')->(@_);
$mock_fsi->redefine(
file_size_info => sub {
my ($size, $format, $used, $parent, $ctime) =
$mock_fsi->original('file_size_info')->(@_);
$size = DEFAULT_SIZE;
$used = DEFAULT_USED;
return wantarray ? ($size, $format, $used, $parent, $ctime) : $size;
});
},
);
my $plan = scalar @tests;
plan tests => $plan + 1;
@ -507,17 +510,19 @@ plan tests => $plan + 1;
PVE::Storage::Plugin->list_volumes('sid', $scfg_with_type, undef, ['images']);
is_deeply ($tested_vmlist, $original_vmlist,
'PVE::Cluster::vmlist remains unmodified')
|| diag ("Expected vmlist to remain\n", explain($original_vmlist),
"but it turned to\n", explain($tested_vmlist));
is_deeply($tested_vmlist, $original_vmlist, 'PVE::Cluster::vmlist remains unmodified')
|| diag(
"Expected vmlist to remain\n",
explain($original_vmlist),
"but it turned to\n",
explain($tested_vmlist),
);
}
{
my $sid = 'local';
my $types = [ 'rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets' ];
my @suffixes = ( 'qcow2', 'raw', 'vmdk', 'vhdx' );
my $types = ['rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets'];
my @suffixes = ('qcow2', 'raw', 'vmdk', 'vhdx');
# run through test cases
foreach my $tt (@tests) {
@ -536,10 +541,10 @@ plan tests => $plan + 1;
if ($name) {
# using qemu-img to also be able to represent the backing device
my @cmd = ( '/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE );
push @cmd, ( '-f', $suffix ) if $suffix;
push @cmd, ( '-u', '-b', @$parent[$num] ) if $parent;
push @cmd, ( '-F', $suffix ) if $parent && $suffix;
my @cmd = ('/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE);
push @cmd, ('-f', $suffix) if $suffix;
push @cmd, ('-u', '-b', @$parent[$num]) if $parent;
push @cmd, ('-F', $suffix) if $parent && $suffix;
$num++;
run_command([@cmd]);


@ -21,7 +21,15 @@ my $tests = [
{
description => 'VM disk image, linked, qcow2, vm- as base-',
volname => "$vmid/vm-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
expected => [ 'images', "vm-$vmid-disk-0.qcow2", "$vmid", "vm-$vmid-disk-0.qcow2", "$vmid", undef, 'qcow2', ],
expected => [
'images',
"vm-$vmid-disk-0.qcow2",
"$vmid",
"vm-$vmid-disk-0.qcow2",
"$vmid",
undef,
'qcow2',
],
},
#
# iso
@ -34,7 +42,8 @@ my $tests = [
{
description => 'ISO image, img',
volname => 'iso/some-other-installation-disk.img',
expected => ['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'],
expected =>
['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'],
},
#
# container templates
@ -42,17 +51,41 @@ my $tests = [
{
description => 'Container template tar.gz',
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.gz', undef, undef, undef, undef, 'raw'],
expected => [
'vztmpl',
'debian-10.0-standard_10.0-1_amd64.tar.gz',
undef,
undef,
undef,
undef,
'raw',
],
},
{
description => 'Container template tar.xz',
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.xz', undef, undef, undef, undef, 'raw'],
expected => [
'vztmpl',
'debian-10.0-standard_10.0-1_amd64.tar.xz',
undef,
undef,
undef,
undef,
'raw',
],
},
{
description => 'Container template tar.bz2',
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.bz2', undef, undef, undef, undef, 'raw'],
expected => [
'vztmpl',
'debian-10.0-standard_10.0-1_amd64.tar.bz2',
undef,
undef,
undef,
undef,
'raw',
],
},
#
# container rootdir
@ -65,12 +98,21 @@ my $tests = [
{
description => 'Container rootdir, subvol',
volname => "$vmid/subvol-$vmid-disk-0.subvol",
expected => [ 'images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol' ],
expected =>
['images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol'],
},
{
description => 'Backup archive, no virtualization type',
volname => "backup/vzdump-none-$vmid-2020_03_30-21_39_30.tar",
expected => ['backup', "vzdump-none-$vmid-2020_03_30-21_39_30.tar", undef, undef, undef, undef, 'raw'],
expected => [
'backup',
"vzdump-none-$vmid-2020_03_30-21_39_30.tar",
undef,
undef,
undef,
undef,
'raw',
],
},
#
# Snippets
@ -91,17 +133,18 @@ my $tests = [
{
description => "Import, ova",
volname => 'import/import.ova',
expected => ['import', 'import.ova', undef, undef, undef ,undef, 'ova'],
expected => ['import', 'import.ova', undef, undef, undef, undef, 'ova'],
},
{
description => "Import, ovf",
volname => 'import/import.ovf',
expected => ['import', 'import.ovf', undef, undef, undef ,undef, 'ovf'],
expected => ['import', 'import.ovf', undef, undef, undef, undef, 'ovf'],
},
{
description => "Import, innner file of ova",
volname => 'import/import.ova/disk.qcow2',
expected => ['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'],
expected =>
['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'],
},
{
description => "Import, innner file of ova",
@ -111,7 +154,8 @@ my $tests = [
{
description => "Import, innner file of ova with whitespace in name",
volname => 'import/import.ova/OS disk.vmdk',
expected => ['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
expected =>
['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
},
{
description => "Import, innner file of ova",
@ -129,12 +173,14 @@ my $tests = [
{
description => 'Failed match: ISO image, dvd',
volname => 'iso/yet-again-a-installation-disk.dvd',
expected => "unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n",
expected =>
"unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n",
},
{
description => 'Failed match: Container template, zip.gz',
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz',
expected => "unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n",
expected =>
"unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n",
},
{
description => 'Failed match: Container rootdir, subvol',
@ -149,12 +195,14 @@ my $tests = [
{
description => 'Failed match: VM disk image, linked, qcow2, first vmid',
volname => "ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n",
expected =>
"unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n",
},
{
description => 'Failed match: VM disk image, linked, qcow2, second vmid',
volname => "$vmid/base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2",
expected => "unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n",
expected =>
"unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n",
},
{
description => "Failed match: import dir but no ova/ovf/disk image",
@ -164,20 +212,14 @@ my $tests = [
];
# create more test cases for VM disk images matches
my $disk_suffix = [ 'raw', 'qcow2', 'vmdk' ];
my $disk_suffix = ['raw', 'qcow2', 'vmdk'];
foreach my $s (@$disk_suffix) {
my @arr = (
{
description => "VM disk image, $s",
volname => "$vmid/vm-$vmid-disk-1.$s",
expected => [
'images',
"vm-$vmid-disk-1.$s",
"$vmid",
undef,
undef,
undef,
"$s",
'images', "vm-$vmid-disk-1.$s", "$vmid", undef, undef, undef, "$s",
],
},
{
@ -197,13 +239,7 @@ foreach my $s (@$disk_suffix) {
description => "VM disk image, base, $s",
volname => "$vmid/base-$vmid-disk-0.$s",
expected => [
'images',
"base-$vmid-disk-0.$s",
"$vmid",
undef,
undef,
'base-',
"$s"
'images', "base-$vmid-disk-0.$s", "$vmid", undef, undef, 'base-', "$s",
],
},
);
@ -211,12 +247,11 @@ foreach my $s (@$disk_suffix) {
push @$tests, @arr;
}
# create more test cases for backup files matches
my $bkp_suffix = {
qemu => [ 'vma', 'vma.gz', 'vma.lzo', 'vma.zst' ],
lxc => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2' ],
openvz => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst' ],
qemu => ['vma', 'vma.gz', 'vma.lzo', 'vma.zst'],
lxc => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2'],
openvz => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst'],
};
foreach my $virt (keys %$bkp_suffix) {
@ -233,7 +268,7 @@ foreach my $virt (keys %$bkp_suffix) {
undef,
undef,
undef,
'raw'
'raw',
],
},
);
@ -242,11 +277,10 @@ foreach my $virt (keys %$bkp_suffix) {
}
}
# create more test cases for failed backup files matches
my $non_bkp_suffix = {
qemu => [ 'vms.gz', 'vma.xz' ],
lxc => [ 'zip.gz', 'tgz.lzo' ],
qemu => ['vms.gz', 'vma.xz'],
lxc => ['zip.gz', 'tgz.lzo'],
};
foreach my $virt (keys %$non_bkp_suffix) {
my $suffix = $non_bkp_suffix->{$virt};
@ -255,7 +289,8 @@ foreach my $virt (keys %$non_bkp_suffix) {
{
description => "Failed match: Backup archive, $virt, $s",
volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
expected => "unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n",
expected =>
"unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n",
},
);
@ -263,7 +298,6 @@ foreach my $virt (keys %$non_bkp_suffix) {
}
}
#
# run through test case array
#
@ -278,12 +312,12 @@ foreach my $t (@$tests) {
my $expected = $t->{expected};
my $got;
eval { $got = [ PVE::Storage::Plugin->parse_volname($volname) ] };
eval { $got = [PVE::Storage::Plugin->parse_volname($volname)] };
$got = $@ if $@;
is_deeply($got, $expected, $description);
$seen_vtype->{@$expected[0]} = 1 if ref $expected eq 'ARRAY';
$seen_vtype->{ @$expected[0] } = 1 if ref $expected eq 'ARRAY';
}
# to check if all $vtype_subdirs are defined in path_to_volume_id


@ -47,24 +47,21 @@ my @tests = (
description => 'Image, qcow2',
volname => "$storage_dir/images/16110/vm-16110-disk-0.qcow2",
expected => [
'images',
'local:16110/vm-16110-disk-0.qcow2',
'images', 'local:16110/vm-16110-disk-0.qcow2',
],
},
{
description => 'Image, raw',
volname => "$storage_dir/images/16112/vm-16112-disk-0.raw",
expected => [
'images',
'local:16112/vm-16112-disk-0.raw',
'images', 'local:16112/vm-16112-disk-0.raw',
],
},
{
description => 'Image template, qcow2',
volname => "$storage_dir/images/9004/base-9004-disk-0.qcow2",
expected => [
'images',
'local:9004/base-9004-disk-0.qcow2',
'images', 'local:9004/base-9004-disk-0.qcow2',
],
},
@ -72,56 +69,49 @@ my @tests = (
description => 'Backup, vma.gz',
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz",
expected => [
'backup',
'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
],
},
{
description => 'Backup, vma.lzo',
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo",
expected => [
'backup',
'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
],
},
{
description => 'Backup, vma',
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma",
expected => [
'backup',
'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
],
},
{
description => 'Backup, tar.lzo',
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo",
expected => [
'backup',
'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
],
},
{
description => 'Backup, vma.zst',
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst",
expected => [
'backup',
'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst'
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst',
],
},
{
description => 'Backup, tar.zst',
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst",
expected => [
'backup',
'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst'
'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst',
],
},
{
description => 'Backup, tar.bz2',
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2",
expected => [
'backup',
'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
'backup', 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
],
},
@ -129,24 +119,21 @@ my @tests = (
description => 'ISO file',
volname => "$storage_dir/template/iso/yet-again-a-installation-disk.iso",
expected => [
'iso',
'local:iso/yet-again-a-installation-disk.iso',
'iso', 'local:iso/yet-again-a-installation-disk.iso',
],
},
{
description => 'CT template, tar.gz',
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz",
expected => [
'vztmpl',
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
],
},
{
description => 'CT template, wrong ending, tar bz2',
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.bz2",
expected => [
'vztmpl',
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
],
},
@ -154,56 +141,49 @@ my @tests = (
description => 'Rootdir',
volname => "$storage_dir/private/1234/", # fileparse needs / at the end
expected => [
'rootdir',
'local:rootdir/1234',
'rootdir', 'local:rootdir/1234',
],
},
{
description => 'Rootdir, folder subvol',
volname => "$storage_dir/images/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end
expected => [
'images',
'local:1234/subvol-1234-disk-0.subvol'
'images', 'local:1234/subvol-1234-disk-0.subvol',
],
},
{
description => 'Snippets, yaml',
volname => "$storage_dir/snippets/userconfig.yaml",
expected => [
'snippets',
'local:snippets/userconfig.yaml',
'snippets', 'local:snippets/userconfig.yaml',
],
},
{
description => 'Snippets, hookscript',
volname => "$storage_dir/snippets/hookscript.pl",
expected => [
'snippets',
'local:snippets/hookscript.pl',
'snippets', 'local:snippets/hookscript.pl',
],
},
{
description => 'CT template, tar.xz',
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.xz",
expected => [
'vztmpl',
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
],
},
{
description => 'Import, ova',
volname => "$storage_dir/import/import.ova",
expected => [
'import',
'local:import/import.ova',
'import', 'local:import/import.ova',
],
},
{
description => 'Import, ovf',
volname => "$storage_dir/import/import.ovf",
expected => [
'import',
'local:import/import.ovf',
'import', 'local:import/import.ovf',
],
},
@ -281,13 +261,13 @@ foreach my $tt (@tests) {
# run tests
my $got;
eval { $got = [ PVE::Storage::path_to_volume_id($scfg, $file) ] };
eval { $got = [PVE::Storage::path_to_volume_id($scfg, $file)] };
$got = $@ if $@;
is_deeply($got, $expected, $description) || diag(explain($got));
$seen_vtype->{@$expected[0]} = 1
if ( @$expected[0] ne '' && scalar @$expected > 1);
$seen_vtype->{ @$expected[0] } = 1
if (@$expected[0] ne '' && scalar @$expected > 1);
}
# to check if all $vtype_subdirs are defined in path_to_volume_id


@ -18,31 +18,32 @@ my $mocked_backups_lists = {};
my $basetime = 1577881101; # 2020_01_01-12_18_21 UTC
foreach my $vmid (@vmids) {
push @{$mocked_backups_lists->{default}}, (
push @{ $mocked_backups_lists->{default} },
(
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
'ctime' => $basetime - 585*24*60*60 - 60*60,
'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60,
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
'ctime' => $basetime - 24*60*60 - 60*60,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60,
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
'ctime' => $basetime - 24*60*60 - 60*60 + 30,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30,
'vmid' => $vmid,
'protected' => 1,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
'ctime' => $basetime - 24*60*60 - 60*60 + 60,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 60,
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
'ctime' => $basetime - 60*60,
'ctime' => $basetime - 60 * 60,
'vmid' => $vmid,
},
{
@ -62,7 +63,8 @@ foreach my $vmid (@vmids) {
},
);
}
push @{$mocked_backups_lists->{year1970}}, (
push @{ $mocked_backups_lists->{year1970} },
(
{
'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst",
'ctime' => 83,
@ -70,25 +72,27 @@ push @{$mocked_backups_lists->{year1970}}, (
},
{
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
'ctime' => 60*60*24 * (365*100 + 25) + 60,
'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60,
'vmid' => 321,
},
);
push @{$mocked_backups_lists->{novmid}}, (
);
push @{ $mocked_backups_lists->{novmid} },
(
{
'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz",
'ctime' => 1234,
},
);
push @{$mocked_backups_lists->{threeway}}, (
);
push @{ $mocked_backups_lists->{threeway} },
(
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
'ctime' => $basetime - 7*24*60*60,
'ctime' => $basetime - 7 * 24 * 60 * 60,
'vmid' => 7654,
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
'ctime' => $basetime - 24*60*60,
'ctime' => $basetime - 24 * 60 * 60,
'vmid' => 7654,
},
{
@ -96,74 +100,78 @@ push @{$mocked_backups_lists->{threeway}}, (
'ctime' => $basetime,
'vmid' => 7654,
},
);
push @{$mocked_backups_lists->{weekboundary}}, (
);
push @{ $mocked_backups_lists->{weekboundary} },
(
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+2)*24*60*60,
'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60,
'vmid' => 7654,
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+3)*24*60*60,
'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60,
'vmid' => 7654,
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+6)*24*60*60,
'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60,
'vmid' => 7654,
},
);
);
my $current_list;
my $mock_plugin = Test::MockModule->new('PVE::Storage::Plugin');
$mock_plugin->redefine(list_volumes => sub {
$mock_plugin->redefine(
list_volumes => sub {
my ($class, $storeid, $scfg, $vmid, $content_types) = @_;
my $list = $mocked_backups_lists->{$current_list};
return $list if !defined($vmid);
return [ grep { $_->{vmid} eq $vmid } @{$list} ];
});
return [grep { $_->{vmid} eq $vmid } @{$list}];
},
);
sub generate_expected {
my ($vmids, $type, $marks) = @_;
my @expected;
foreach my $vmid (@{$vmids}) {
push @expected, (
push @expected,
(
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
'type' => 'qemu',
'ctime' => $basetime - 585*24*60*60 - 60*60,
'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60,
'mark' => $marks->[0],
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
'type' => 'qemu',
'ctime' => $basetime - 24*60*60 - 60*60,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60,
'mark' => $marks->[1],
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
'type' => 'qemu',
'ctime' => $basetime - 24*60*60 - 60*60 + 30,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30,
'mark' => 'protected',
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
'type' => 'qemu',
'ctime' => $basetime - 24*60*60 - 60*60 + 60,
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 60,
'mark' => $marks->[2],
'vmid' => $vmid,
},
{
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
'type' => 'qemu',
'ctime' => $basetime - 60*60,
'ctime' => $basetime - 60 * 60,
'mark' => $marks->[3],
'vmid' => $vmid,
},
@ -175,7 +183,8 @@ sub generate_expected {
'vmid' => $vmid,
},
) if !defined($type) || $type eq 'qemu';
push @expected, (
push @expected,
(
{
'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst",
'type' => 'lxc',
@ -184,7 +193,8 @@ sub generate_expected {
'vmid' => $vmid,
},
) if !defined($type) || $type eq 'lxc';
push @expected, (
push @expected,
(
{
'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst",
'type' => 'unknown',
@ -194,7 +204,7 @@ sub generate_expected {
},
) if !defined($type);
}
return [ sort { $a->{volid} cmp $b->{volid} } @expected ];
return [sort { $a->{volid} cmp $b->{volid} } @expected];
}
# an array of test cases, each test is comprised of the following keys:
@ -212,7 +222,8 @@ my $tests = [
keep => {
'keep-last' => 3,
},
expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']),
},
{
description => 'weekly=2, one ID',
@ -220,7 +231,11 @@ my $tests = [
keep => {
'keep-weekly' => 2,
},
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
),
},
{
description => 'daily=weekly=monthly=1, multiple IDs',
@ -230,7 +245,8 @@ my $tests = [
'keep-weekly' => 1,
'keep-monthly' => 1,
},
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
},
{
description => 'hourly=4, one ID',
@ -239,7 +255,11 @@ my $tests = [
'keep-hourly' => 4,
'keep-daily' => 0,
},
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['keep', 'remove', 'keep', 'keep', 'keep', 'keep'],
),
},
{
description => 'yearly=2, multiple IDs',
@ -250,7 +270,11 @@ my $tests = [
'keep-monthly' => 0,
'keep-yearly' => 2,
},
expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected => generate_expected(
\@vmids,
undef,
['remove', 'remove', 'keep', 'remove', 'keep', 'keep'],
),
},
{
description => 'last=2,hourly=2 one ID',
@ -259,7 +283,11 @@ my $tests = [
'keep-last' => 2,
'keep-hourly' => 2,
},
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['keep', 'remove', 'keep', 'keep', 'keep', 'keep'],
),
},
{
description => 'last=1,monthly=2, multiple IDs',
@ -267,7 +295,8 @@ my $tests = [
'keep-last' => 1,
'keep-monthly' => 2,
},
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
},
{
description => 'monthly=3, one ID',
@ -275,7 +304,11 @@ my $tests = [
keep => {
'keep-monthly' => 3,
},
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['keep', 'remove', 'keep', 'remove', 'keep', 'keep'],
),
},
{
description => 'last=daily=weekly=1, multiple IDs',
@ -284,7 +317,8 @@ my $tests = [
'keep-daily' => 1,
'keep-weekly' => 1,
},
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
},
{
description => 'last=daily=weekly=1, others zero, multiple IDs',
@ -296,7 +330,8 @@ my $tests = [
'keep-monthly' => 0,
'keep-yearly' => 0,
},
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
},
{
description => 'daily=2, one ID',
@ -304,7 +339,11 @@ my $tests = [
keep => {
'keep-daily' => 2,
},
expected => generate_expected([$vmids[0]], undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['remove', 'remove', 'keep', 'remove', 'keep', 'keep'],
),
},
{
description => 'weekly=monthly=1, multiple IDs',
@ -312,7 +351,11 @@ my $tests = [
'keep-weekly' => 1,
'keep-monthly' => 1,
},
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
expected => generate_expected(
\@vmids,
undef,
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
),
},
{
description => 'weekly=yearly=1, one ID',
@ -321,7 +364,11 @@ my $tests = [
'keep-weekly' => 1,
'keep-yearly' => 1,
},
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
expected => generate_expected(
[$vmids[0]],
undef,
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
),
},
{
description => 'weekly=yearly=1, one ID, type qemu',
@ -331,7 +378,11 @@ my $tests = [
'keep-weekly' => 1,
'keep-yearly' => 1,
},
expected => generate_expected([$vmids[0]], 'qemu', ['keep', 'remove', 'remove', 'remove', 'keep', '']),
expected => generate_expected(
[$vmids[0]],
'qemu',
['keep', 'remove', 'remove', 'remove', 'keep', ''],
),
},
{
description => 'week=yearly=1, one ID, type lxc',
@ -358,7 +409,7 @@ my $tests = [
},
{
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
'ctime' => 60*60*24 * (365*100 + 25) + 60,
'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60,
'mark' => 'keep',
'type' => 'lxc',
'vmid' => 321,
@ -383,7 +434,8 @@ my $tests = [
{
description => 'all missing, multiple IDs',
keep => {},
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
},
{
description => 'all zero, multiple IDs',
@ -395,7 +447,8 @@ my $tests = [
'keep-monthyl' => 0,
'keep-yearly' => 0,
},
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
},
{
description => 'some zero, some missing, multiple IDs',
@ -406,7 +459,8 @@ my $tests = [
'keep-monthyl' => 0,
'keep-yearly' => 0,
},
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
expected =>
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
},
{
description => 'daily=weekly=monthly=1',
@ -419,14 +473,14 @@ my $tests = [
expected => [
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
'ctime' => $basetime - 7*24*60*60,
'ctime' => $basetime - 7 * 24 * 60 * 60,
'type' => 'qemu',
'vmid' => 7654,
'mark' => 'keep',
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
'ctime' => $basetime - 24*60*60,
'ctime' => $basetime - 24 * 60 * 60,
'type' => 'qemu',
'vmid' => 7654,
'mark' => 'remove', # month is already covered by the backup kept by keep-weekly!
@ -450,21 +504,21 @@ my $tests = [
expected => [
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+2)*24*60*60,
'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60,
'type' => 'qemu',
'vmid' => 7654,
'mark' => 'remove',
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+3)*24*60*60,
'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60,
'type' => 'qemu',
'vmid' => 7654,
'mark' => 'keep',
},
{
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
'ctime' => $basetime + (366-31+6)*24*60*60,
'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60,
'type' => 'qemu',
'vmid' => 7654,
'mark' => 'keep',
@ -479,8 +533,10 @@ for my $tt (@$tests) {
my $got = eval {
$current_list = $tt->{list} // 'default';
my $res = PVE::Storage::Plugin->prune_backups($tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1);
return [ sort { $a->{volid} cmp $b->{volid} } @{$res} ];
my $res = PVE::Storage::Plugin->prune_backups(
$tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1,
);
return [sort { $a->{volid} cmp $b->{volid} } @{$res}];
};
$got = $@ if $@;


@ -26,7 +26,7 @@ use JSON;
use PVE::Tools qw(run_command);
my $pool = "testpool";
my $use_existing= undef;
my $use_existing = undef;
my $namespace = "testspace";
my $showhelp = '';
my $vmid = 999999;
@ -46,7 +46,7 @@ Known options are:
-h, --help Print this help message
";
GetOptions (
GetOptions(
"pool=s" => \$pool,
"use-existing" => \$use_existing,
"namespace=s" => \$namespace,
@ -54,7 +54,7 @@ GetOptions (
"h|help" => \$showhelp,
"cleanup" => \$cleanup,
"d|debug" => \$DEBUG,
) or die ($helpstring);
) or die($helpstring);
if ($showhelp) {
warn $helpstring;
@ -69,6 +69,7 @@ my $vmid_linked_clone = int($vmid) - 2;
sub jp {
print to_json($_[0], { utf8 => 8, pretty => 1, canonical => 1 }) . "\n";
}
sub dbgvar {
jp(@_) if $DEBUG;
}
@ -77,11 +78,9 @@ sub run_cmd {
my ($cmd, $json, $ignore_errors) = @_;
my $raw = '';
my $parser = sub {$raw .= shift;};
my $parser = sub { $raw .= shift; };
eval {
run_command($cmd, outfunc => $parser);
};
eval { run_command($cmd, outfunc => $parser); };
if (my $err = $@) {
die $err if !$ignore_errors;
}
@ -109,9 +108,7 @@ sub run_test_cmd {
$raw .= "${line}\n";
};
eval {
run_command($cmd, outfunc => $out);
};
eval { run_command($cmd, outfunc => $out); };
if (my $err = $@) {
print $raw;
print $err;
@ -126,7 +123,7 @@ sub prepare {
my $pools = run_cmd("ceph osd pool ls --format json", 1);
my %poolnames = map {$_ => 1} @$pools;
my %poolnames = map { $_ => 1 } @$pools;
die "Pool '$pool' does not exist!\n"
if !exists($poolnames{$pool}) && $use_existing;
@ -167,13 +164,28 @@ sub prepare {
run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']);
}
# create PVE storages (librbd / krbd)
run_cmd(['pvesm', 'add', 'rbd', ${storage_name}, '--krbd', '0', '--pool', ${pool}, '--namespace', ${namespace}, '--content', 'images,rootdir'])
if !$rbd_found;
run_cmd(
[
'pvesm',
'add',
'rbd',
${storage_name},
'--krbd',
'0',
'--pool',
${pool},
'--namespace',
${namespace},
'--content',
'images,rootdir',
],
) if !$rbd_found;
# create test VM
print "Create test VM ${vmid}\n";
my $vms = run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'], 1);
my $vms =
run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'],
1);
for my $vm (@$vms) {
# TODO: introduce a force flag to make this behaviour configurable
@ -183,10 +195,21 @@ sub prepare {
run_cmd(['qm', 'destroy', ${vmid}]);
}
}
run_cmd(['qm', 'create', ${vmid}, '--bios', 'ovmf', '--efidisk0', "${storage_name}:1", '--scsi0', "${storage_name}:2"]);
run_cmd(
[
'qm',
'create',
${vmid},
'--bios',
'ovmf',
'--efidisk0',
"${storage_name}:1",
'--scsi0',
"${storage_name}:2",
],
);
}
sub cleanup {
print "Cleaning up test environment!\n";
print "Removing VMs\n";
@ -195,7 +218,21 @@ sub cleanup {
run_cmd(['qm', 'stop', ${vmid_clone}], 0, 1);
run_cmd(['qm', 'destroy', ${vmid_linked_clone}], 0, 1);
run_cmd(['qm', 'destroy', ${vmid_clone}], 0, 1);
run_cmd(['for', 'i', 'in', "/dev/rbd/${pool}/${namespace}/*;", 'do', '/usr/bin/rbd', 'unmap', '\$i;', 'done'], 0, 1);
run_cmd(
[
'for',
'i',
'in',
"/dev/rbd/${pool}/${namespace}/*;",
'do',
'/usr/bin/rbd',
'unmap',
'\$i;',
'done',
],
0,
1,
);
run_cmd(['qm', 'unlock', ${vmid}], 0, 1);
run_cmd(['qm', 'destroy', ${vmid}], 0, 1);
@ -237,8 +274,7 @@ my $tests = [
{
name => 'snapshot/rollback',
steps => [
['qm', 'snapshot', $vmid, 'test'],
['qm', 'rollback', $vmid, 'test'],
['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'],
],
cleanup => [
['qm', 'unlock', $vmid],
@ -260,8 +296,7 @@ my $tests = [
{
name => 'switch to krbd',
preparations => [
['qm', 'stop', $vmid],
['pvesm', 'set', $storage_name, '--krbd', 1]
['qm', 'stop', $vmid], ['pvesm', 'set', $storage_name, '--krbd', 1],
],
},
{
@ -273,8 +308,7 @@ my $tests = [
{
name => 'snapshot/rollback with krbd',
steps => [
['qm', 'snapshot', $vmid, 'test'],
['qm', 'rollback', $vmid, 'test'],
['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'],
],
cleanup => [
['qm', 'unlock', $vmid],
@ -304,7 +338,7 @@ my $tests = [
preparations => [
['qm', 'stop', $vmid],
['qm', 'stop', $vmid_clone],
['pvesm', 'set', $storage_name, '--krbd', 0]
['pvesm', 'set', $storage_name, '--krbd', 0],
],
},
{
@ -318,12 +352,9 @@ my $tests = [
},
{
name => 'start linked clone with krbd',
preparations => [
['pvesm', 'set', $storage_name, '--krbd', 1]
],
preparations => [['pvesm', 'set', $storage_name, '--krbd', 1]],
steps => [
['qm', 'start', $vmid_linked_clone],
['qm', 'stop', $vmid_linked_clone],
['qm', 'start', $vmid_linked_clone], ['qm', 'stop', $vmid_linked_clone],
],
},
];
@ -332,7 +363,7 @@ sub run_prep_cleanup {
my ($cmds) = @_;
for (@$cmds) {
print join(' ', @$_). "\n";
print join(' ', @$_) . "\n";
run_cmd($_);
}
}
@ -350,7 +381,7 @@ sub run_tests {
my $num_tests = 0;
for (@$tests) {
$num_tests += scalar(@{$_->{steps}}) if defined $_->{steps};
$num_tests += scalar(@{ $_->{steps} }) if defined $_->{steps};
}
print("Tests: $num_tests\n");


@ -51,15 +51,15 @@ EOF
my $permissions = {
'user1@test' => {},
'user2@test' => { '/' => ['Sys.Modify'], },
'user3@test' => { '/storage' => ['Datastore.Allocate'], },
'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'], },
'user2@test' => { '/' => ['Sys.Modify'] },
'user3@test' => { '/storage' => ['Datastore.Allocate'] },
'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'] },
};
my $pve_cluster_module;
$pve_cluster_module = Test::MockModule->new('PVE::Cluster');
$pve_cluster_module->mock(
cfs_update => sub {},
cfs_update => sub { },
get_config => sub {
my ($file) = @_;
if ($file eq 'datacenter.cfg') {
@ -94,106 +94,330 @@ $rpcenv_module->mock(
my $rpcenv = PVE::RPCEnvironment->init('pub');
my @tests = (
[ user => 'root@pam' ],
[ ['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default' ],
[ ['move', ['nolimit'], undef], 80, 'root / specific default limit, requesting default (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'root / specific default limit, requesting default (restore)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit' ],
[ ['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)' ],
[ ['unknown', ['nolimit'], 0], 0, 'root / generic default limit' ],
[ ['move', ['nolimit'], 0], 0, 'root / specific default limit (move)' ],
[ ['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)' ],
[ ['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit' ],
[ ['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)' ],
[ ['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)' ],
[ ['migrate', undef, 100], 100, 'root / undef storage (migrate)' ],
[ ['migrate', [], 100], 100, 'root / no storage (migrate)' ],
[ ['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)' ],
[ ['migrate', [undef, undef], 200], 200, 'root / list of undef storages with override (migrate)' ],
[user => 'root@pam'],
[['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default'],
[
['move', ['nolimit'], undef],
80,
'root / specific default limit, requesting default (move)',
],
[
['restore', ['nolimit'], undef],
60,
'root / specific default limit, requesting default (restore)',
],
[['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit'],
[['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)'],
[['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)'],
[['unknown', ['nolimit'], 0], 0, 'root / generic default limit'],
[['move', ['nolimit'], 0], 0, 'root / specific default limit (move)'],
[['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)'],
[['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit'],
[['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)'],
[['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)'],
[['migrate', undef, 100], 100, 'root / undef storage (migrate)'],
[['migrate', [], 100], 100, 'root / no storage (migrate)'],
[['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)'],
[
['migrate', [undef, undef], 200],
200,
'root / list of undef storages with override (migrate)',
],
[ user => 'user1@test' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit (restore)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)' ],
[ ['unknown', ['d200m400r300'], undef], 200, 'storage default limit above datacenter limits' ],
[ ['move', ['d200m400r300'], undef], 400, 'specific storage limit above datacenter limits (move)' ],
[ ['restore', ['d200m400r300'], undef], 300, 'specific storage limit above datacenter limits (restore)' ],
[ ['unknown', ['d50'], undef], 50, 'storage default limit' ],
[ ['move', ['d50'], undef], 50, 'storage default limit (move)' ],
[ ['restore', ['d50'], undef], 50, 'storage default limit (restore)' ],
[user => 'user1@test'],
[['unknown', ['nolimit'], undef], 100, 'generic default limit'],
[['move', ['nolimit'], undef], 80, 'specific default limit (move)'],
[['restore', ['nolimit'], undef], 60, 'specific default limit (restore)'],
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit'],
[['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)'],
[['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)'],
[
['unknown', ['d200m400r300'], undef],
200,
'storage default limit above datacenter limits',
],
[
['move', ['d200m400r300'], undef],
400,
'specific storage limit above datacenter limits (move)',
],
[
['restore', ['d200m400r300'], undef],
300,
'specific storage limit above datacenter limits (restore)',
],
[['unknown', ['d50'], undef], 50, 'storage default limit'],
[['move', ['d50'], undef], 50, 'storage default limit (move)'],
[['restore', ['d50'], undef], 50, 'storage default limit (restore)'],
[ user => 'user2@test' ],
[ ['unknown', ['nolimit'], 0], 0, 'generic default limit with Sys.Modify, passing unlimited' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)' ],
[ ['restore', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (restore)' ],
[ ['move', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (move)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)' ],
[user => 'user2@test'],
[
['unknown', ['nolimit'], 0],
0,
'generic default limit with Sys.Modify, passing unlimited',
],
[['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify'],
[['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)'],
[['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)'],
[
['restore', ['nolimit'], 0],
0,
'specific default limit with Sys.Modify, passing unlimited (restore)',
],
[
['move', ['nolimit'], 0],
0,
'specific default limit with Sys.Modify, passing unlimited (move)',
],
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify'],
[['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)'],
[['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)'],
[ user => 'user3@test' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /' ],
[ ['unknown', ['nolimit'], 80], 80, 'generic default limit with privileges on /, passing an override value' ],
[ ['unknown', ['nolimit'], 0], 0, 'generic default limit with privileges on /, passing unlimited' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)' ],
[ ['move', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on / (restore)' ],
[ ['restore', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on /, passing unlimited' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /' ],
[ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on, passing unlimited /' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)' ],
[ ['move', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on, passing unlimited / (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on / (restore)' ],
[ ['restore', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on /, passing unlimited (restore)' ],
[user => 'user3@test'],
[['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /'],
[
['unknown', ['nolimit'], 80],
80,
'generic default limit with privileges on /, passing an override value',
],
[
['unknown', ['nolimit'], 0],
0,
'generic default limit with privileges on /, passing unlimited',
],
[['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)'],
[
['move', ['nolimit'], 0],
0,
'specific default limit with privileges on /, passing unlimited (move)',
],
[
['restore', ['nolimit'], undef],
60,
'specific default limit with privileges on / (restore)',
],
[
['restore', ['nolimit'], 0],
0,
'specific default limit with privileges on /, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30'], 0],
0,
'storage default limit with privileges on /, passing unlimited',
],
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /'],
[
['unknown', ['d50m40r30'], 0],
0,
'storage default limit with privileges on, passing unlimited /',
],
[['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)'],
[
['move', ['d50m40r30'], 0],
0,
'specific storage limit with privileges on, passing unlimited / (move)',
],
[
['restore', ['d50m40r30'], undef],
30,
'specific storage limit with privileges on / (restore)',
],
[
['restore', ['d50m40r30'], 0],
0,
'specific storage limit with privileges on /, passing unlimited (restore)',
],
[ user => 'user4@test' ],
[ ['unknown', ['nolimit'], 10], 10, 'generic default limit with privileges on a different storage, passing lower override' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on a different storage' ],
[ ['unknown', ['nolimit'], 0], 100, 'generic default limit with privileges on a different storage, passing unlimited' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on a different storage (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on a different storage (restore)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on a different storage' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on a different storage (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on a different storage (restore)' ],
[ ['unknown', ['d20m40r30'], undef], 20, 'storage default limit with privileges on that storage' ],
[ ['unknown', ['d20m40r30'], 0], 0, 'storage default limit with privileges on that storage, passing unlimited' ],
[ ['move', ['d20m40r30'], undef], 40, 'specific storage limit with privileges on that storage (move)' ],
[ ['move', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (move)' ],
[ ['move', ['d20m40r30'], 10], 10, 'specific storage limit with privileges on that storage, passing low override (move)' ],
[ ['move', ['d20m40r30'], 300], 300, 'specific storage limit with privileges on that storage, passing high override (move)' ],
[ ['restore', ['d20m40r30'], undef], 30, 'specific storage limit with privileges on that storage (restore)' ],
[ ['restore', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30', 'd20m40r30'], 0], 50, 'multiple storages default limit with privileges on one of them, passing unlimited' ],
[ ['move', ['d50m40r30', 'd20m40r30'], 0], 40, 'multiple storages specific limit with privileges on one of them, passing unlimited (move)' ],
[ ['restore', ['d50m40r30', 'd20m40r30'], 0], 30, 'multiple storages specific limit with privileges on one of them, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them' ],
[ ['unknown', ['d10', 'd20m40r30'], undef], 10, 'multiple storages default limit with privileges on one of them (storage limited)' ],
[ ['move', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (move)' ],
[ ['restore', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore)' ],
[ ['restore', ['d10', 'd20m40r30'], 5], 5, 'multiple storages specific limit (storage limited) (restore), passing lower override' ],
[ ['restore', ['d200', 'd200m400r300'], 65], 65, 'multiple storages specific limit (storage limited) (restore), passing lower override' ],
[ ['restore', ['d200', 'd200m400r300'], 400], 200, 'multiple storages specific limit (storage limited) (restore), passing higher override' ],
[ ['restore', ['d200', 'd200m400r300'], 0], 200, 'multiple storages specific limit (storage limited) (restore), passing unlimited' ],
[ ['restore', ['d200', 'd200m400r300'], 1], 1, 'multiple storages specific limit (storage limited) (restore), passing 1' ],
[ ['restore', ['d10', 'd20m40r30'], 500], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override' ],
[ ['unknown', ['nolimit', 'd20m40r30'], 0], 100, 'multiple storages default limit with privileges on one of them, passing unlimited (default limited)' ],
[ ['move', ['nolimit', 'd20m40r30'], 0], 80, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)' ],
[ ['restore', ['nolimit', 'd20m40r30'], 0], 60, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)' ],
[ ['unknown', ['nolimit', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them (default limited)' ],
[ ['move', ['nolimit', 'd20m40r30'], undef], 40, 'multiple storages specific limit with privileges on one of them (default limited) (move)' ],
[ ['restore', ['nolimit', 'd20m40r30'], undef], 30, 'multiple storages specific limit with privileges on one of them (default limited) (restore)' ],
[ ['restore', ['d20m40r30', 'm50'], 200], 60, 'multiple storages specific limit with privileges on one of them (global default limited) (restore)' ],
[ ['move', ['nolimit', undef ], 40] , 40, 'multiple storages one undefined, passing 40 (move)' ],
[ ['move', undef, 100] , 80, 'undef storage, passing 100 (move)' ],
[ ['move', [undef], 100] , 80, '[undef] storage, passing 100 (move)' ],
[ ['move', [undef], undef] , 80, '[undef] storage, no override (move)' ],
[user => 'user4@test'],
[
['unknown', ['nolimit'], 10],
10,
'generic default limit with privileges on a different storage, passing lower override',
],
[
['unknown', ['nolimit'], undef],
100,
'generic default limit with privileges on a different storage',
],
[
['unknown', ['nolimit'], 0],
100,
'generic default limit with privileges on a different storage, passing unlimited',
],
[
['move', ['nolimit'], undef],
80,
'specific default limit with privileges on a different storage (move)',
],
[
['restore', ['nolimit'], undef],
60,
'specific default limit with privileges on a different storage (restore)',
],
[
['unknown', ['d50m40r30'], undef],
50,
'storage default limit with privileges on a different storage',
],
[
['move', ['d50m40r30'], undef],
40,
'specific storage limit with privileges on a different storage (move)',
],
[
['restore', ['d50m40r30'], undef],
30,
'specific storage limit with privileges on a different storage (restore)',
],
[
['unknown', ['d20m40r30'], undef],
20,
'storage default limit with privileges on that storage',
],
[
['unknown', ['d20m40r30'], 0],
0,
'storage default limit with privileges on that storage, passing unlimited',
],
[
['move', ['d20m40r30'], undef],
40,
'specific storage limit with privileges on that storage (move)',
],
[
['move', ['d20m40r30'], 0],
0,
'specific storage limit with privileges on that storage, passing unlimited (move)',
],
[
['move', ['d20m40r30'], 10],
10,
'specific storage limit with privileges on that storage, passing low override (move)',
],
[
['move', ['d20m40r30'], 300],
300,
'specific storage limit with privileges on that storage, passing high override (move)',
],
[
['restore', ['d20m40r30'], undef],
30,
'specific storage limit with privileges on that storage (restore)',
],
[
['restore', ['d20m40r30'], 0],
0,
'specific storage limit with privileges on that storage, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30', 'd20m40r30'], 0],
50,
'multiple storages default limit with privileges on one of them, passing unlimited',
],
[
['move', ['d50m40r30', 'd20m40r30'], 0],
40,
'multiple storages specific limit with privileges on one of them, passing unlimited (move)',
],
[
['restore', ['d50m40r30', 'd20m40r30'], 0],
30,
'multiple storages specific limit with privileges on one of them, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30', 'd20m40r30'], undef],
20,
'multiple storages default limit with privileges on one of them',
],
[
['unknown', ['d10', 'd20m40r30'], undef],
10,
'multiple storages default limit with privileges on one of them (storage limited)',
],
[
['move', ['d10', 'd20m40r30'], undef],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (move)',
],
[
['restore', ['d10', 'd20m40r30'], undef],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (restore)',
],
[
['restore', ['d10', 'd20m40r30'], 5],
5,
'multiple storages specific limit (storage limited) (restore), passing lower override',
],
[
['restore', ['d200', 'd200m400r300'], 65],
65,
'multiple storages specific limit (storage limited) (restore), passing lower override',
],
[
['restore', ['d200', 'd200m400r300'], 400],
200,
'multiple storages specific limit (storage limited) (restore), passing higher override',
],
[
['restore', ['d200', 'd200m400r300'], 0],
200,
'multiple storages specific limit (storage limited) (restore), passing unlimited',
],
[
['restore', ['d200', 'd200m400r300'], 1],
1,
'multiple storages specific limit (storage limited) (restore), passing 1',
],
[
['restore', ['d10', 'd20m40r30'], 500],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override',
],
[
['unknown', ['nolimit', 'd20m40r30'], 0],
100,
'multiple storages default limit with privileges on one of them, passing unlimited (default limited)',
],
[
['move', ['nolimit', 'd20m40r30'], 0],
80,
'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)',
],
[
['restore', ['nolimit', 'd20m40r30'], 0],
60,
'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)',
],
[
['unknown', ['nolimit', 'd20m40r30'], undef],
20,
'multiple storages default limit with privileges on one of them (default limited)',
],
[
['move', ['nolimit', 'd20m40r30'], undef],
40,
'multiple storages specific limit with privileges on one of them (default limited) (move)',
],
[
['restore', ['nolimit', 'd20m40r30'], undef],
30,
'multiple storages specific limit with privileges on one of them (default limited) (restore)',
],
[
['restore', ['d20m40r30', 'm50'], 200],
60,
'multiple storages specific limit with privileges on one of them (global default limited) (restore)',
],
[
['move', ['nolimit', undef], 40],
40,
'multiple storages one undefined, passing 40 (move)',
],
[['move', undef, 100], 80, 'undef storage, passing 100 (move)'],
[['move', [undef], 100], 80, '[undef] storage, passing 100 (move)'],
[['move', [undef], undef], 80, '[undef] storage, no override (move)'],
);
foreach my $t (@tests) {

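For illustration only (not part of this commit): each entry in the table above pairs an [$operation, \@storages, $override] tuple with the effective bandwidth limit the test expects back, while entries like [user => 'user4@test'] switch the acting user. The following minimal sketch shows how such a table could be consumed; run_bwlimit_tests and the $resolve_limit callback are assumed names, not the project's actual test code.

use strict;
use warnings;
use Test::More;

# Minimal sketch (assumed names): $resolve_limit is a code reference taking
# ($operation, \@storages, $override) and returning the effective limit. The
# real test binds this to the storage code with mocked users and privileges,
# which is not shown here.
sub run_bwlimit_tests {
    my ($resolve_limit, @tests) = @_;
    for my $t (@tests) {
        # Entries such as [user => 'user4@test'] switch the acting user in the
        # real test; this sketch simply skips them.
        next if !ref($t->[0]);
        my ($args, $expected, $name) = @$t;
        my ($op, $storages, $override) = @$args;
        is($resolve_limit->($op, $storages, $override), $expected, $name);
    }
    done_testing();
}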
View File

@ -5,8 +5,8 @@ use warnings;
use TAP::Harness;
my $harness = TAP::Harness->new( { verbosity => -2 });
my $res = $harness->runtests( "disklist_test.pm" );
my $harness = TAP::Harness->new({ verbosity => -2 });
my $res = $harness->runtests("disklist_test.pm");
exit -1 if !$res || $res->{failed} || $res->{parse_errors};

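As a side note on the runner above: TAP::Harness::runtests() returns a TAP::Parser::Aggregator, so the pass/fail decision can also be written against its documented accessors instead of raw hash access. A sketch with equivalent intent (not part of this commit; note that has_errors() additionally covers bad exit/wait status):

use strict;
use warnings;
use TAP::Harness;

my $harness = TAP::Harness->new({ verbosity => -2 });
my $aggregator = $harness->runtests("disklist_test.pm");
# has_errors() is true for failed tests, parse errors and bad exit/wait status.
exit(1) if !$aggregator || $aggregator->has_errors();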
View File

@ -10,11 +10,12 @@ use Test::More;
use Data::Dumper;
my $test_manifests = join ('/', $Bin, 'ovf_manifests');
my $test_manifests = join('/', $Bin, 'ovf_manifests');
print "parsing ovfs\n";
my $win2008 = eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") };
my $win2008 =
eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") };
if (my $err = $@) {
fail('parse win2008');
warn("error: $err\n");
@ -28,7 +29,8 @@ if (my $err = $@) {
} else {
ok('parse win10');
}
my $win10noNs = eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") };
my $win10noNs =
eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") };
if (my $err = $@) {
fail("parse win10 no default rasd NS");
warn("error: $err\n");
@ -38,26 +40,59 @@ if (my $err = $@) {
print "testing disks\n";
is($win2008->{disks}->[0]->{disk_address}, 'scsi0', 'multidisk vm has the correct first disk controller');
is($win2008->{disks}->[0]->{backing_file}, "$test_manifests/disk1.vmdk", 'multidisk vm has the correct first disk backing device');
is(
$win2008->{disks}->[0]->{disk_address},
'scsi0',
'multidisk vm has the correct first disk controller',
);
is(
$win2008->{disks}->[0]->{backing_file},
"$test_manifests/disk1.vmdk",
'multidisk vm has the correct first disk backing device',
);
is($win2008->{disks}->[0]->{virtual_size}, 2048, 'multidisk vm has the correct first disk size');
is($win2008->{disks}->[1]->{disk_address}, 'scsi1', 'multidisk vm has the correct second disk controller');
is($win2008->{disks}->[1]->{backing_file}, "$test_manifests/disk2.vmdk", 'multidisk vm has the correct second disk backing device');
is(
$win2008->{disks}->[1]->{disk_address},
'scsi1',
'multidisk vm has the correct second disk controller',
);
is(
$win2008->{disks}->[1]->{backing_file},
"$test_manifests/disk2.vmdk",
'multidisk vm has the correct second disk backing device',
);
is($win2008->{disks}->[1]->{virtual_size}, 2048, 'multidisk vm has the correct second disk size');
is($win10->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm has the correct disk controller');
is($win10->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm has the correct disk backing device');
is(
$win10->{disks}->[0]->{backing_file},
"$test_manifests/Win10-Liz-disk1.vmdk",
'single disk vm has the correct disk backing device',
);
is($win10->{disks}->[0]->{virtual_size}, 2048, 'single disk vm has the correct size');
is($win10noNs->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm (no default rasd NS) has the correct disk controller');
is($win10noNs->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm (no default rasd NS) has the correct disk backing device');
is($win10noNs->{disks}->[0]->{virtual_size}, 2048, 'single disk vm (no default rasd NS) has the correct size');
is(
$win10noNs->{disks}->[0]->{disk_address},
'scsi0',
'single disk vm (no default rasd NS) has the correct disk controller',
);
is(
$win10noNs->{disks}->[0]->{backing_file},
"$test_manifests/Win10-Liz-disk1.vmdk",
'single disk vm (no default rasd NS) has the correct disk backing device',
);
is(
$win10noNs->{disks}->[0]->{virtual_size},
2048,
'single disk vm (no default rasd NS) has the correct size',
);
print "testing nics\n";
is($win2008->{net}->{net0}->{model}, 'e1000', 'win2008 has correct nic model');
is($win10->{net}->{net0}->{model}, 'e1000e', 'win10 has correct nic model');
is($win10noNs->{net}->{net0}->{model}, 'e1000e', 'win10 (no default rasd NS) has correct nic model');
is($win10noNs->{net}->{net0}->{model}, 'e1000e',
'win10 (no default rasd NS) has correct nic model');
print "\ntesting vm.conf extraction\n";

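For readability only, this reconstructs the result shape that the single-disk assertions above rely on; it is inferred from the checked fields, not taken from PVE::GuestImport::OVF itself, and the $test_manifests value is a stand-in for the path built from $Bin in the real test.

use strict;
use warnings;

# Shape of the parse_ovf() result implied by the assertions above (illustrative).
my $test_manifests = '.../ovf_manifests';    # built from $Bin in the real test
my $expected_shape = {
    disks => [
        {
            disk_address => 'scsi0',
            backing_file => "$test_manifests/Win10-Liz-disk1.vmdk",
            virtual_size => 2048,
        },
    ],
    net => {
        net0 => { model => 'e1000e' },
    },
};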
View File

@ -8,7 +8,7 @@ $ENV{TZ} = 'UTC';
use TAP::Harness;
my $harness = TAP::Harness->new( { verbosity => -1 });
my $harness = TAP::Harness->new({ verbosity => -1 });
my $res = $harness->runtests(
"archive_info_test.pm",
"parse_volname_test.pm",

File diff suppressed because it is too large