auto-format code using perltidy with Proxmox style guide

using the new top-level `make tidy` target, which calls perltidy via our
wrapper to enforce the desired style as closely as possible.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
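The wrapper script and the perltidy profile it applies are not part of this excerpt. Purely as a hedged sketch of what such a top-level target tends to look like (the target name, file selection, and profile path below are assumptions, not the project's actual Makefile), it usually amounts to running perltidy in place over the tracked Perl sources with a shared configuration:

    tidy:
    	git ls-files -z '*.pm' '*.pl' | xargs -0 perltidy --profile=.perltidyrc -b -bext='/'

perltidy rewrites each file according to the profile (-b edits in place, -bext='/' drops the backup copies), which is what produces the purely mechanical whitespace and line-wrapping changes in the diff below.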
@@ -19,27 +19,27 @@ use PVE::API2::Disks::ZFS;
 use PVE::RESTHandler;

 use base qw(PVE::RESTHandler);

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 subclass => "PVE::API2::Disks::LVM",
 path => 'lvm',
 });

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 subclass => "PVE::API2::Disks::LVMThin",
 path => 'lvmthin',
 });

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 subclass => "PVE::API2::Disks::Directory",
 path => 'directory',
 });

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 subclass => "PVE::API2::Disks::ZFS",
 path => 'zfs',
 });

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'index',
 path => '',
 method => 'GET',
@@ -47,37 +47,38 @@ __PACKAGE__->register_method ({
 permissions => { user => 'all' },
 description => "Node index.",
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 },
 },
 returns => {
 type => 'array',
 items => {
 type => "object",
 properties => {},
 },
-links => [ { rel => 'child', href => "{name}" } ],
+links => [{ rel => 'child', href => "{name}" }],
 },
 code => sub {
 my ($param) = @_;

 my $result = [
 { name => 'list' },
 { name => 'initgpt' },
 { name => 'smart' },
 { name => 'lvm' },
 { name => 'lvmthin' },
 { name => 'directory' },
 { name => 'wipedisk' },
 { name => 'zfs' },
 ];

 return $result;
-}});
+},
+});
@@ -85,98 +86,97 @@ __PACKAGE__->register_method ({
 protected => 1,
 proxyto => 'node',
 permissions => {
 check => ['or', ['perm', '/', ['Sys.Audit']], ['perm', '/nodes/{node}', ['Sys.Audit']]],
 },
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 'include-partitions' => {
 description => "Also include partitions.",
 type => 'boolean',
 optional => 1,
 default => 0,
 },
 skipsmart => {
 description => "Skip smart checks.",
 type => 'boolean',
 optional => 1,
 default => 0,
 },
 type => {
 description => "Only list specific types of disks.",
 type => 'string',
 enum => ['unused', 'journal_disks'],
 optional => 1,
 },
 },
 },
 returns => {
 type => 'array',
 items => {
 type => 'object',
 properties => {
 devpath => {
 type => 'string',
 description => 'The device path',
 },
 used => { type => 'string', optional => 1 },
 gpt => { type => 'boolean' },
 mounted => { type => 'boolean' },
-size => { type => 'integer'},
-osdid => { type => 'integer'}, # TODO: deprecate / remove in PVE 9?
+size => { type => 'integer' },
+osdid => { type => 'integer' }, # TODO: deprecate / remove in PVE 9?
 'osdid-list' => {
 type => 'array',
 items => { type => 'integer' },
 },
 vendor => { type => 'string', optional => 1 },
 model => { type => 'string', optional => 1 },
 serial => { type => 'string', optional => 1 },
-wwn => { type => 'string', optional => 1},
-health => { type => 'string', optional => 1},
+wwn => { type => 'string', optional => 1 },
+health => { type => 'string', optional => 1 },
 parent => {
 type => 'string',
-description => 'For partitions only. The device path of ' .
-'the disk the partition resides on.',
-optional => 1
+description => 'For partitions only. The device path of '
+. 'the disk the partition resides on.',
+optional => 1,
 },
 },
 },
 },
 code => sub {
 my ($param) = @_;

 my $skipsmart = $param->{skipsmart} // 0;
 my $include_partitions = $param->{'include-partitions'} // 0;

 my $disks = PVE::Diskmanage::get_disks(
-undef,
-$skipsmart,
-$include_partitions
+undef, $skipsmart, $include_partitions,
 );

 my $type = $param->{type} // '';
 my $result = [];

 foreach my $disk (sort keys %$disks) {
 my $entry = $disks->{$disk};
 if ($type eq 'journal_disks') {
 next if $entry->{osdid} >= 0;
 if (my $usage = $entry->{used}) {
-next if !($usage eq 'partitions' && $entry->{gpt}
-|| $usage eq 'LVM');
+next
+if !($usage eq 'partitions' && $entry->{gpt} || $usage eq 'LVM');
 }
 } elsif ($type eq 'unused') {
 next if $entry->{used};
 } elsif ($type ne '') {
 die "internal error"; # should not happen
 }
 push @$result, $entry;
 }
 return $result;
-}});
+},
+});
@@ -184,47 +184,48 @@ __PACKAGE__->register_method ({
 protected => 1,
 proxyto => "node",
 permissions => {
 check => ['perm', '/', ['Sys.Audit']],
 },
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 disk => {
 type => 'string',
 pattern => '^/dev/[a-zA-Z0-9\/]+$',
 description => "Block device name",
 },
 healthonly => {
 type => 'boolean',
 description => "If true returns only the health status",
 optional => 1,
 },
 },
 },
 returns => {
 type => 'object',
 properties => {
 health => { type => 'string' },
 type => { type => 'string', optional => 1 },
-attributes => { type => 'array', optional => 1},
+attributes => { type => 'array', optional => 1 },
 text => { type => 'string', optional => 1 },
 },
 },
 code => sub {
 my ($param) = @_;

 my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});

 my $result = PVE::Diskmanage::get_smart_data($disk, $param->{healthonly});

 $result->{health} = 'UNKNOWN' if !defined $result->{health};
 $result = { health => $result->{health} } if $param->{healthonly};

 return $result;
-}});
+},
+});
@@ -232,48 +233,49 @@ __PACKAGE__->register_method ({
 protected => 1,
 proxyto => "node",
 permissions => {
 check => ['perm', '/', ['Sys.Modify']],
 },
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 disk => {
 type => 'string',
 description => "Block device name",
 pattern => '^/dev/[a-zA-Z0-9\/]+$',
 },
 uuid => {
 type => 'string',
 description => 'UUID for the GPT table',
 pattern => '[a-fA-F0-9\-]+',
 maxLength => 36,
 optional => 1,
 },
 },
 },
 returns => { type => 'string' },
 code => sub {
 my ($param) = @_;

 my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});

 my $rpcenv = PVE::RPCEnvironment::get();

 my $authuser = $rpcenv->get_user();

 die "$disk is a partition\n" if PVE::Diskmanage::is_partition($disk);
 die "disk $disk already in use\n" if PVE::Diskmanage::disk_is_used($disk);
 my $worker = sub {
 PVE::Diskmanage::init_disk($disk, $param->{uuid});
 };

 my $diskid = $disk;
 $diskid =~ s|^.*/||; # remove all up to the last slash
 return $rpcenv->fork_worker('diskinit', $diskid, $authuser, $worker);
-}});
+},
+});
@@ -281,39 +283,40 @@ __PACKAGE__->register_method ({
 proxyto => 'node',
 protected => 1,
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 disk => {
 type => 'string',
 description => "Block device name",
 pattern => '^/dev/[a-zA-Z0-9\/]+$',
 },
 },
 },
 returns => { type => 'string' },
 code => sub {
 my ($param) = @_;

 my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});

 my $mounted = PVE::Diskmanage::is_mounted($disk);
 die "disk/partition '${mounted}' is mounted\n" if $mounted;

 my $held = PVE::Diskmanage::has_holder($disk);
 die "disk/partition '${held}' has a holder\n" if $held;

 my $rpcenv = PVE::RPCEnvironment::get();
 my $authuser = $rpcenv->get_user();

 my $worker = sub {
 PVE::Diskmanage::wipe_blockdev($disk);
 PVE::Diskmanage::udevadm_trigger($disk);
 };

 my $basename = basename($disk); # avoid '/' in the ID

 return $rpcenv->fork_worker('wipedisk', $basename, $authuser, $worker);
-}});
+},
+});

 1;
@@ -30,32 +30,32 @@ my $read_ini = sub {
 my $section;

 foreach my $line (@lines) {
 $line = trim($line);
 if ($line =~ m/^\[([^\]]+)\]/) {
 $section = $1;
 if (!defined($result->{$section})) {
 $result->{$section} = {};
 }
 } elsif ($line =~ m/^(.*?)=(.*)$/) {
 my ($key, $val) = ($1, $2);
 if (!$section) {
 warn "key value pair found without section, skipping\n";
 next;
 }

 if ($result->{$section}->{$key}) {
 # make duplicate properties to arrays to keep the order
 my $prop = $result->{$section}->{$key};
 if (ref($prop) eq 'ARRAY') {
 push @$prop, $val;
 } else {
 $result->{$section}->{$key} = [$prop, $val];
 }
 } else {
 $result->{$section}->{$key} = $val;
 }
 }
 # ignore everything else
 }

 return $result;
@@ -67,341 +67,366 @@ my $write_ini = sub {
 my $content = "";

 foreach my $sname (sort keys %$ini) {
 my $section = $ini->{$sname};

 $content .= "[$sname]\n";

 foreach my $pname (sort keys %$section) {
 my $prop = $section->{$pname};

 if (!ref($prop)) {
 $content .= "$pname=$prop\n";
 } elsif (ref($prop) eq 'ARRAY') {
 foreach my $val (@$prop) {
 $content .= "$pname=$val\n";
 }
 } else {
 die "invalid property '$pname'\n";
 }
 }
 $content .= "\n";
 }

 file_set_contents($filename, $content);
 };

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'index',
 path => '',
 method => 'GET',
 proxyto => 'node',
 protected => 1,
 permissions => {
 check => ['perm', '/', ['Sys.Audit']],
 },
 description => "PVE Managed Directory storages.",
 parameters => {
 additionalProperties => 0,
 properties => {
 node => get_standard_option('pve-node'),
 },
 },
 ...
 code => sub {
 my ($param) = @_;

 my $result = [];

-dir_glob_foreach('/etc/systemd/system', '^mnt-pve-(.+)\.mount$', sub {
-my ($filename, $storid) = @_;
-$storid = PVE::Systemd::unescape_unit($storid);
+dir_glob_foreach(
+'/etc/systemd/system',
+'^mnt-pve-(.+)\.mount$',
+sub {
+my ($filename, $storid) = @_;
+$storid = PVE::Systemd::unescape_unit($storid);

 my $unitfile = "/etc/systemd/system/$filename";
 my $unit = $read_ini->($unitfile);

-push @$result, {
-unitfile => $unitfile,
-path => "/mnt/pve/$storid",
-device => $unit->{'Mount'}->{'What'},
-type => $unit->{'Mount'}->{'Type'},
-options => $unit->{'Mount'}->{'Options'},
-};
-});
+push @$result,
+{
+unitfile => $unitfile,
+path => "/mnt/pve/$storid",
+device => $unit->{'Mount'}->{'What'},
+type => $unit->{'Mount'}->{'Type'},
+options => $unit->{'Mount'}->{'Options'},
+};
+},
+);

 return $result;
-}});
+},
+});

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'create',
 path => '',
 method => 'POST',
 proxyto => 'node',
 protected => 1,
 permissions => {
-description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
+description =>
+"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
 check => ['perm', '/', ['Sys.Modify']],
 },
-description => "Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.",
+description =>
+"Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.",
 ...
 code => sub {
 my ($param) = @_;
 ...
 if ($param->{add_storage}) {
 $rpcenv->check($user, "/storage", ['Datastore.Allocate']);

 # reserve the name and add as disabled, will be enabled below if creation works out
 PVE::API2::Storage::Config->create_or_update(
-$name, $node, $storage_params, $verify_params, 1);
+$name, $node, $storage_params, $verify_params, 1,
+);
 }
 ...
 my $worker = sub {
 PVE::Diskmanage::locked_disk_action(sub {
 ...
 my ($devname) = $dev =~ m|^/dev/(.*)$|;
 $part = "/dev/";
-dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.+/, sub {
-my ($partition) = @_;
-$part .= $partition;
-});
+dir_glob_foreach(
+"/sys/block/$devname",
+qr/\Q$devname\E.+/,
+sub {
+my ($partition) = @_;
+$part .= $partition;
+},
+);
 }
 ...
 $cmd = [$BLKID, $part, '-o', 'export'];
 print "# ", join(' ', @$cmd), "\n";
-run_command($cmd, outfunc => sub {
-my ($line) = @_;
+run_command(
+$cmd,
+outfunc => sub {
+my ($line) = @_;

 if ($line =~ m/^UUID=(.*)$/) {
 $uuid = $1;
 $uuid_path = "/dev/disk/by-uuid/$uuid";
 }
-});
+},
+);
 ...
 if ($param->{add_storage}) {
 PVE::API2::Storage::Config->create_or_update(
-$name, $node, $storage_params, $verify_params);
+$name, $node, $storage_params, $verify_params,
+);
 }
 });
 };

 return $rpcenv->fork_worker('dircreate', $name, $user, $worker);
-}});
+},
+});

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'delete',
 path => '{name}',
 method => 'DELETE',
 proxyto => 'node',
 protected => 1,
 permissions => {
-description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
+description =>
+"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
 check => ['perm', '/', ['Sys.Modify']],
 },
 description => "Unmounts the storage and removes the mount unit.",
 ...
 'cleanup-config' => {
-description => "Marks associated storage(s) as not available on this node anymore ".
-"or removes them from the configuration (if configured for this node only).",
+description =>
+"Marks associated storage(s) as not available on this node anymore "
+. "or removes them from the configuration (if configured for this node only).",
 type => 'boolean',
 optional => 1,
 default => 0,
 },
 ...
 code => sub {
 my ($param) = @_;
 ...
 run_command(['systemctl', 'stop', $mountunitname]);
 run_command(['systemctl', 'disable', $mountunitname]);

-unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n";
+unlink $mountunitpath
+or $! == ENOENT
+or die "cannot remove $mountunitpath - $!\n";

 my $config_err;
 if ($param->{'cleanup-config'}) {
 my $match = sub {
 my ($scfg) = @_;
 return $scfg->{type} eq 'dir' && $scfg->{path} eq $path;
 };
-eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+eval {
+PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
+};
 warn $config_err = $@ if $@;
 }

 if ($to_wipe) {
 PVE::Diskmanage::wipe_blockdev($to_wipe);
 PVE::Diskmanage::udevadm_trigger($to_wipe);
 }

 die "config cleanup failed - $config_err" if $config_err;
 });
 };

 return $rpcenv->fork_worker('dirremove', $name, $user, $worker);
-}});
+},
+});

 1;
@@ -14,266 +14,277 @@ use PVE::RESTHandler;

 use base qw(PVE::RESTHandler);

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'index',
 path => '',
 method => 'GET',
 proxyto => 'node',
 protected => 1,
 permissions => {
 check => ['perm', '/', ['Sys.Audit']],
 },
 description => "List LVM Volume Groups",
 ...
 size => {
 type => 'integer',
-description => 'The size of the physical volume in bytes',
+description =>
+'The size of the physical volume in bytes',
 },
 free => {
 type => 'integer',
 description => 'The free bytes in the physical volume',
 },
 ...
 code => sub {
 my ($param) = @_;

 my $result = [];

 my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);

 foreach my $vg_name (sort keys %$vgs) {
 my $vg = $vgs->{$vg_name};
 $vg->{name} = $vg_name;
 $vg->{leaf} = 0;
-foreach my $pv (@{$vg->{pvs}}) {
+foreach my $pv (@{ $vg->{pvs} }) {
 $pv->{leaf} = 1;
 }
 $vg->{children} = delete $vg->{pvs};
 push @$result, $vg;
 }

 return {
 leaf => 0,
 children => $result,
 };
-}});
+},
+});

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'create',
 path => '',
 method => 'POST',
 proxyto => 'node',
 protected => 1,
 permissions => {
-description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
+description =>
+"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
 check => ['perm', '/', ['Sys.Modify']],
 },
 description => "Create an LVM Volume Group",
 ...
 code => sub {
 my ($param) = @_;
 ...
 if ($param->{add_storage}) {
 $rpcenv->check($user, "/storage", ['Datastore.Allocate']);

 # reserve the name and add as disabled, will be enabled below if creation works out
 PVE::API2::Storage::Config->create_or_update(
-$name, $node, $storage_params, $verify_params, 1);
+$name, $node, $storage_params, $verify_params, 1,
+);
 }

 my $worker = sub {
 PVE::Diskmanage::locked_disk_action(sub {
 ...
 if ($param->{add_storage}) {
 PVE::API2::Storage::Config->create_or_update(
-$name, $node, $storage_params, $verify_params);
+$name, $node, $storage_params, $verify_params,
+);
 }
 });
 };

 return $rpcenv->fork_worker('lvmcreate', $name, $user, $worker);
-}});
+},
+});

-__PACKAGE__->register_method ({
+__PACKAGE__->register_method({
 name => 'delete',
 path => '{name}',
 method => 'DELETE',
 proxyto => 'node',
 protected => 1,
 permissions => {
-description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
+description =>
+"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
 check => ['perm', '/', ['Sys.Modify']],
 },
 description => "Remove an LVM Volume Group.",
 ...
 'cleanup-config' => {
-description => "Marks associated storage(s) as not available on this node anymore ".
-"or removes them from the configuration (if configured for this node only).",
+description =>
+"Marks associated storage(s) as not available on this node anymore "
+. "or removes them from the configuration (if configured for this node only).",
 type => 'boolean',
 optional => 1,
 default => 0,
 },
 ...
 code => sub {
 my ($param) = @_;
 ...
 my $config_err;
 if ($param->{'cleanup-config'}) {
 my $match = sub {
 my ($scfg) = @_;
 return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name;
 };
-eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+eval {
+PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
+};
 warn $config_err = $@ if $@;
 }

 die "config cleanup failed - $config_err" if $config_err;
 });
 };

 return $rpcenv->fork_worker('lvmremove', $name, $user, $worker);
-}});
+},
+});

 1;
@ -15,255 +15,269 @@ use PVE::RESTHandler;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit']],
|
||||
check => ['perm', '/', ['Sys.Audit']],
|
||||
},
|
||||
description => "List LVM thinpools",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
lv => {
|
||||
type => 'string',
|
||||
description => 'The name of the thinpool.',
|
||||
},
|
||||
vg => {
|
||||
type => 'string',
|
||||
description => 'The associated volume group.',
|
||||
},
|
||||
lv_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the thinpool in bytes.',
|
||||
},
|
||||
used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the thinpool.',
|
||||
},
|
||||
metadata_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the metadata lv in bytes.',
|
||||
},
|
||||
metadata_used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the metadata lv.',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
lv => {
|
||||
type => 'string',
|
||||
description => 'The name of the thinpool.',
|
||||
},
|
||||
vg => {
|
||||
type => 'string',
|
||||
description => 'The associated volume group.',
|
||||
},
|
||||
lv_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the thinpool in bytes.',
|
||||
},
|
||||
used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the thinpool.',
|
||||
},
|
||||
metadata_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the metadata lv in bytes.',
|
||||
},
|
||||
metadata_used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the metadata lv.',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
|
||||
}});
|
||||
my ($param) = @_;
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'create',
path => '',
method => 'POST',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'add_storage'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Create an LVM thinpool",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
device => {
type => 'string',
description => 'The block device you want to create the thinpool on.',
},
add_storage => {
description => "Configure storage using the thinpool.",
type => 'boolean',
optional => 1,
default => 0,
},
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
device => {
type => 'string',
description => 'The block device you want to create the thinpool on.',
},
add_storage => {
description => "Configure storage using the thinpool.",
type => 'boolean',
optional => 1,
default => 0,
},
},
},
returns => { type => 'string' },
code => sub {
my ($param) = @_;
my ($param) = @_;

my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();
my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();

my $name = $param->{name};
my $dev = $param->{device};
my $node = $param->{node};
my $name = $param->{name};
my $dev = $param->{device};
my $node = $param->{node};

$dev = PVE::Diskmanage::verify_blockdev_path($dev);
PVE::Diskmanage::assert_disk_unused($dev);
$dev = PVE::Diskmanage::verify_blockdev_path($dev);
PVE::Diskmanage::assert_disk_unused($dev);

my $storage_params = {
type => 'lvmthin',
vgname => $name,
thinpool => $name,
storage => $name,
content => 'rootdir,images',
nodes => $node,
};
my $verify_params = [qw(vgname thinpool)];
my $storage_params = {
type => 'lvmthin',
vgname => $name,
thinpool => $name,
storage => $name,
content => 'rootdir,images',
nodes => $node,
};
my $verify_params = [qw(vgname thinpool)];

if ($param->{add_storage}) {
$rpcenv->check($user, "/storage", ['Datastore.Allocate']);
if ($param->{add_storage}) {
$rpcenv->check($user, "/storage", ['Datastore.Allocate']);

# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1);
}
# reserve the name and add as disabled, will be enabled below if creation works out
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params, 1,
);
}

my $worker = sub {
PVE::Diskmanage::locked_disk_action(sub {
PVE::Diskmanage::assert_disk_unused($dev);
my $worker = sub {
PVE::Diskmanage::locked_disk_action(sub {
PVE::Diskmanage::assert_disk_unused($dev);

die "volume group with name '${name}' already exists on node '${node}'\n"
if PVE::Storage::LVMPlugin::lvm_vgs()->{$name};
die "volume group with name '${name}' already exists on node '${node}'\n"
if PVE::Storage::LVMPlugin::lvm_vgs()->{$name};

if (PVE::Diskmanage::is_partition($dev)) {
eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
warn $@ if $@;
}
if (PVE::Diskmanage::is_partition($dev)) {
eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
warn $@ if $@;
}

PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
# keep some free space just in case
my $datasize = $pv->{size} - 128*1024;
# default to 1% for metadata
my $metadatasize = $datasize/100;
# but at least 1G, as recommended in lvmthin man
$metadatasize = 1024*1024 if $metadatasize < 1024*1024;
# but at most 16G, which is the current lvm max
$metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024;
# shrink data by needed amount for metadata
$datasize -= 2*$metadatasize;
PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
# keep some free space just in case
my $datasize = $pv->{size} - 128 * 1024;
# default to 1% for metadata
my $metadatasize = $datasize / 100;
# but at least 1G, as recommended in lvmthin man
$metadatasize = 1024 * 1024 if $metadatasize < 1024 * 1024;
# but at most 16G, which is the current lvm max
$metadatasize = 16 * 1024 * 1024 if $metadatasize > 16 * 1024 * 1024;
# shrink data by needed amount for metadata
$datasize -= 2 * $metadatasize;

run_command([
'/sbin/lvcreate',
'--type', 'thin-pool',
"-L${datasize}K",
'--poolmetadatasize', "${metadatasize}K",
'-n', $name,
$name
]);
run_command([
'/sbin/lvcreate',
'--type',
'thin-pool',
"-L${datasize}K",
'--poolmetadatasize',
"${metadatasize}K",
'-n',
$name,
$name,
]);

PVE::Diskmanage::udevadm_trigger($dev);
PVE::Diskmanage::udevadm_trigger($dev);

if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params);
}
});
};
if ($param->{add_storage}) {
PVE::API2::Storage::Config->create_or_update(
$name, $node, $storage_params, $verify_params,
);
}
});
};

return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
}});
return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
},
});

__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'delete',
path => '{name}',
method => 'DELETE',
proxyto => 'node',
protected => 1,
permissions => {
description => "Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
description =>
"Requires additionally 'Datastore.Allocate' on /storage when setting 'cleanup-config'",
check => ['perm', '/', ['Sys.Modify']],
},
description => "Remove an LVM thin pool.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
'volume-group' => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description => "Marks associated storage(s) as not available on this node anymore ".
"or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
},
'cleanup-disks' => {
description => "Also wipe disks so they can be repurposed afterwards.",
type => 'boolean',
optional => 1,
default => 0,
},
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => get_standard_option('pve-storage-id'),
'volume-group' => get_standard_option('pve-storage-id'),
'cleanup-config' => {
description =>
"Marks associated storage(s) as not available on this node anymore "
. "or removes them from the configuration (if configured for this node only).",
type => 'boolean',
optional => 1,
default => 0,
},
'cleanup-disks' => {
description => "Also wipe disks so they can be repurposed afterwards.",
type => 'boolean',
optional => 1,
default => 0,
},
},
},
returns => { type => 'string' },
code => sub {
my ($param) = @_;
my ($param) = @_;

my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();
my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();

$rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'};
$rpcenv->check($user, "/storage", ['Datastore.Allocate']) if $param->{'cleanup-config'};

my $vg = $param->{'volume-group'};
my $lv = $param->{name};
my $node = $param->{node};
my $vg = $param->{'volume-group'};
my $lv = $param->{name};
my $node = $param->{node};

my $worker = sub {
PVE::Diskmanage::locked_disk_action(sub {
my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools();
my $worker = sub {
PVE::Diskmanage::locked_disk_action(sub {
my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools();

die "no such thin pool ${vg}/${lv}\n"
if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;
die "no such thin pool ${vg}/${lv}\n"
if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;

run_command(['lvremove', '-y', "${vg}/${lv}"]);
run_command(['lvremove', '-y', "${vg}/${lv}"]);

my $config_err;
if ($param->{'cleanup-config'}) {
my $match = sub {
my ($scfg) = @_;
return $scfg->{type} eq 'lvmthin'
&& $scfg->{vgname} eq $vg
&& $scfg->{thinpool} eq $lv;
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
warn $config_err = $@ if $@;
}
my $config_err;
if ($param->{'cleanup-config'}) {
my $match = sub {
my ($scfg) = @_;
return
$scfg->{type} eq 'lvmthin'
&& $scfg->{vgname} eq $vg
&& $scfg->{thinpool} eq $lv;
};
eval {
PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node);
};
warn $config_err = $@ if $@;
}

if ($param->{'cleanup-disks'}) {
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
if ($param->{'cleanup-disks'}) {
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);

die "no such volume group '$vg'\n" if !$vgs->{$vg};
die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;
die "no such volume group '$vg'\n" if !$vgs->{$vg};
die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;

my $wiped = [];
eval {
for my $pv ($vgs->{$vg}->{pvs}->@*) {
my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
PVE::Diskmanage::wipe_blockdev($dev);
push $wiped->@*, $dev;
}
};
my $err = $@;
PVE::Diskmanage::udevadm_trigger($wiped->@*);
die "cleanup failed - $err" if $err;
}
my $wiped = [];
eval {
for my $pv ($vgs->{$vg}->{pvs}->@*) {
my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
PVE::Diskmanage::wipe_blockdev($dev);
push $wiped->@*, $dev;
}
};
my $err = $@;
PVE::Diskmanage::udevadm_trigger($wiped->@*);
die "cleanup failed - $err" if $err;
}

die "config cleanup failed - $config_err" if $config_err;
});
};
die "config cleanup failed - $config_err" if $config_err;
});
};

return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
}});
return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
},
});

1;

@ -29,10 +29,12 @@ my $api_storage_config = sub {
my $scfg = dclone(PVE::Storage::storage_config($cfg, $storeid));
$scfg->{storage} = $storeid;
$scfg->{digest} = $cfg->{digest};
$scfg->{content} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content});
$scfg->{content} =
PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content});

if ($scfg->{nodes}) {
$scfg->{nodes} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes});
$scfg->{nodes} =
PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes});
}

return $scfg;
@ -47,21 +49,21 @@ sub cleanup_storages_for_node {
my $cluster_nodes = PVE::Cluster::get_nodelist();

for my $storeid (keys $config->{ids}->%*) {
my $scfg = PVE::Storage::storage_config($config, $storeid);
next if !$match->($scfg);
my $scfg = PVE::Storage::storage_config($config, $storeid);
next if !$match->($scfg);

my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
next if !$nodes->{$node}; # not configured on $node, so nothing to do
delete $nodes->{$node};
my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
next if !$nodes->{$node}; # not configured on $node, so nothing to do
delete $nodes->{$node};

if (scalar(keys $nodes->%*) > 0) {
$self->update({
nodes => join(',', sort keys $nodes->%*),
storage => $storeid,
});
} else {
$self->delete({storage => $storeid});
}
if (scalar(keys $nodes->%*) > 0) {
$self->update({
nodes => join(',', sort keys $nodes->%*),
storage => $storeid,
});
} else {
$self->delete({ storage => $storeid });
}
}
}

@ -80,355 +82,375 @@ sub create_or_update {
my $scfg = PVE::Storage::storage_config($cfg, $sid, 1);

if ($scfg) {
die "storage config for '${sid}' exists but no parameters to verify were provided\n"
if !$verify_params;
die "storage config for '${sid}' exists but no parameters to verify were provided\n"
if !$verify_params;

$node = PVE::INotify::nodename() if !$node || ($node eq 'localhost');
die "Storage ID '${sid}' already exists on node ${node}\n"
if !defined($scfg->{nodes}) || $scfg->{nodes}->{$node};
$node = PVE::INotify::nodename() if !$node || ($node eq 'localhost');
die "Storage ID '${sid}' already exists on node ${node}\n"
if !defined($scfg->{nodes}) || $scfg->{nodes}->{$node};

# check for type mismatch first to get a clear error
for my $key ('type', $verify_params->@*) {
if (!defined($scfg->{$key})) {
die "Option '${key}' is not configured for storage '$sid', "
."expected it to be '$storage_params->{$key}'";
}
if ($storage_params->{$key} ne $scfg->{$key}) {
die "Option '${key}' ($storage_params->{$key}) does not match "
."existing storage configuration '$scfg->{$key}'\n";
}
}
# check for type mismatch first to get a clear error
for my $key ('type', $verify_params->@*) {
if (!defined($scfg->{$key})) {
die "Option '${key}' is not configured for storage '$sid', "
. "expected it to be '$storage_params->{$key}'";
}
if ($storage_params->{$key} ne $scfg->{$key}) {
die "Option '${key}' ($storage_params->{$key}) does not match "
. "existing storage configuration '$scfg->{$key}'\n";
}
}
}

if (!$dryrun) {
if ($scfg) {
if ($scfg->{nodes}) {
$scfg->{nodes}->{$node} = 1;
$self->update({
nodes => join(',', sort keys $scfg->{nodes}->%*),
storage => $sid,
});
print "Added '${node}' to nodes for storage '${sid}'\n";
}
} else {
$self->create($storage_params);
}
if ($scfg) {
if ($scfg->{nodes}) {
$scfg->{nodes}->{$node} = 1;
$self->update({
nodes => join(',', sort keys $scfg->{nodes}->%*),
storage => $sid,
});
print "Added '${node}' to nodes for storage '${sid}'\n";
}
} else {
$self->create($storage_params);
}
}
}

__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "Storage index.",
|
||||
permissions => {
|
||||
description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
|
||||
user => 'all',
|
||||
description =>
|
||||
"Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
|
||||
user => 'all',
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
type => {
|
||||
description => "Only list storage of specific type",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
type => {
|
||||
description => "Only list storage of specific type",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => { storage => { type => 'string'} },
|
||||
},
|
||||
links => [ { rel => 'child', href => "{storage}" } ],
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => { storage => { type => 'string' } },
|
||||
},
|
||||
links => [{ rel => 'child', href => "{storage}" }],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my @sids = PVE::Storage::storage_ids($cfg);
|
||||
my @sids = PVE::Storage::storage_ids($cfg);
|
||||
|
||||
my $res = [];
|
||||
foreach my $storeid (@sids) {
|
||||
my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ];
|
||||
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
|
||||
my $res = [];
|
||||
foreach my $storeid (@sids) {
|
||||
my $privs = ['Datastore.Audit', 'Datastore.AllocateSpace'];
|
||||
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
|
||||
|
||||
my $scfg = &$api_storage_config($cfg, $storeid);
|
||||
next if $param->{type} && $param->{type} ne $scfg->{type};
|
||||
push @$res, $scfg;
|
||||
}
|
||||
my $scfg = &$api_storage_config($cfg, $storeid);
|
||||
next if $param->{type} && $param->{type} ne $scfg->{type};
|
||||
push @$res, $scfg;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}});
|
||||
return $res;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'read',
|
||||
path => '{storage}',
|
||||
method => 'GET',
|
||||
description => "Read storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
},
|
||||
returns => { type => 'object' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
return &$api_storage_config($cfg, $param->{storage});
|
||||
}});
|
||||
return &$api_storage_config($cfg, $param->{storage});
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'create',
|
||||
protected => 1,
|
||||
path => '',
|
||||
method => 'POST',
|
||||
description => "Create a new storage.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => PVE::Storage::Plugin->createSchema(),
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
# revent an empty nodelist.
|
||||
# fix me in section config create never need an empty entity.
|
||||
delete $param->{nodes} if !$param->{nodes};
|
||||
# revent an empty nodelist.
|
||||
# fix me in section config create never need an empty entity.
|
||||
delete $param->{nodes} if !$param->{nodes};
|
||||
|
||||
my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type);
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, []);
|
||||
my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type);
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, []);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 1, 1);
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 1, 1);
|
||||
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(
|
||||
sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) {
|
||||
die "storage ID '$storeid' already defined\n";
|
||||
}
|
||||
if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) {
|
||||
die "storage ID '$storeid' already defined\n";
|
||||
}
|
||||
|
||||
$cfg->{ids}->{$storeid} = $opts;
|
||||
$cfg->{ids}->{$storeid} = $opts;
|
||||
|
||||
$returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive);
|
||||
$returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive);
|
||||
|
||||
if (defined($opts->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
|
||||
warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
|
||||
." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"
|
||||
}
|
||||
if (defined($opts->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
|
||||
warn
|
||||
"NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
|
||||
. " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n";
|
||||
}
|
||||
|
||||
eval {
|
||||
# try to activate if enabled on local node,
|
||||
# we only do this to detect errors/problems sooner
|
||||
if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) {
|
||||
PVE::Storage::activate_storage($cfg, $storeid);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
eval { $plugin->on_delete_hook($storeid, $opts) };
|
||||
warn "$@\n" if $@;
|
||||
die $err;
|
||||
}
|
||||
eval {
|
||||
# try to activate if enabled on local node,
|
||||
# we only do this to detect errors/problems sooner
|
||||
if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) {
|
||||
PVE::Storage::activate_storage($cfg, $storeid);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
eval { $plugin->on_delete_hook($storeid, $opts) };
|
||||
warn "$@\n" if $@;
|
||||
die $err;
|
||||
}
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "create storage failed");
|
||||
},
|
||||
"create storage failed",
|
||||
);
|
||||
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
}});
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'update',
|
||||
protected => 1,
|
||||
path => '{storage}',
|
||||
method => 'PUT',
|
||||
description => "Update storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => PVE::Storage::Plugin->updateSchema(),
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $digest = extract_param($param, 'digest');
|
||||
my $delete = extract_param($param, 'delete');
|
||||
my $type;
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $digest = extract_param($param, 'digest');
|
||||
my $delete = extract_param($param, 'delete');
|
||||
my $type;
|
||||
|
||||
if ($delete) {
|
||||
$delete = [ PVE::Tools::split_list($delete) ];
|
||||
}
|
||||
if ($delete) {
|
||||
$delete = [PVE::Tools::split_list($delete)];
|
||||
}
|
||||
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(
|
||||
sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::SectionConfig::assert_if_modified($cfg, $digest);
|
||||
PVE::SectionConfig::assert_if_modified($cfg, $digest);
|
||||
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
$type = $scfg->{type};
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
$type = $scfg->{type};
|
||||
|
||||
my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type);
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, $delete);
|
||||
my $sensitive_params = PVE::Storage::Plugin::sensitive_properties($type);
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, $delete);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 0, 1);
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 0, 1);
|
||||
|
||||
if ($delete) {
|
||||
my $options = $plugin->private()->{options}->{$type};
|
||||
foreach my $k (@$delete) {
|
||||
my $d = $options->{$k} || die "no such option '$k'\n";
|
||||
die "unable to delete required option '$k'\n" if !$d->{optional};
|
||||
die "unable to delete fixed option '$k'\n" if $d->{fixed};
|
||||
die "cannot set and delete property '$k' at the same time!\n"
|
||||
if defined($opts->{$k});
|
||||
if ($delete) {
|
||||
my $options = $plugin->private()->{options}->{$type};
|
||||
foreach my $k (@$delete) {
|
||||
my $d = $options->{$k} || die "no such option '$k'\n";
|
||||
die "unable to delete required option '$k'\n" if !$d->{optional};
|
||||
die "unable to delete fixed option '$k'\n" if $d->{fixed};
|
||||
die "cannot set and delete property '$k' at the same time!\n"
|
||||
if defined($opts->{$k});
|
||||
|
||||
delete $scfg->{$k};
|
||||
}
|
||||
}
|
||||
delete $scfg->{$k};
|
||||
}
|
||||
}
|
||||
|
||||
$returned_config = $plugin->on_update_hook($storeid, $opts, %$sensitive);
|
||||
$returned_config = $plugin->on_update_hook($storeid, $opts, %$sensitive);
|
||||
|
||||
for my $k (keys %$opts) {
|
||||
$scfg->{$k} = $opts->{$k};
|
||||
}
|
||||
for my $k (keys %$opts) {
|
||||
$scfg->{$k} = $opts->{$k};
|
||||
}
|
||||
|
||||
if (defined($scfg->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
|
||||
warn "NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
|
||||
." in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n"
|
||||
}
|
||||
if (defined($scfg->{mkdir})) { # TODO: remove complete option in Proxmox VE 9
|
||||
warn
|
||||
"NOTE: The 'mkdir' option set for '${storeid}' is deprecated and will be removed"
|
||||
. " in Proxmox VE 9. Use 'create-base-path' or 'create-subdirs' instead.\n";
|
||||
}
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "update storage failed");
|
||||
},
|
||||
"update storage failed",
|
||||
);
|
||||
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
}});
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'delete',
|
||||
protected => 1,
|
||||
path => '{storage}', # /storage/config/{storage}
|
||||
method => 'DELETE',
|
||||
description => "Delete storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
PVE::Storage::lock_storage_config(
|
||||
sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
die "can't remove storage - storage is used as base of another storage\n"
|
||||
if PVE::Storage::storage_is_used($cfg, $storeid);
|
||||
die "can't remove storage - storage is used as base of another storage\n"
|
||||
if PVE::Storage::storage_is_used($cfg, $storeid);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
|
||||
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
|
||||
|
||||
$plugin->on_delete_hook($storeid, $scfg);
|
||||
$plugin->on_delete_hook($storeid, $scfg);
|
||||
|
||||
delete $cfg->{ids}->{$storeid};
|
||||
delete $cfg->{ids}->{$storeid};
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "delete storage failed");
|
||||
},
|
||||
"delete storage failed",
|
||||
);
|
||||
|
||||
PVE::AccessControl::remove_storage_access($storeid);
|
||||
PVE::AccessControl::remove_storage_access($storeid);
|
||||
|
||||
return undef;
|
||||
}});
|
||||
return undef;
|
||||
},
|
||||
});
|
||||
|
||||
1;
|
||||
|
||||
@ -16,214 +16,248 @@ use PVE::SSHInfo;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "List storage content.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
check => [
|
||||
'perm',
|
||||
'/storage/{storage}',
|
||||
['Datastore.Audit', 'Datastore.AllocateSpace'],
|
||||
any => 1,
|
||||
],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
content => {
|
||||
description => "Only list content of this type.",
|
||||
type => 'string', format => 'pve-storage-content',
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_content_type,
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only list images for this VM",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
},
|
||||
),
|
||||
content => {
|
||||
description => "Only list content of this type.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-content',
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_content_type,
|
||||
},
|
||||
vmid => get_standard_option(
|
||||
'pve-vmid',
|
||||
{
|
||||
description => "Only list images for this VM",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Volume identifier.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => {
|
||||
description => "Associated Owner VMID.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
parent => {
|
||||
description => "Volume identifier of parent (for linked cloned).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
'format' => {
|
||||
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins " .
|
||||
"do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
ctime => {
|
||||
description => "Creation time (seconds since the UNIX Epoch).",
|
||||
type => 'integer',
|
||||
minimum => 0,
|
||||
optional => 1,
|
||||
},
|
||||
notes => {
|
||||
description => "Optional notes. If they contain multiple lines, only the first one is returned here.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
encrypted => {
|
||||
description => "If whole backup is encrypted, value is the fingerprint or '1' "
|
||||
." if encrypted. Only useful for the Proxmox Backup Server storage type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
verification => {
|
||||
description => "Last backup verification result, only useful for PBS storages.",
|
||||
type => 'object',
|
||||
properties => {
|
||||
state => {
|
||||
description => "Last backup verification state.",
|
||||
type => 'string',
|
||||
},
|
||||
upid => {
|
||||
description => "Last backup verification UPID.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{volid}" } ],
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Volume identifier.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => {
|
||||
description => "Associated Owner VMID.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
parent => {
|
||||
description => "Volume identifier of parent (for linked cloned).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
'format' => {
|
||||
description =>
|
||||
"Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins "
|
||||
. "do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
ctime => {
|
||||
description => "Creation time (seconds since the UNIX Epoch).",
|
||||
type => 'integer',
|
||||
minimum => 0,
|
||||
optional => 1,
|
||||
},
|
||||
notes => {
|
||||
description =>
|
||||
"Optional notes. If they contain multiple lines, only the first one is returned here.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
encrypted => {
|
||||
description =>
|
||||
"If whole backup is encrypted, value is the fingerprint or '1' "
|
||||
. " if encrypted. Only useful for the Proxmox Backup Server storage type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
verification => {
|
||||
description =>
|
||||
"Last backup verification result, only useful for PBS storages.",
|
||||
type => 'object',
|
||||
properties => {
|
||||
state => {
|
||||
description => "Last backup verification state.",
|
||||
type => 'string',
|
||||
},
|
||||
upid => {
|
||||
description => "Last backup verification UPID.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
links => [{ rel => 'child', href => "{volid}" }],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
|
||||
my $authuser = $rpcenv->get_user();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $storeid = $param->{storage};
|
||||
my $storeid = $param->{storage};
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vollist = PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content});
|
||||
my $vollist =
|
||||
PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content});
|
||||
|
||||
my $res = [];
|
||||
foreach my $item (@$vollist) {
|
||||
eval { PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $item->{volid}); };
|
||||
next if $@;
|
||||
$item->{vmid} = int($item->{vmid}) if defined($item->{vmid});
|
||||
$item->{size} = int($item->{size}) if defined($item->{size});
|
||||
$item->{used} = int($item->{used}) if defined($item->{used});
|
||||
push @$res, $item;
|
||||
}
|
||||
my $res = [];
|
||||
foreach my $item (@$vollist) {
|
||||
eval {
|
||||
PVE::Storage::check_volume_access(
|
||||
$rpcenv, $authuser, $cfg, undef, $item->{volid},
|
||||
);
|
||||
};
|
||||
next if $@;
|
||||
$item->{vmid} = int($item->{vmid}) if defined($item->{vmid});
|
||||
$item->{size} = int($item->{size}) if defined($item->{size});
|
||||
$item->{used} = int($item->{used}) if defined($item->{used});
|
||||
push @$res, $item;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}});
|
||||
return $res;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
description => "Allocate disk images.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.AllocateSpace']],
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.AllocateSpace']],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
filename => {
|
||||
description => "The name of the file to create.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Specify owner VM",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
size => {
|
||||
description => "Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)",
|
||||
type => 'string',
|
||||
pattern => '\d+[MG]?',
|
||||
},
|
||||
format => get_standard_option('pve-storage-image-format', {
|
||||
requires => 'size',
|
||||
optional => 1,
|
||||
}),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
},
|
||||
),
|
||||
filename => {
|
||||
description => "The name of the file to create.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => get_standard_option(
|
||||
'pve-vmid',
|
||||
{
|
||||
description => "Specify owner VM",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
},
|
||||
),
|
||||
size => {
|
||||
description =>
|
||||
"Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)",
|
||||
type => 'string',
|
||||
pattern => '\d+[MG]?',
|
||||
},
|
||||
format => get_standard_option(
|
||||
'pve-storage-image-format',
|
||||
{
|
||||
requires => 'size',
|
||||
optional => 1,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = $param->{storage};
|
||||
my $name = $param->{filename};
|
||||
my $sizestr = $param->{size};
|
||||
my $storeid = $param->{storage};
|
||||
my $name = $param->{filename};
|
||||
my $sizestr = $param->{size};
|
||||
|
||||
my $size;
|
||||
if ($sizestr =~ m/^\d+$/) {
|
||||
$size = $sizestr;
|
||||
} elsif ($sizestr =~ m/^(\d+)M$/) {
|
||||
$size = $1 * 1024;
|
||||
} elsif ($sizestr =~ m/^(\d+)G$/) {
|
||||
$size = $1 * 1024 * 1024;
|
||||
} else {
|
||||
raise_param_exc({ size => "unable to parse size '$sizestr'" });
|
||||
}
|
||||
my $size;
|
||||
if ($sizestr =~ m/^\d+$/) {
|
||||
$size = $sizestr;
|
||||
} elsif ($sizestr =~ m/^(\d+)M$/) {
|
||||
$size = $1 * 1024;
|
||||
} elsif ($sizestr =~ m/^(\d+)G$/) {
|
||||
$size = $1 * 1024 * 1024;
|
||||
} else {
|
||||
raise_param_exc({ size => "unable to parse size '$sizestr'" });
|
||||
}
|
||||
|
||||
# extract FORMAT from name
|
||||
if ($name =~ m/\.(raw|qcow2|vmdk)$/) {
|
||||
my $fmt = $1;
|
||||
# extract FORMAT from name
|
||||
if ($name =~ m/\.(raw|qcow2|vmdk)$/) {
|
||||
my $fmt = $1;
|
||||
|
||||
raise_param_exc({ format => "different storage formats ($param->{format} != $fmt)" })
|
||||
if $param->{format} && $param->{format} ne $fmt;
|
||||
raise_param_exc({
|
||||
format => "different storage formats ($param->{format} != $fmt)" })
|
||||
if $param->{format} && $param->{format} ne $fmt;
|
||||
|
||||
$param->{format} = $fmt;
|
||||
}
|
||||
$param->{format} = $fmt;
|
||||
}
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $volid = PVE::Storage::vdisk_alloc ($cfg, $storeid, $param->{vmid},
|
||||
$param->{format},
|
||||
$name, $size);
|
||||
my $volid = PVE::Storage::vdisk_alloc(
|
||||
$cfg, $storeid, $param->{vmid}, $param->{format}, $name, $size,
|
||||
);
|
||||
|
||||
return $volid;
|
||||
}});
|
||||
return $volid;
|
||||
},
|
||||
});
|
||||
|
||||
# we allow to pass volume names (without storage prefix) if the storage
|
||||
# is specified as separate parameter.
|
||||
@ -233,257 +267,268 @@ my $real_volume_id = sub {
|
||||
my $volid;
|
||||
|
||||
if ($volume =~ m/:/) {
|
||||
eval {
|
||||
my ($sid, $volname) = PVE::Storage::parse_volume_id ($volume);
|
||||
die "storage ID mismatch ($sid != $storeid)\n"
|
||||
if $storeid && $sid ne $storeid;
|
||||
$volid = $volume;
|
||||
$storeid = $sid;
|
||||
};
|
||||
raise_param_exc({ volume => $@ }) if $@;
|
||||
eval {
|
||||
my ($sid, $volname) = PVE::Storage::parse_volume_id($volume);
|
||||
die "storage ID mismatch ($sid != $storeid)\n"
|
||||
if $storeid && $sid ne $storeid;
|
||||
$volid = $volume;
|
||||
$storeid = $sid;
|
||||
};
|
||||
raise_param_exc({ volume => $@ }) if $@;
|
||||
|
||||
} else {
|
||||
raise_param_exc({ volume => "no storage specified - incomplete volume ID" })
|
||||
if !$storeid;
|
||||
raise_param_exc({ volume => "no storage specified - incomplete volume ID" })
|
||||
if !$storeid;
|
||||
|
||||
$volid = "$storeid:$volume";
|
||||
$volid = "$storeid:$volume";
|
||||
}
|
||||
|
||||
return wantarray ? ($volid, $storeid) : $volid;
|
||||
};
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'info',
|
||||
path => '{volume}',
|
||||
method => 'GET',
|
||||
description => "Get volume attributes",
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
path => {
|
||||
description => "The Path",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins " .
|
||||
"do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
format => {
|
||||
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "Optional notes.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
type => 'object',
|
||||
properties => {
|
||||
path => {
|
||||
description => "The Path",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins "
|
||||
. "do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
format => {
|
||||
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "Optional notes.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
|
||||
my $path = PVE::Storage::path($cfg, $volid);
|
||||
my ($size, $format, $used, $parent) = PVE::Storage::volume_size_info($cfg, $volid);
|
||||
die "volume_size_info on '$volid' failed - no format\n" if !$format;
|
||||
die "volume_size_info on '$volid' failed - no size\n" if !defined($size);
|
||||
die "volume '$volid' has size zero\n" if !$size && $format ne 'subvol';
|
||||
my $path = PVE::Storage::path($cfg, $volid);
|
||||
my ($size, $format, $used, $parent) = PVE::Storage::volume_size_info($cfg, $volid);
|
||||
die "volume_size_info on '$volid' failed - no format\n" if !$format;
|
||||
die "volume_size_info on '$volid' failed - no size\n" if !defined($size);
|
||||
die "volume '$volid' has size zero\n" if !$size && $format ne 'subvol';
|
||||
|
||||
my $entry = {
|
||||
path => $path,
|
||||
size => int($size), # cast to integer in case it was changed to a string previously
|
||||
used => int($used),
|
||||
format => $format,
|
||||
};
|
||||
my $entry = {
|
||||
path => $path,
|
||||
size => int($size), # cast to integer in case it was changed to a string previously
|
||||
used => int($used),
|
||||
format => $format,
|
||||
};
|
||||
|
||||
for my $attribute (qw(notes protected)) {
|
||||
# keep going if fetching an optional attribute fails
|
||||
eval {
|
||||
my $value = PVE::Storage::get_volume_attribute($cfg, $volid, $attribute);
|
||||
$entry->{$attribute} = $value if defined($value);
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
for my $attribute (qw(notes protected)) {
|
||||
# keep going if fetching an optional attribute fails
|
||||
eval {
|
||||
my $value = PVE::Storage::get_volume_attribute($cfg, $volid, $attribute);
|
||||
$entry->{$attribute} = $value if defined($value);
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
return $entry;
|
||||
}});
|
||||
return $entry;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'updateattributes',
|
||||
path => '{volume}',
|
||||
method => 'PUT',
|
||||
description => "Update volume attributes",
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "The new notes.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "The new notes.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
|
||||
for my $attr (qw(notes protected)) {
|
||||
if (exists $param->{$attr}) {
|
||||
PVE::Storage::update_volume_attribute($cfg, $volid, $attr, $param->{$attr});
|
||||
}
|
||||
}
|
||||
for my $attr (qw(notes protected)) {
|
||||
if (exists $param->{$attr}) {
|
||||
PVE::Storage::update_volume_attribute($cfg, $volid, $attr, $param->{$attr});
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
}});
|
||||
return undef;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'delete',
|
||||
path => '{volume}',
|
||||
method => 'DELETE',
|
||||
description => "Delete volume",
|
||||
permissions => {
|
||||
description => "You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).",
|
||||
user => 'all',
|
||||
description =>
|
||||
"You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
delay => {
|
||||
type => 'integer',
|
||||
description => "Time to wait for the task to finish. We return 'null' if the task finish within that time.",
|
||||
minimum => 1,
|
||||
maximum => 30,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
},
|
||||
),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
delay => {
|
||||
type => 'integer',
|
||||
description =>
|
||||
"Time to wait for the task to finish. We return 'null' if the task finish within that time.",
|
||||
minimum => 1,
|
||||
maximum => 30,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string', optional => 1, },
|
||||
returns => { type => 'string', optional => 1 },
|
||||
code => sub {
my ($param) = @_;
my ($param) = @_;

my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();

my $cfg = PVE::Storage::config();
my $cfg = PVE::Storage::config();

my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});

my ($path, $ownervm, $vtype) = PVE::Storage::path($cfg, $volid);
if ($vtype eq 'backup' && $ownervm) {
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
$rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']);
} else {
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
}
my ($path, $ownervm, $vtype) = PVE::Storage::path($cfg, $volid);
if ($vtype eq 'backup' && $ownervm) {
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
$rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']);
} else {
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
}

my $worker = sub {
PVE::Storage::vdisk_free ($cfg, $volid);
print "Removed volume '$volid'\n";
if ($vtype eq 'backup'
&& $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/) {
# Remove log file #318 and notes file #3972 if they still exist
PVE::Storage::archive_auxiliaries_remove($path);
}
};
my $worker = sub {
PVE::Storage::vdisk_free($cfg, $volid);
print "Removed volume '$volid'\n";
if (
$vtype eq 'backup'
&& $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/
) {
# Remove log file #318 and notes file #3972 if they still exist
PVE::Storage::archive_auxiliaries_remove($path);
}
};

my $id = (defined $ownervm ? "$ownervm@" : '') . $storeid;
my $upid = $rpcenv->fork_worker('imgdel', $id, $authuser, $worker);
my $background_delay = $param->{delay};
if ($background_delay) {
my $end_time = time() + $background_delay;
my $currently_deleting; # not necessarily true, e.g. sequential api call from cli
do {
my $task = PVE::Tools::upid_decode($upid);
$currently_deleting = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
sleep 1 if $currently_deleting;
} while (time() < $end_time && $currently_deleting);
my $id = (defined $ownervm ? "$ownervm@" : '') . $storeid;
my $upid = $rpcenv->fork_worker('imgdel', $id, $authuser, $worker);
my $background_delay = $param->{delay};
if ($background_delay) {
my $end_time = time() + $background_delay;
my $currently_deleting; # not necessarily true, e.g. sequential api call from cli
do {
my $task = PVE::Tools::upid_decode($upid);
$currently_deleting =
PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
sleep 1 if $currently_deleting;
} while (time() < $end_time && $currently_deleting);

if (!$currently_deleting) {
my $status = PVE::Tools::upid_read_status($upid);
chomp $status;
return undef if !PVE::Tools::upid_status_is_error($status);
die "$status\n";
}
}
return $upid;
}});
if (!$currently_deleting) {
my $status = PVE::Tools::upid_read_status($upid);
chomp $status;
return undef if !PVE::Tools::upid_status_is_error($status);
die "$status\n";
}
}
return $upid;
},
});

__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'copy',
path => '{volume}',
method => 'POST',
@ -491,70 +536,80 @@ __PACKAGE__->register_method ({
protected => 1,
proxyto => 'node',
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', { optional => 1}),
volume => {
description => "Source volume identifier",
type => 'string',
},
target => {
description => "Target volume identifier",
type => 'string',
},
target_node => get_standard_option('pve-node', {
description => "Target node. Default is local node.",
optional => 1,
}),
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id', { optional => 1 }),
volume => {
description => "Source volume identifier",
type => 'string',
},
target => {
description => "Target volume identifier",
type => 'string',
},
target_node => get_standard_option(
'pve-node',
{
description => "Target node. Default is local node.",
optional => 1,
},
),
},
},
returns => {
type => 'string',
type => 'string',
},
code => sub {
my ($param) = @_;
my ($param) = @_;

my $rpcenv = PVE::RPCEnvironment::get();
my $rpcenv = PVE::RPCEnvironment::get();

my $user = $rpcenv->get_user();
my $user = $rpcenv->get_user();

my $target_node = $param->{target_node} || PVE::INotify::nodename();
# pvesh examples
# cd /nodes/localhost/storage/local/content
# pve:/> create local:103/vm-103-disk-1.raw -target local:103/vm-103-disk-2.raw
# pve:/> create 103/vm-103-disk-1.raw -target 103/vm-103-disk-3.raw
my $target_node = $param->{target_node} || PVE::INotify::nodename();
# pvesh examples
# cd /nodes/localhost/storage/local/content
# pve:/> create local:103/vm-103-disk-1.raw -target local:103/vm-103-disk-2.raw
# pve:/> create 103/vm-103-disk-1.raw -target 103/vm-103-disk-3.raw

my $src_volid = &$real_volume_id($param->{storage}, $param->{volume});
my $dst_volid = &$real_volume_id($param->{storage}, $param->{target});
my $src_volid = &$real_volume_id($param->{storage}, $param->{volume});
my $dst_volid = &$real_volume_id($param->{storage}, $param->{target});

print "DEBUG: COPY $src_volid TO $dst_volid\n";
print "DEBUG: COPY $src_volid TO $dst_volid\n";

my $cfg = PVE::Storage::config();
my $cfg = PVE::Storage::config();

# do all parameter checks first
# do all parameter checks first

# then do all short running task (to raise errors before we go to background)
# then do all short running task (to raise errors before we go to background)

# then start the worker task
my $worker = sub {
my $upid = shift;
# then start the worker task
my $worker = sub {
my $upid = shift;

print "DEBUG: starting worker $upid\n";
print "DEBUG: starting worker $upid\n";

my ($target_sid, $target_volname) = PVE::Storage::parse_volume_id($dst_volid);
#my $target_ip = PVE::Cluster::remote_node_ip($target_node);
my ($target_sid, $target_volname) = PVE::Storage::parse_volume_id($dst_volid);
#my $target_ip = PVE::Cluster::remote_node_ip($target_node);

# you need to get this working (fails currently, because storage_migrate() uses
# ssh to connect to local host (which is not needed
my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node);
PVE::Storage::storage_migrate($cfg, $src_volid, $sshinfo, $target_sid, {'target_volname' => $target_volname});
# you need to get this working (fails currently, because storage_migrate() uses
# ssh to connect to local host (which is not needed
my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node);
PVE::Storage::storage_migrate(
$cfg,
$src_volid,
$sshinfo,
$target_sid,
{ 'target_volname' => $target_volname },
);

print "DEBUG: end worker $upid\n";
print "DEBUG: end worker $upid\n";

};
};

return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
}});
return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
},
});

1;

@ -20,204 +20,219 @@ my $parse_volname_or_id = sub {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volume, 1);

if (defined($sid)) {
raise_param_exc({ volume => "storage ID mismatch ($sid != $storeid)." })
if $sid ne $storeid;
raise_param_exc({ volume => "storage ID mismatch ($sid != $storeid)." })
if $sid ne $storeid;

$volid = $volume;
$volid = $volume;
} elsif ($volume =~ m/^backup\//) {
$volid = "$storeid:$volume";
$volid = "$storeid:$volume";
} else {
$volid = "$storeid:backup/$volume";
$volid = "$storeid:backup/$volume";
}

return $volid;
};

__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'list',
path => 'list',
method => 'GET',
proxyto => 'node',
permissions => {
description => "You need read access for the volume.",
user => 'all',
description => "You need read access for the volume.",
user => 'all',
},
description => "List files and directories for single file restore under the given path.",
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
volume => {
|
||||
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file being listed, or "/".',
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
},
|
||||
),
|
||||
volume => {
|
||||
description =>
|
||||
"Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file being listed, or "/".',
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
filepath => {
|
||||
description => "base64 path of the current entry",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "Entry type.",
|
||||
type => 'string',
|
||||
},
|
||||
text => {
|
||||
description => "Entry display text.",
|
||||
type => 'string',
|
||||
},
|
||||
leaf => {
|
||||
description => "If this entry is a leaf in the directory graph.",
|
||||
type => 'boolean',
|
||||
},
|
||||
size => {
|
||||
description => "Entry file size.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
mtime => {
|
||||
description => "Entry last-modified time (unix timestamp).",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
filepath => {
|
||||
description => "base64 path of the current entry",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "Entry type.",
|
||||
type => 'string',
|
||||
},
|
||||
text => {
|
||||
description => "Entry display text.",
|
||||
type => 'string',
|
||||
},
|
||||
leaf => {
|
||||
description => "If this entry is a leaf in the directory graph.",
|
||||
type => 'boolean',
|
||||
},
|
||||
size => {
|
||||
description => "Entry file size.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
mtime => {
|
||||
description => "Entry last-modified time (unix timestamp).",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
protected => 1,
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $path = extract_param($param, 'filepath') || "/";
|
||||
my $base64 = $path ne "/";
|
||||
my $path = extract_param($param, 'filepath') || "/";
|
||||
my $base64 = $path ne "/";
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
|
||||
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
|
||||
if $scfg->{type} ne 'pbs';
|
||||
raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." })
|
||||
if $scfg->{type} ne 'pbs';
|
||||
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $ret = $client->file_restore_list($snap, $path, $base64, { timeout => 25 });
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $ret = $client->file_restore_list($snap, $path, $base64, { timeout => 25 });
|
||||
|
||||
if (ref($ret) eq "HASH") {
|
||||
my $msg = $ret->{message};
|
||||
if (my $code = $ret->{code}) {
|
||||
die PVE::Exception->new("$msg\n", code => $code);
|
||||
} else {
|
||||
die "$msg\n";
|
||||
}
|
||||
} elsif (ref($ret) eq "ARRAY") {
|
||||
# 'leaf' is a proper JSON boolean, map to perl-y bool
|
||||
# TODO: make PBSClient decode all bools always as 1/0?
|
||||
foreach my $item (@$ret) {
|
||||
$item->{leaf} = $item->{leaf} ? 1 : 0;
|
||||
}
|
||||
if (ref($ret) eq "HASH") {
|
||||
my $msg = $ret->{message};
|
||||
if (my $code = $ret->{code}) {
|
||||
die PVE::Exception->new("$msg\n", code => $code);
|
||||
} else {
|
||||
die "$msg\n";
|
||||
}
|
||||
} elsif (ref($ret) eq "ARRAY") {
|
||||
# 'leaf' is a proper JSON boolean, map to perl-y bool
|
||||
# TODO: make PBSClient decode all bools always as 1/0?
|
||||
foreach my $item (@$ret) {
|
||||
$item->{leaf} = $item->{leaf} ? 1 : 0;
|
||||
}
|
||||
|
||||
return $ret;
|
||||
}
|
||||
return $ret;
|
||||
}
|
||||
|
||||
die "invalid proxmox-file-restore output";
|
||||
}});
|
||||
die "invalid proxmox-file-restore output";
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'download',
|
||||
path => 'download',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
download_allowed => 1,
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
description => "Extract a file or directory (as zip archive) from a PBS backup.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
volume => {
|
||||
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file to download.',
|
||||
type => 'string',
|
||||
},
|
||||
tar => {
|
||||
description => "Download dirs as 'tar.zst' instead of 'zip'.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
},
|
||||
),
|
||||
volume => {
|
||||
description =>
|
||||
"Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file to download.',
|
||||
type => 'string',
|
||||
},
|
||||
tar => {
|
||||
description => "Download dirs as 'tar.zst' instead of 'zip'.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'any', # download
|
||||
type => 'any', # download
|
||||
},
|
||||
protected => 1,
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $path = extract_param($param, 'filepath');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
my $tar = extract_param($param, 'tar') // 0;
|
||||
my $path = extract_param($param, 'filepath');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
my $tar = extract_param($param, 'tar') // 0;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
|
||||
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
|
||||
if $scfg->{type} ne 'pbs';
|
||||
raise_param_exc({ 'storage' => "Only PBS storages supported for file-restore." })
|
||||
if $scfg->{type} ne 'pbs';
|
||||
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $fifo = $client->file_restore_extract_prepare();
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $fifo = $client->file_restore_extract_prepare();
|
||||
|
||||
$rpcenv->fork_worker('pbs-download', undef, $user, sub {
|
||||
my $name = decode_base64($path);
|
||||
print "Starting download of file: $name\n";
|
||||
$client->file_restore_extract($fifo, $snap, $path, 1, $tar);
|
||||
});
|
||||
$rpcenv->fork_worker(
|
||||
'pbs-download',
|
||||
undef,
|
||||
$user,
|
||||
sub {
|
||||
my $name = decode_base64($path);
|
||||
print "Starting download of file: $name\n";
|
||||
$client->file_restore_extract($fifo, $snap, $path, 1, $tar);
|
||||
},
|
||||
);
|
||||
|
||||
my $ret = {
|
||||
download => {
|
||||
path => $fifo,
|
||||
stream => 1,
|
||||
'content-type' => 'application/octet-stream',
|
||||
},
|
||||
};
|
||||
return $ret;
|
||||
}});
|
||||
my $ret = {
|
||||
download => {
|
||||
path => $fifo,
|
||||
stream => 1,
|
||||
'content-type' => 'application/octet-stream',
|
||||
},
|
||||
};
|
||||
return $ret;
|
||||
},
|
||||
});
|
||||
|
||||
1;
|
||||
|
||||
@ -12,153 +12,185 @@ use PVE::Tools qw(extract_param);

use base qw(PVE::RESTHandler);

__PACKAGE__->register_method ({
__PACKAGE__->register_method({
name => 'dryrun',
path => '',
method => 'GET',
description => "Get prune information for backups. NOTE: this is only a preview and might not be " .
"what a subsequent prune call does if backups are removed/added in the meantime.",
description =>
"Get prune information for backups. NOTE: this is only a preview and might not be "
. "what a subsequent prune call does if backups are removed/added in the meantime.",
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
check => [
'perm',
'/storage/{storage}',
['Datastore.Audit', 'Datastore.AllocateSpace'],
any => 1,
],
},
protected => 1,
proxyto => 'node',
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
'prune-backups' => get_standard_option('prune-backups', {
|
||||
description => "Use these retention options instead of those from the storage configuration.",
|
||||
optional => 1,
|
||||
}),
|
||||
type => {
|
||||
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only consider backups for this guest.",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
},
|
||||
),
|
||||
'prune-backups' => get_standard_option(
|
||||
'prune-backups',
|
||||
{
|
||||
description =>
|
||||
"Use these retention options instead of those from the storage configuration.",
|
||||
optional => 1,
|
||||
},
|
||||
),
|
||||
type => {
|
||||
description =>
|
||||
"Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option(
|
||||
'pve-vmid',
|
||||
{
|
||||
description => "Only consider backups for this guest.",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Backup volume ID.",
|
||||
type => 'string',
|
||||
},
|
||||
'ctime' => {
|
||||
description => "Creation time of the backup (seconds since the UNIX epoch).",
|
||||
type => 'integer',
|
||||
},
|
||||
'mark' => {
|
||||
description => "Whether the backup would be kept or removed. Backups that are" .
|
||||
" protected or don't use the standard naming scheme are not removed.",
|
||||
type => 'string',
|
||||
enum => ['keep', 'remove', 'protected', 'renamed'],
|
||||
},
|
||||
type => {
|
||||
description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.",
|
||||
type => 'string',
|
||||
},
|
||||
'vmid' => {
|
||||
description => "The VM the backup belongs to.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Backup volume ID.",
|
||||
type => 'string',
|
||||
},
|
||||
'ctime' => {
|
||||
description =>
|
||||
"Creation time of the backup (seconds since the UNIX epoch).",
|
||||
type => 'integer',
|
||||
},
|
||||
'mark' => {
|
||||
description =>
|
||||
"Whether the backup would be kept or removed. Backups that are"
|
||||
. " protected or don't use the standard naming scheme are not removed.",
|
||||
type => 'string',
|
||||
enum => ['keep', 'remove', 'protected', 'renamed'],
|
||||
},
|
||||
type => {
|
||||
description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.",
|
||||
type => 'string',
|
||||
},
|
||||
'vmid' => {
|
||||
description => "The VM the backup belongs to.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
|
||||
return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1);
|
||||
}});
|
||||
return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1);
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
__PACKAGE__->register_method({
|
||||
name => 'delete',
|
||||
path => '',
|
||||
method => 'DELETE',
|
||||
description => "Prune backups. Only those using the standard naming scheme are considered.",
|
||||
permissions => {
|
||||
description => "You need the 'Datastore.Allocate' privilege on the storage " .
|
||||
"(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).",
|
||||
user => 'all',
|
||||
description => "You need the 'Datastore.Allocate' privilege on the storage "
|
||||
. "(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
'prune-backups' => get_standard_option('prune-backups', {
|
||||
description => "Use these retention options instead of those from the storage configuration.",
|
||||
}),
|
||||
type => {
|
||||
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only prune backups for this VM.",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
optional => 1,
|
||||
}),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option(
|
||||
'pve-storage-id',
|
||||
{
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
},
|
||||
),
|
||||
'prune-backups' => get_standard_option(
|
||||
'prune-backups',
|
||||
{
|
||||
description =>
|
||||
"Use these retention options instead of those from the storage configuration.",
|
||||
},
|
||||
),
|
||||
type => {
|
||||
description =>
|
||||
"Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option(
|
||||
'pve-vmid',
|
||||
{
|
||||
description => "Only prune backups for this VM.",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
optional => 1,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
|
||||
if (defined($vmid)) {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
|
||||
$rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup']);
|
||||
} else {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
|
||||
}
|
||||
if (defined($vmid)) {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
|
||||
$rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup']);
|
||||
} else {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
|
||||
}
|
||||
|
||||
my $id = (defined($vmid) ? "$vmid@" : '') . $storeid;
|
||||
my $worker = sub {
|
||||
PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 0);
|
||||
};
|
||||
my $id = (defined($vmid) ? "$vmid@" : '') . $storeid;
|
||||
my $worker = sub {
|
||||
PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 0);
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker);
|
||||
}});
|
||||
return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker);
|
||||
},
|
||||
});
|
||||
|
||||
1;
|
||||
|
||||
@ -20,39 +20,40 @@ __PACKAGE__->register_method({
|
||||
method => 'GET',
|
||||
description => "Index of available scan methods",
|
||||
permissions => {
|
||||
user => 'all',
|
||||
user => 'all',
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
method => { type => 'string'},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{method}" } ],
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
method => { type => 'string' },
|
||||
},
|
||||
},
|
||||
links => [{ rel => 'child', href => "{method}" }],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = [
|
||||
{ method => 'cifs' },
|
||||
{ method => 'glusterfs' },
|
||||
{ method => 'iscsi' },
|
||||
{ method => 'lvm' },
|
||||
{ method => 'nfs' },
|
||||
{ method => 'pbs' },
|
||||
{ method => 'zfs' },
|
||||
];
|
||||
my $res = [
|
||||
{ method => 'cifs' },
|
||||
{ method => 'glusterfs' },
|
||||
{ method => 'iscsi' },
|
||||
{ method => 'lvm' },
|
||||
{ method => 'nfs' },
|
||||
{ method => 'pbs' },
|
||||
{ method => 'zfs' },
|
||||
];
|
||||
|
||||
return $res;
|
||||
}});
|
||||
return $res;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'nfsscan',
|
||||
@ -62,46 +63,48 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
path => {
|
||||
description => "The exported path.",
|
||||
type => 'string',
|
||||
},
|
||||
options => {
|
||||
description => "NFS export options.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
path => {
|
||||
description => "The exported path.",
|
||||
type => 'string',
|
||||
},
|
||||
options => {
|
||||
description => "NFS export options.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { path => $k, options => $res->{$k} };
|
||||
}
|
||||
return $data;
|
||||
}});
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { path => $k, options => $res->{$k} };
|
||||
}
|
||||
return $data;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'cifsscan',
|
||||
@ -111,68 +114,70 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User name.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
password => {
|
||||
description => "User password.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
domain => {
|
||||
description => "SMB domain (Workgroup).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User name.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
password => {
|
||||
description => "User password.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
domain => {
|
||||
description => "SMB domain (Workgroup).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
share => {
|
||||
description => "The cifs share name.",
|
||||
type => 'string',
|
||||
},
|
||||
description => {
|
||||
description => "Descriptive text from server.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
share => {
|
||||
description => "The cifs share name.",
|
||||
type => 'string',
|
||||
},
|
||||
description => {
|
||||
description => "Descriptive text from server.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
my $server = $param->{server};
|
||||
|
||||
my $username = $param->{username};
|
||||
my $password = $param->{password};
|
||||
my $domain = $param->{domain};
|
||||
my $username = $param->{username};
|
||||
my $password = $param->{password};
|
||||
my $domain = $param->{domain};
|
||||
|
||||
my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain);
|
||||
my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain);
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
next if $k =~ m/NT_STATUS_/;
|
||||
push @$data, { share => $k, description => $res->{$k} };
|
||||
}
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
next if $k =~ m/NT_STATUS_/;
|
||||
push @$data, { share => $k, description => $res->{$k} };
|
||||
}
|
||||
|
||||
return $data;
|
||||
}});
|
||||
return $data;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'pbsscan',
|
||||
@ -182,61 +187,62 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User-name or API token-ID.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "User password or API token secret.",
|
||||
type => 'string',
|
||||
},
|
||||
fingerprint => get_standard_option('fingerprint-sha256', {
|
||||
optional => 1,
|
||||
}),
|
||||
port => {
|
||||
description => "Optional port.",
|
||||
type => 'integer',
|
||||
minimum => 1,
|
||||
maximum => 65535,
|
||||
default => 8007,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User-name or API token-ID.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "User password or API token secret.",
|
||||
type => 'string',
|
||||
},
|
||||
fingerprint => get_standard_option('fingerprint-sha256', {
|
||||
optional => 1,
|
||||
}),
|
||||
port => {
|
||||
description => "Optional port.",
|
||||
type => 'integer',
|
||||
minimum => 1,
|
||||
maximum => 65535,
|
||||
default => 8007,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
store => {
|
||||
description => "The datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
comment => {
|
||||
description => "Comment from server.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
store => {
|
||||
description => "The datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
comment => {
|
||||
description => "Comment from server.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $password = delete $param->{password};
|
||||
my $password = delete $param->{password};
|
||||
|
||||
return PVE::Storage::PBSPlugin::scan_datastores($param, $password);
|
||||
}
|
||||
return PVE::Storage::PBSPlugin::scan_datastores($param, $password);
|
||||
},
|
||||
});
|
||||
|
||||
# Note: GlusterFS currently does not have an equivalent of showmount.
|
||||
@ -250,44 +256,46 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volname => {
|
||||
description => "The volume name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volname => {
|
||||
description => "The volume name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
|
||||
my $data = [];
|
||||
foreach my $path (sort keys %$res) {
|
||||
if ($path =~ m!^/([^\s/]+)$!) {
|
||||
push @$data, { volname => $1 };
|
||||
}
|
||||
}
|
||||
return $data;
|
||||
}});
|
||||
my $data = [];
|
||||
foreach my $path (sort keys %$res) {
|
||||
if ($path =~ m!^/([^\s/]+)$!) {
|
||||
push @$data, { volname => $1 };
|
||||
}
|
||||
}
|
||||
return $data;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'iscsiscan',
|
||||
@ -297,46 +305,48 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
portal => {
|
||||
description => "The iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string', format => 'pve-storage-portal-dns',
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
portal => {
|
||||
description => "The iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-portal-dns',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
target => {
|
||||
description => "The iSCSI target name.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "The iSCSI portal name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
target => {
|
||||
description => "The iSCSI target name.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "The iSCSI portal name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = PVE::Storage::scan_iscsi($param->{portal});
|
||||
my $res = PVE::Storage::scan_iscsi($param->{portal});
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { target => $k, portal => join(',', @{$res->{$k}}) };
|
||||
}
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { target => $k, portal => join(',', @{ $res->{$k} }) };
|
||||
}
|
||||
|
||||
return $data;
|
||||
}});
|
||||
return $data;
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'lvmscan',
|
||||
@ -346,32 +356,33 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
vg => {
|
||||
description => "The LVM logical volume group name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
vg => {
|
||||
description => "The LVM logical volume group name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = PVE::Storage::LVMPlugin::lvm_vgs();
|
||||
return PVE::RESTHandler::hash_to_array($res, 'vg');
|
||||
}});
|
||||
my $res = PVE::Storage::LVMPlugin::lvm_vgs();
|
||||
return PVE::RESTHandler::hash_to_array($res, 'vg');
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'lvmthinscan',
|
||||
@ -381,36 +392,37 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
vg => {
|
||||
type => 'string',
|
||||
pattern => '[a-zA-Z0-9\.\+\_][a-zA-Z0-9\.\+\_\-]+', # see lvm(8) manpage
|
||||
maxLength => 100,
|
||||
},
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
vg => {
|
||||
type => 'string',
|
||||
pattern => '[a-zA-Z0-9\.\+\_][a-zA-Z0-9\.\+\_\-]+', # see lvm(8) manpage
|
||||
maxLength => 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
lv => {
|
||||
description => "The LVM Thin Pool name (LVM logical volume).",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
lv => {
|
||||
description => "The LVM Thin Pool name (LVM logical volume).",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg});
|
||||
}});
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg});
|
||||
},
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'zfsscan',
|
||||
@ -420,30 +432,31 @@ __PACKAGE__->register_method({
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
pool => {
|
||||
description => "ZFS pool name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
pool => {
|
||||
description => "ZFS pool name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::Storage::scan_zfs();
|
||||
}});
|
||||
return PVE::Storage::scan_zfs();
|
||||
},
|
||||
});
|
||||
|
||||
1;
|
||||
|
||||
File diff suppressed because it is too large
@ -168,6 +168,7 @@ The message to be printed.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub new {
|
||||
my ($class, $storage_plugin, $scfg, $storeid, $log_function) = @_;
|
||||
|
||||
@ -183,6 +184,7 @@ Returns the name of the backup provider. It will be printed in some log lines.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub provider_name {
|
||||
my ($self) = @_;
|
||||
|
||||
@ -211,6 +213,7 @@ Unix time-stamp of when the job started.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub job_init {
|
||||
my ($self, $start_time) = @_;
|
||||
|
||||
@ -227,6 +230,7 @@ the backup server. Called in both, success and failure scenarios.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub job_cleanup {
|
||||
my ($self) = @_;
|
||||
|
||||
@ -271,6 +275,7 @@ Unix time-stamp of when the guest backup started.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_init {
|
||||
my ($self, $vmid, $vmtype, $start_time) = @_;
|
||||
|
||||
@ -326,6 +331,7 @@ Present if there was a failure. The error message indicating the failure.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_cleanup {
|
||||
my ($self, $vmid, $vmtype, $success, $info) = @_;
|
||||
|
||||
@ -366,6 +372,7 @@ The type of the guest being backed up. Currently, either C<qemu> or C<lxc>.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_get_mechanism {
|
||||
my ($self, $vmid, $vmtype) = @_;
|
||||
|
||||
@ -396,6 +403,7 @@ Path to the file with the backup log.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_handle_log_file {
|
||||
my ($self, $vmid, $filename) = @_;
|
||||
|
||||
@ -462,6 +470,7 @@ bitmap and existing ones will be discarded.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_vm_query_incremental {
|
||||
my ($self, $vmid, $volumes) = @_;
|
||||
|
||||
@ -619,6 +628,7 @@ configuration as raw data.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_vm {
|
||||
my ($self, $vmid, $guest_config, $volumes, $info) = @_;
|
||||
|
||||
@ -652,6 +662,7 @@ description there.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_container_prepare {
|
||||
my ($self, $vmid, $info) = @_;
|
||||
|
||||
@ -752,6 +763,7 @@ for unprivileged containers by default.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub backup_container {
|
||||
my ($self, $vmid, $guest_config, $exclude_patterns, $info) = @_;
|
||||
|
||||
@ -797,6 +809,7 @@ The volume ID of the archive being restored.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_get_mechanism {
|
||||
my ($self, $volname) = @_;
|
||||
|
||||
@ -824,6 +837,7 @@ The volume ID of the archive being restored.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub archive_get_guest_config {
|
||||
my ($self, $volname) = @_;
|
||||
|
||||
@ -853,6 +867,7 @@ The volume ID of the archive being restored.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub archive_get_firewall_config {
|
||||
my ($self, $volname) = @_;
|
||||
|
||||
@ -901,6 +916,7 @@ The volume ID of the archive being restored.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_vm_init {
|
||||
my ($self, $volname) = @_;
|
||||
|
||||
@ -927,6 +943,7 @@ The volume ID of the archive being restored.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_vm_cleanup {
|
||||
my ($self, $volname) = @_;
|
||||
|
||||
@ -984,6 +1001,7 @@ empty.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_vm_volume_init {
|
||||
my ($self, $volname, $device_name, $info) = @_;
|
||||
|
||||
@ -1020,6 +1038,7 @@ empty.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_vm_volume_cleanup {
|
||||
my ($self, $volname, $device_name, $info) = @_;
|
||||
|
||||
@ -1086,6 +1105,7 @@ empty.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_container_init {
|
||||
my ($self, $volname, $info) = @_;
|
||||
|
||||
@ -1117,6 +1137,7 @@ empty.
|
||||
=back
|
||||
|
||||
=cut
|
||||
|
||||
sub restore_container_cleanup {
|
||||
my ($self, $volname, $info) = @_;
|
||||
|
||||
|
||||
src/PVE/CLI/pvesm.pm (1132 changed lines): file diff suppressed because it is too large
@ -6,9 +6,7 @@ use Net::IP;
use PVE::Tools qw(run_command);
use PVE::Cluster qw(cfs_register_file);

cfs_register_file('ceph.conf',
\&parse_ceph_config,
\&write_ceph_config);
cfs_register_file('ceph.conf', \&parse_ceph_config, \&write_ceph_config);

# For more information on how the Ceph parser works and how its grammar is
# defined, see:
@ -77,177 +75,177 @@ sub parse_ceph_config {
my @lines = split(/\n/, $raw);

my $parse_section_header = sub {
|
||||
my ($section_line) = @_;
|
||||
my ($section_line) = @_;
|
||||
|
||||
# continued lines in section headers are allowed
|
||||
while ($section_line =~ s/$re_continue_marker$//) {
|
||||
$section_line .= shift(@lines);
|
||||
}
|
||||
# continued lines in section headers are allowed
|
||||
while ($section_line =~ s/$re_continue_marker$//) {
|
||||
$section_line .= shift(@lines);
|
||||
}
|
||||
|
||||
my $remainder = $section_line;
|
||||
my $remainder = $section_line;
|
||||
|
||||
$remainder =~ s/$re_section_header//;
|
||||
my $parsed_header = $1;
|
||||
$remainder =~ s/$re_section_header//;
|
||||
my $parsed_header = $1;
|
||||
|
||||
# Un-escape comment literals
|
||||
$parsed_header =~ s/\\($re_comment_class)/$1/g;
|
||||
# Un-escape comment literals
|
||||
$parsed_header =~ s/\\($re_comment_class)/$1/g;
|
||||
|
||||
if (!$parsed_header) {
|
||||
die "failed to parse section - skip: $section_line\n";
|
||||
}
|
||||
if (!$parsed_header) {
|
||||
die "failed to parse section - skip: $section_line\n";
|
||||
}
|
||||
|
||||
# preserve Ceph's behaviour and disallow anything after the section header
|
||||
# that's not whitespace or a comment
|
||||
$remainder =~ s/$re_leading_ws//;
|
||||
$remainder =~ s/^$re_comment_class.*$//;
|
||||
# preserve Ceph's behaviour and disallow anything after the section header
|
||||
# that's not whitespace or a comment
|
||||
$remainder =~ s/$re_leading_ws//;
|
||||
$remainder =~ s/^$re_comment_class.*$//;
|
||||
|
||||
if ($remainder) {
|
||||
die "unexpected remainder after section - skip: $section_line\n";
|
||||
}
|
||||
if ($remainder) {
|
||||
die "unexpected remainder after section - skip: $section_line\n";
|
||||
}
|
||||
|
||||
return $parsed_header;
|
||||
return $parsed_header;
|
||||
};
|
||||
|
||||
my $parse_key = sub {
my ($line) = @_;
my ($line) = @_;

my $remainder = $line;
my $remainder = $line;

my $key = '';
while ($remainder =~ s/$re_key//) {
$key .= $1;
my $key = '';
while ($remainder =~ s/$re_key//) {
$key .= $1;

while ($key =~ s/$re_continue_marker$//) {
$remainder = shift(@lines);
}
}
while ($key =~ s/$re_continue_marker$//) {
$remainder = shift(@lines);
}
}

$key =~ s/$re_trailing_ws//;
$key =~ s/$re_leading_ws//;
$key =~ s/$re_trailing_ws//;
$key =~ s/$re_leading_ws//;

$key =~ s/\s/ /;
while ($key =~ s/\s\s/ /) {} # squeeze repeated whitespace
$key =~ s/\s/ /;
while ($key =~ s/\s\s/ /) { } # squeeze repeated whitespace

# Ceph treats *single* spaces in keys the same as underscores,
# but we'll just use underscores for readability
$key =~ s/ /_/g;
# Ceph treats *single* spaces in keys the same as underscores,
# but we'll just use underscores for readability
$key =~ s/ /_/g;

# Un-escape comment literals
$key =~ s/\\($re_comment_class)/$1/g;
# Un-escape comment literals
$key =~ s/\\($re_comment_class)/$1/g;

if ($key eq '') {
die "failed to parse key from line - skip: $line\n";
}
if ($key eq '') {
die "failed to parse key from line - skip: $line\n";
}

my $had_equals = $remainder =~ s/^$re_kv_separator//;
my $had_equals = $remainder =~ s/^$re_kv_separator//;

if (!$had_equals) {
die "expected '=' after key - skip: $line\n";
}
if (!$had_equals) {
die "expected '=' after key - skip: $line\n";
}

while ($remainder =~ s/^$re_continue_marker$//) {
# Whitespace and continuations after equals sign can be arbitrary
$remainder = shift(@lines);
$remainder =~ s/$re_leading_ws//;
}
while ($remainder =~ s/^$re_continue_marker$//) {
# Whitespace and continuations after equals sign can be arbitrary
$remainder = shift(@lines);
$remainder =~ s/$re_leading_ws//;
}

return ($key, $remainder);
return ($key, $remainder);
};

my $parse_value = sub {
my ($line, $remainder) = @_;
my ($line, $remainder) = @_;

my $starts_with_quote = $remainder =~ m/^['"]/;
$remainder =~ s/$re_value//;
my $value = $1 // '';
my $starts_with_quote = $remainder =~ m/^['"]/;
$remainder =~ s/$re_value//;
my $value = $1 // '';

if ($value eq '') {
die "failed to parse value - skip: $line\n";
}
if ($value eq '') {
die "failed to parse value - skip: $line\n";
}

if ($starts_with_quote) {
# If it started with a quote, the parsed value MUST end with a quote
my $is_single_quoted = $value =~ m/$re_single_quoted_value/;
$value = $1 if $is_single_quoted;
my $is_double_quoted = !$is_single_quoted && $value =~ m/$re_double_quoted_value/;
$value = $1 if $is_double_quoted;
if ($starts_with_quote) {
# If it started with a quote, the parsed value MUST end with a quote
my $is_single_quoted = $value =~ m/$re_single_quoted_value/;
$value = $1 if $is_single_quoted;
my $is_double_quoted = !$is_single_quoted && $value =~ m/$re_double_quoted_value/;
$value = $1 if $is_double_quoted;

if (!($is_single_quoted || $is_double_quoted)) {
die "failed to parse quoted value - skip: $line\n";
}
if (!($is_single_quoted || $is_double_quoted)) {
die "failed to parse quoted value - skip: $line\n";
}

# Optionally, *only* line continuations may *only* follow right after
while ($remainder =~ s/^$re_continue_marker$//) {
$remainder .= shift(@lines);
}
# Optionally, *only* line continuations may *only* follow right after
while ($remainder =~ s/^$re_continue_marker$//) {
$remainder .= shift(@lines);
}

# Nothing but whitespace or a comment may follow
$remainder =~ s/$re_leading_ws//;
$remainder =~ s/^$re_comment_class.*$//;
# Nothing but whitespace or a comment may follow
$remainder =~ s/$re_leading_ws//;
$remainder =~ s/^$re_comment_class.*$//;

if ($remainder) {
die "unexpected remainder after value - skip: $line\n";
}
if ($remainder) {
die "unexpected remainder after value - skip: $line\n";
}

} else {
while ($value =~ s/$re_continue_marker$//) {
my $next_line = shift(@lines);
} else {
while ($value =~ s/$re_continue_marker$//) {
my $next_line = shift(@lines);

$next_line =~ s/$re_unquoted_value//;
my $value_part = $1 // '';
$value .= $value_part;
}
$next_line =~ s/$re_unquoted_value//;
my $value_part = $1 // '';
$value .= $value_part;
}

$value =~ s/$re_trailing_ws//;
}
$value =~ s/$re_trailing_ws//;
}

# Un-escape comment literals
$value =~ s/\\($re_comment_class)/$1/g;
# Un-escape comment literals
$value =~ s/\\($re_comment_class)/$1/g;

return $value;
return $value;
};

while (scalar(@lines)) {
my $line = shift(@lines);
my $line = shift(@lines);

$line =~ s/^\s*(?<!\\)$re_comment_class.*$//;
$line =~ s/^\s*$//;
next if !$line;
next if $line =~ m/^$re_continue_marker$/;
$line =~ s/^\s*(?<!\\)$re_comment_class.*$//;
$line =~ s/^\s*$//;
next if !$line;
next if $line =~ m/^$re_continue_marker$/;

if ($line =~ m/$re_section_start/) {
$section = undef;
if ($line =~ m/$re_section_start/) {
$section = undef;

eval { $section = $parse_section_header->($line) };
if ($@) {
warn "$@\n";
}
eval { $section = $parse_section_header->($line) };
if ($@) {
warn "$@\n";
}

if (defined($section)) {
$cfg->{$section} = {} if !exists($cfg->{$section});
}
if (defined($section)) {
$cfg->{$section} = {} if !exists($cfg->{$section});
}

next;
}
next;
}

if (!defined($section)) {
warn "no section header - skip: $line\n";
next;
}
if (!defined($section)) {
warn "no section header - skip: $line\n";
next;
}

my ($key, $remainder) = eval { $parse_key->($line) };
if ($@) {
warn "$@\n";
next;
}
my ($key, $remainder) = eval { $parse_key->($line) };
if ($@) {
warn "$@\n";
next;
}

my $value = eval { $parse_value->($line, $remainder) };
if ($@) {
warn "$@\n";
next;
}
my $value = eval { $parse_value->($line, $remainder) };
if ($@) {
warn "$@\n";
next;
}

$cfg->{$section}->{$key} = $value;
$cfg->{$section}->{$key} = $value;
}

return $cfg;
@ -258,7 +256,7 @@ my $parse_ceph_file = sub {

my $cfg = {};

return $cfg if ! -f $filename;
return $cfg if !-f $filename;

my $content = PVE::Tools::file_get_contents($filename);

@ -272,45 +270,45 @@ sub write_ceph_config {
my $out = '';

my $cond_write_sec = sub {
my $re = shift;
my $re = shift;

for my $section (sort keys $cfg->%*) {
next if $section !~ m/^$re$/;
next if exists($written_sections->{$section});
for my $section (sort keys $cfg->%*) {
next if $section !~ m/^$re$/;
next if exists($written_sections->{$section});

$out .= "[$section]\n";
for my $key (sort keys $cfg->{$section}->%*) {
$out .= "\t$key = $cfg->{$section}->{$key}\n";
}
$out .= "\n";
$out .= "[$section]\n";
for my $key (sort keys $cfg->{$section}->%*) {
$out .= "\t$key = $cfg->{$section}->{$key}\n";
}
$out .= "\n";

$written_sections->{$section} = 1;
}
$written_sections->{$section} = 1;
}
};

my @rexprs = (
qr/global/,
qr/global/,

qr/client/,
qr/client\..*/,
qr/client/,
qr/client\..*/,

qr/mds/,
qr/mds\..*/,
qr/mds/,
qr/mds\..*/,

qr/mon/,
qr/mon\..*/,
qr/mon/,
qr/mon\..*/,

qr/osd/,
qr/osd\..*/,
qr/osd/,
qr/osd\..*/,

qr/mgr/,
qr/mgr\..*/,
qr/mgr/,
qr/mgr\..*/,

qr/.*/,
qr/.*/,
);

for my $re (@rexprs) {
$cond_write_sec->($re);
$cond_write_sec->($re);
}

# Escape comment literals that aren't escaped already
@ -332,7 +330,7 @@ my $get_host = sub {
my ($hostport) = @_;
my ($host, $port) = PVE::Tools::parse_host_and_port($hostport);
if (!defined($host)) {
return "";
return "";
}
$port = defined($port) ? ":$port" : '';
$host = "[$host]" if Net::IP::ip_is_ipv6($host);
@ -343,8 +341,8 @@ sub get_monaddr_list {
my ($configfile) = shift;

if (!defined($configfile)) {
warn "No ceph config specified\n";
return;
warn "No ceph config specified\n";
return;
}

my $config = $parse_ceph_file->($configfile);
@ -352,24 +350,24 @@ sub get_monaddr_list {
my $monhostlist = {};

# get all ip addresses from mon_host
my $monhosts = [ split (/[ ,;]+/, $config->{global}->{mon_host} // "") ];
my $monhosts = [split(/[ ,;]+/, $config->{global}->{mon_host} // "")];

foreach my $monhost (@$monhosts) {
$monhost =~ s/^\[?v\d\://; # remove beginning of vector
$monhost =~ s|/\d+\]?||; # remove end of vector
my $host = $get_host->($monhost);
if ($host ne "") {
$monhostlist->{$host} = 1;
}
$monhost =~ s/^\[?v\d\://; # remove beginning of vector
$monhost =~ s|/\d+\]?||; # remove end of vector
my $host = $get_host->($monhost);
if ($host ne "") {
$monhostlist->{$host} = 1;
}
}

# then get all addrs from mon. sections
for my $section ( keys %$config ) {
next if $section !~ m/^mon\./;
for my $section (keys %$config) {
next if $section !~ m/^mon\./;

if (my $addr = $config->{$section}->{mon_addr}) {
$monhostlist->{$addr} = 1;
}
if (my $addr = $config->{$section}->{mon_addr}) {
$monhostlist->{$addr} = 1;
}
}

return join(',', sort keys %$monhostlist);
@ -385,17 +383,17 @@ sub hostlist {
my $ceph_check_keyfile = sub {
|
||||
my ($filename, $type) = @_;
|
||||
|
||||
return if ! -f $filename;
|
||||
return if !-f $filename;
|
||||
|
||||
my $content = PVE::Tools::file_get_contents($filename);
|
||||
eval {
|
||||
die if !$content;
|
||||
die if !$content;
|
||||
|
||||
if ($type eq 'rbd') {
|
||||
die if $content !~ /\s*\[\S+\]\s*key\s*=\s*\S+==\s*$/m;
|
||||
} elsif ($type eq 'cephfs') {
|
||||
die if $content !~ /\S+==\s*$/;
|
||||
}
|
||||
if ($type eq 'rbd') {
|
||||
die if $content !~ /\s*\[\S+\]\s*key\s*=\s*\S+==\s*$/m;
|
||||
} elsif ($type eq 'cephfs') {
|
||||
die if $content !~ /\S+==\s*$/;
|
||||
}
|
||||
};
|
||||
die "Not a proper $type authentication file: $filename\n" if $@;
|
||||
|
||||
@ -415,23 +413,24 @@ sub ceph_connect_option {
|
||||
$ceph_check_keyfile->($keyfile, $scfg->{type});
|
||||
|
||||
if (-e "/etc/pve/priv/ceph/${storeid}.conf") {
|
||||
# allow custom ceph configuration for external clusters
|
||||
if ($pveceph_managed) {
|
||||
warn "ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n";
|
||||
} else {
|
||||
$cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf";
|
||||
}
|
||||
# allow custom ceph configuration for external clusters
|
||||
if ($pveceph_managed) {
|
||||
warn
|
||||
"ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n";
|
||||
} else {
|
||||
$cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf";
|
||||
}
|
||||
}
|
||||
|
||||
$cmd_option->{keyring} = $keyfile if (-e $keyfile);
|
||||
$cmd_option->{auth_supported} = (defined $cmd_option->{keyring}) ? 'cephx' : 'none';
|
||||
$cmd_option->{userid} = $scfg->{username} ? $scfg->{username} : 'admin';
|
||||
$cmd_option->{userid} = $scfg->{username} ? $scfg->{username} : 'admin';
|
||||
$cmd_option->{mon_host} = hostlist($scfg->{monhost}, ',') if (defined($scfg->{monhost}));
|
||||
|
||||
if (%options) {
|
||||
foreach my $k (keys %options) {
|
||||
$cmd_option->{$k} = $options{$k};
|
||||
}
|
||||
foreach my $k (keys %options) {
|
||||
$cmd_option->{$k} = $options{$k};
|
||||
}
|
||||
}
|
||||
|
||||
return $cmd_option;
|
||||
@ -448,30 +447,31 @@ sub ceph_create_keyfile {
|
||||
my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension";
|
||||
|
||||
die "ceph authx keyring file for storage '$storeid' already exists!\n"
|
||||
if -e $ceph_storage_keyring && !defined($secret);
|
||||
if -e $ceph_storage_keyring && !defined($secret);
|
||||
|
||||
if (-e $ceph_admin_keyring || defined($secret)) {
|
||||
eval {
|
||||
if (defined($secret)) {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${secret}\n", 0400);
|
||||
} elsif ($type eq 'rbd') {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
PVE::Tools::file_copy($ceph_admin_keyring, $ceph_storage_keyring);
|
||||
} elsif ($type eq 'cephfs') {
|
||||
my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin');
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $cephfs_secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n", 0400);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
unlink $ceph_storage_keyring;
|
||||
die "failed to copy ceph authx $extension for storage '$storeid': $err\n";
|
||||
}
|
||||
eval {
|
||||
if (defined($secret)) {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${secret}\n", 0400);
|
||||
} elsif ($type eq 'rbd') {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
PVE::Tools::file_copy($ceph_admin_keyring, $ceph_storage_keyring);
|
||||
} elsif ($type eq 'cephfs') {
|
||||
my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin');
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $cephfs_secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n",
|
||||
0400);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
unlink $ceph_storage_keyring;
|
||||
die "failed to copy ceph authx $extension for storage '$storeid': $err\n";
|
||||
}
|
||||
} else {
|
||||
warn "$ceph_admin_keyring not found, authentication is disabled.\n";
|
||||
warn "$ceph_admin_keyring not found, authentication is disabled.\n";
|
||||
}
|
||||
}
|
||||
|
||||
@ -483,7 +483,7 @@ sub ceph_remove_keyfile {
|
||||
my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension";
|
||||
|
||||
if (-f $ceph_storage_keyring) {
|
||||
unlink($ceph_storage_keyring) or warn "removing keyring of storage failed: $!\n";
|
||||
unlink($ceph_storage_keyring) or warn "removing keyring of storage failed: $!\n";
|
||||
}
|
||||
}
|
||||
|
||||
@ -491,10 +491,10 @@ my $ceph_version_parser = sub {
|
||||
my $ceph_version = shift;
|
||||
# FIXME this is the same as pve-manager PVE::Ceph::Tools get_local_version
|
||||
if ($ceph_version =~ /^ceph.*\sv?(\d+(?:\.\d+)+(?:-pve\d+)?)\s+(?:\(([a-zA-Z0-9]+)\))?/) {
|
||||
my ($version, $buildcommit) = ($1, $2);
|
||||
my $subversions = [ split(/\.|-/, $version) ];
|
||||
my ($version, $buildcommit) = ($1, $2);
|
||||
my $subversions = [split(/\.|-/, $version)];
|
||||
|
||||
return ($subversions, $version, $buildcommit);
|
||||
return ($subversions, $version, $buildcommit);
|
||||
}
|
||||
warn "Could not parse Ceph version: '$ceph_version'\n";
|
||||
};
|
||||
@ -504,9 +504,12 @@ sub local_ceph_version {
|
||||
|
||||
my $version_string = $cache;
|
||||
if (!defined($version_string)) {
|
||||
run_command('ceph --version', outfunc => sub {
|
||||
$version_string = shift;
|
||||
});
|
||||
run_command(
|
||||
'ceph --version',
|
||||
outfunc => sub {
|
||||
$version_string = shift;
|
||||
},
|
||||
);
|
||||
}
|
||||
return undef if !defined($version_string);
|
||||
# subversion is an array ref. with the version parts from major to minor
|
||||
|
||||
File diff suppressed because it is too large
@ -16,24 +16,24 @@ sub extract_disk_from_import_file {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my ($vtype, $name, undef, undef, undef, undef, $fmt) =
|
||||
PVE::Storage::parse_volname($cfg, $volid);
|
||||
PVE::Storage::parse_volname($cfg, $volid);
|
||||
|
||||
die "only files with content type 'import' can be extracted\n"
|
||||
if $vtype ne 'import';
|
||||
if $vtype ne 'import';
|
||||
|
||||
die "only files from 'ova' format can be extracted\n"
|
||||
if $fmt !~ m/^ova\+/;
|
||||
if $fmt !~ m/^ova\+/;
|
||||
|
||||
# extract the inner file from the name
|
||||
my $archive_volid;
|
||||
my $inner_file;
|
||||
my $inner_fmt;
|
||||
if ($name =~ m!^(.*\.ova)/(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$!) {
|
||||
$archive_volid = "$source_storeid:import/$1";
|
||||
$inner_file = $2;
|
||||
($inner_fmt) = $fmt =~ /^ova\+(.*)$/;
|
||||
$archive_volid = "$source_storeid:import/$1";
|
||||
$inner_file = $2;
|
||||
($inner_fmt) = $fmt =~ /^ova\+(.*)$/;
|
||||
} else {
|
||||
die "cannot extract $volid - invalid volname $volname\n";
|
||||
die "cannot extract $volid - invalid volname $volname\n";
|
||||
}
|
||||
|
||||
die "cannot determine format of '$volid'\n" if !$inner_fmt;
|
||||
@ -49,36 +49,40 @@ sub extract_disk_from_import_file {
|
||||
my $target_path;
|
||||
my $target_volid;
|
||||
eval {
|
||||
run_command([
|
||||
'tar',
|
||||
'-x',
|
||||
'--force-local',
|
||||
'--no-same-owner',
|
||||
'-C', $tmpdir,
|
||||
'-f', $ova_path,
|
||||
$inner_file,
|
||||
]);
|
||||
run_command([
|
||||
'tar',
|
||||
'-x',
|
||||
'--force-local',
|
||||
'--no-same-owner',
|
||||
'-C',
|
||||
$tmpdir,
|
||||
'-f',
|
||||
$ova_path,
|
||||
$inner_file,
|
||||
]);
|
||||
|
||||
# check for symlinks and other non regular files
|
||||
if (-l $source_path || ! -f $source_path) {
|
||||
die "extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n";
|
||||
}
|
||||
# check for symlinks and other non regular files
|
||||
if (-l $source_path || !-f $source_path) {
|
||||
die
|
||||
"extracted file '$inner_file' from archive '$archive_volid' is not a regular file\n";
|
||||
}
|
||||
|
||||
# check potentially untrusted image file!
|
||||
PVE::Storage::file_size_info($source_path, undef, $inner_fmt, 1);
|
||||
# check potentially untrusted image file!
|
||||
PVE::Storage::file_size_info($source_path, undef, $inner_fmt, 1);
|
||||
|
||||
# create temporary 1M image that will get overwritten by the rename
|
||||
# to reserve the filename and take care of locking
|
||||
$target_volid = PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024);
|
||||
$target_path = PVE::Storage::path($cfg, $target_volid);
|
||||
# create temporary 1M image that will get overwritten by the rename
|
||||
# to reserve the filename and take care of locking
|
||||
$target_volid =
|
||||
PVE::Storage::vdisk_alloc($cfg, $target_storeid, $vmid, $inner_fmt, undef, 1024);
|
||||
$target_path = PVE::Storage::path($cfg, $target_volid);
|
||||
|
||||
print "renaming $source_path to $target_path\n";
|
||||
print "renaming $source_path to $target_path\n";
|
||||
|
||||
rename($source_path, $target_path) or die "unable to move - $!\n";
|
||||
rename($source_path, $target_path) or die "unable to move - $!\n";
|
||||
};
|
||||
if (my $err = $@) {
|
||||
File::Path::remove_tree($tmpdir);
|
||||
die "error during extraction: $err\n";
|
||||
File::Path::remove_tree($tmpdir);
|
||||
die "error during extraction: $err\n";
|
||||
}
|
||||
|
||||
File::Path::remove_tree($tmpdir);
|
||||
|
||||
@ -36,7 +36,7 @@ my @resources = (
|
||||
{ id => 17, dtmf_name => 'Disk Drive' },
|
||||
{ id => 18, dtmf_name => 'Tape Drive' },
|
||||
{ id => 19, dtmf_name => 'Storage Extent' },
|
||||
{ id => 20, dtmf_name => 'Other storage device', pve_type => 'sata'},
|
||||
{ id => 20, dtmf_name => 'Other storage device', pve_type => 'sata' },
|
||||
{ id => 21, dtmf_name => 'Serial port' },
|
||||
{ id => 22, dtmf_name => 'Parallel port' },
|
||||
{ id => 23, dtmf_name => 'USB Controller' },
|
||||
@ -51,7 +51,7 @@ my @resources = (
|
||||
{ id => 32, dtmf_name => 'Storage Volume' },
|
||||
{ id => 33, dtmf_name => 'Ethernet Connection' },
|
||||
{ id => 34, dtmf_name => 'DMTF reserved' },
|
||||
{ id => 35, dtmf_name => 'Vendor Reserved'}
|
||||
{ id => 35, dtmf_name => 'Vendor Reserved' },
|
||||
);
|
||||
|
||||
# see https://schemas.dmtf.org/wbem/cim-html/2.55.0+/CIM_OperatingSystem.html
|
||||
@ -120,17 +120,15 @@ sub get_ostype {
|
||||
}
|
||||
|
||||
my $allowed_nic_models = [
|
||||
'e1000',
|
||||
'e1000e',
|
||||
'vmxnet3',
|
||||
'e1000', 'e1000e', 'vmxnet3',
|
||||
];
|
||||
|
||||
sub find_by {
|
||||
my ($key, $param) = @_;
|
||||
foreach my $resource (@resources) {
|
||||
if ($resource->{$key} eq $param) {
|
||||
return ($resource);
|
||||
}
|
||||
if ($resource->{$key} eq $param) {
|
||||
return ($resource);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -139,9 +137,9 @@ sub dtmf_name_to_id {
|
||||
my ($dtmf_name) = @_;
|
||||
my $found = find_by('dtmf_name', $dtmf_name);
|
||||
if ($found) {
|
||||
return $found->{id};
|
||||
return $found->{id};
|
||||
} else {
|
||||
return;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -149,9 +147,9 @@ sub id_to_pve {
|
||||
my ($id) = @_;
|
||||
my $resource = find_by('id', $id);
|
||||
if ($resource) {
|
||||
return $resource->{pve_type};
|
||||
return $resource->{pve_type};
|
||||
} else {
|
||||
return;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,9 +159,9 @@ sub try_parse_capacity_unit {
|
||||
my ($unit_text) = @_;
|
||||
|
||||
if ($unit_text =~ m/^\s*byte\s*\*\s*([0-9]+)\s*\^\s*([0-9]+)\s*$/) {
|
||||
my $base = $1;
|
||||
my $exp = $2;
|
||||
return $base ** $exp;
|
||||
my $base = $1;
|
||||
my $exp = $2;
|
||||
return $base**$exp;
|
||||
}
|
||||
|
||||
return undef;
|
||||
@ -176,25 +174,32 @@ sub parse_ovf {
|
||||
# we have to ignore missing disk images for ova
|
||||
my $dom;
|
||||
if ($isOva) {
|
||||
my $raw = "";
|
||||
PVE::Tools::run_command(['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'], outfunc => sub {
|
||||
my $line = shift;
|
||||
$raw .= $line;
|
||||
});
|
||||
$dom = XML::LibXML->load_xml(string => $raw, no_blanks => 1);
|
||||
my $raw = "";
|
||||
PVE::Tools::run_command(
|
||||
['tar', '-xO', '--wildcards', '--occurrence=1', '-f', $ovf, '*.ovf'],
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
$raw .= $line;
|
||||
},
|
||||
);
|
||||
$dom = XML::LibXML->load_xml(string => $raw, no_blanks => 1);
|
||||
} else {
|
||||
$dom = XML::LibXML->load_xml(location => $ovf, no_blanks => 1);
|
||||
$dom = XML::LibXML->load_xml(location => $ovf, no_blanks => 1);
|
||||
}
|
||||
|
||||
|
||||
# register the xml namespaces in a xpath context object
|
||||
# 'ovf' is the default namespace so it will prepended to each xml element
|
||||
my $xpc = XML::LibXML::XPathContext->new($dom);
|
||||
$xpc->registerNs('ovf', 'http://schemas.dmtf.org/ovf/envelope/1');
|
||||
$xpc->registerNs('vmw', 'http://www.vmware.com/schema/ovf');
|
||||
$xpc->registerNs('rasd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData');
|
||||
$xpc->registerNs('vssd', 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData');
|
||||
|
||||
$xpc->registerNs(
|
||||
'rasd',
|
||||
'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData',
|
||||
);
|
||||
$xpc->registerNs(
|
||||
'vssd',
|
||||
'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData',
|
||||
);
|
||||
|
||||
# hash to save qm.conf parameters
|
||||
my $qm;
|
||||
@ -206,48 +211,55 @@ sub parse_ovf {
|
||||
# walk down the dom until we find the matching XML element
|
||||
my $ovf_name = $xpc->findvalue("/ovf:Envelope/ovf:VirtualSystem/ovf:Name");
|
||||
if (!$ovf_name) {
|
||||
# this is a bit of a hack, but best-effort and can only win here
|
||||
my @nodes = $xpc->findnodes("/ovf:Envelope/ovf:VirtualSystem");
|
||||
if (my $virtual_system_node = shift @nodes) {
|
||||
for my $attr ($virtual_system_node->attributes()) {
|
||||
if ($attr->nodeName() eq 'ovf:id') {
|
||||
$ovf_name = $attr->getValue();
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
# this is a bit of a hack, but best-effort and can only win here
|
||||
my @nodes = $xpc->findnodes("/ovf:Envelope/ovf:VirtualSystem");
|
||||
if (my $virtual_system_node = shift @nodes) {
|
||||
for my $attr ($virtual_system_node->attributes()) {
|
||||
if ($attr->nodeName() eq 'ovf:id') {
|
||||
$ovf_name = $attr->getValue();
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ($ovf_name) {
|
||||
# PVE::QemuServer::confdesc requires a valid DNS name
|
||||
$ovf_name =~ s/\s+/-/g;
|
||||
($qm->{name} = $ovf_name) =~ s/[^a-zA-Z0-9\-\.]//g;
|
||||
# PVE::QemuServer::confdesc requires a valid DNS name
|
||||
$ovf_name =~ s/\s+/-/g;
|
||||
($qm->{name} = $ovf_name) =~ s/[^a-zA-Z0-9\-\.]//g;
|
||||
} else {
|
||||
warn "warning: unable to parse the VM name in this OVF manifest, generating a default value\n";
|
||||
warn
|
||||
"warning: unable to parse the VM name in this OVF manifest, generating a default value\n";
|
||||
}
|
||||
|
||||
# middle level xpath
|
||||
# element[child] search the elements which have this [child]
|
||||
my $processor_id = dtmf_name_to_id('Processor');
|
||||
my $xpath_find_vcpu_count = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity";
|
||||
my $xpath_find_vcpu_count =
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${processor_id}]/rasd:VirtualQuantity";
|
||||
$qm->{'cores'} = $xpc->findvalue($xpath_find_vcpu_count);
|
||||
|
||||
my $memory_id = dtmf_name_to_id('Memory');
|
||||
my $xpath_find_memory = ("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity");
|
||||
my $xpath_find_memory = (
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${memory_id}]/rasd:VirtualQuantity"
|
||||
);
|
||||
$qm->{'memory'} = $xpc->findvalue($xpath_find_memory);
|
||||
|
||||
# middle level xpath
|
||||
# here we expect multiple results, so we do not read the element value with
|
||||
# findvalue() but store multiple elements with findnodes()
|
||||
my $disk_id = dtmf_name_to_id('Disk Drive');
|
||||
my $xpath_find_disks = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]";
|
||||
my $xpath_find_disks =
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${disk_id}]";
|
||||
my @disk_items = $xpc->findnodes($xpath_find_disks);
|
||||
|
||||
my $xpath_find_ostype_id = "/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id";
|
||||
my $xpath_find_ostype_id =
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:OperatingSystemSection/\@ovf:id";
|
||||
my $ostype_id = $xpc->findvalue($xpath_find_ostype_id);
|
||||
$qm->{ostype} = get_ostype($ostype_id);
|
||||
|
||||
# vmware specific firmware config, seems to not be standardized in ovf ?
|
||||
my $xpath_find_firmware = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value";
|
||||
my $xpath_find_firmware =
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/vmw:Config[\@vmw:key=\"firmware\"]/\@vmw:value";
|
||||
my $firmware = $xpc->findvalue($xpath_find_firmware) || 'seabios';
|
||||
$qm->{bios} = 'ovmf' if $firmware eq 'efi';
|
||||
|
||||
@ -265,130 +277,141 @@ sub parse_ovf {
|
||||
my $boot_order = [];
|
||||
|
||||
for my $item_node (@disk_items) {
|
||||
my ($disk_node, $file_node, $controller_node, $pve_disk);
|
||||
my ($disk_node, $file_node, $controller_node, $pve_disk);
|
||||
|
||||
print "disk item:\n", $item_node->toString(1), "\n" if $debug;
|
||||
print "disk item:\n", $item_node->toString(1), "\n" if $debug;
|
||||
|
||||
# from Item, find corresponding Disk node
|
||||
# here the dot means the search should start from the current element in dom
|
||||
my $host_resource = $xpc->findvalue('rasd:HostResource', $item_node);
|
||||
my $disk_section_path;
|
||||
my $disk_id;
|
||||
# from Item, find corresponding Disk node
|
||||
# here the dot means the search should start from the current element in dom
|
||||
my $host_resource = $xpc->findvalue('rasd:HostResource', $item_node);
|
||||
my $disk_section_path;
|
||||
my $disk_id;
|
||||
|
||||
# RFC 3986 "2.3. Unreserved Characters"
|
||||
my $valid_uripath_chars = qr/[[:alnum:]]|[\-\._~]/;
|
||||
# RFC 3986 "2.3. Unreserved Characters"
|
||||
my $valid_uripath_chars = qr/[[:alnum:]]|[\-\._~]/;
|
||||
|
||||
if ($host_resource =~ m|^(?:ovf:)?/(${valid_uripath_chars}+)/(${valid_uripath_chars}+)$|) {
|
||||
$disk_section_path = $1;
|
||||
$disk_id = $2;
|
||||
} else {
|
||||
warn "invalid host resource $host_resource, skipping\n";
|
||||
next;
|
||||
}
|
||||
printf "disk section path: $disk_section_path and disk id: $disk_id\n" if $debug;
|
||||
if ($host_resource =~ m|^(?:ovf:)?/(${valid_uripath_chars}+)/(${valid_uripath_chars}+)$|) {
|
||||
$disk_section_path = $1;
|
||||
$disk_id = $2;
|
||||
} else {
|
||||
warn "invalid host resource $host_resource, skipping\n";
|
||||
next;
|
||||
}
|
||||
printf "disk section path: $disk_section_path and disk id: $disk_id\n" if $debug;
|
||||
|
||||
# tricky xpath
|
||||
# @ means we filter the result query based on a the value of an item attribute ( @ = attribute)
|
||||
# @ needs to be escaped to prevent Perl double quote interpolation
|
||||
my $xpath_find_fileref = sprintf("/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id);
|
||||
my $xpath_find_capacity = sprintf("/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id);
|
||||
my $xpath_find_capacity_unit = sprintf("/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id);
|
||||
my $fileref = $xpc->findvalue($xpath_find_fileref);
|
||||
my $capacity = $xpc->findvalue($xpath_find_capacity);
|
||||
my $capacity_unit = $xpc->findvalue($xpath_find_capacity_unit);
|
||||
my $virtual_size;
|
||||
if (my $factor = try_parse_capacity_unit($capacity_unit)) {
|
||||
$virtual_size = $capacity * $factor;
|
||||
}
|
||||
# tricky xpath
|
||||
# @ means we filter the result query based on a the value of an item attribute ( @ = attribute)
|
||||
# @ needs to be escaped to prevent Perl double quote interpolation
|
||||
my $xpath_find_fileref = sprintf(
|
||||
"/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:fileRef", $disk_id,
|
||||
);
|
||||
my $xpath_find_capacity = sprintf(
|
||||
"/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacity", $disk_id,
|
||||
);
|
||||
my $xpath_find_capacity_unit = sprintf(
|
||||
"/ovf:Envelope/ovf:DiskSection/\
|
||||
ovf:Disk[\@ovf:diskId='%s']/\@ovf:capacityAllocationUnits", $disk_id,
|
||||
);
|
||||
my $fileref = $xpc->findvalue($xpath_find_fileref);
|
||||
my $capacity = $xpc->findvalue($xpath_find_capacity);
|
||||
my $capacity_unit = $xpc->findvalue($xpath_find_capacity_unit);
|
||||
my $virtual_size;
|
||||
if (my $factor = try_parse_capacity_unit($capacity_unit)) {
|
||||
$virtual_size = $capacity * $factor;
|
||||
}
|
||||
|
||||
my $valid_url_chars = qr@${valid_uripath_chars}|/@;
|
||||
if (!$fileref || $fileref !~ m/^${valid_url_chars}+$/) {
|
||||
warn "invalid host resource $host_resource, skipping\n";
|
||||
next;
|
||||
}
|
||||
my $valid_url_chars = qr@${valid_uripath_chars}|/@;
|
||||
if (!$fileref || $fileref !~ m/^${valid_url_chars}+$/) {
|
||||
warn "invalid host resource $host_resource, skipping\n";
|
||||
next;
|
||||
}
|
||||
|
||||
# from Item, find owning Controller type
|
||||
my $controller_id = $xpc->findvalue('rasd:Parent', $item_node);
|
||||
my $xpath_find_parent_type = sprintf("/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\
|
||||
ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id);
|
||||
my $controller_type = $xpc->findvalue($xpath_find_parent_type);
|
||||
if (!$controller_type) {
|
||||
warn "invalid or missing controller: $controller_type, skipping\n";
|
||||
next;
|
||||
}
|
||||
print "owning controller type: $controller_type\n" if $debug;
|
||||
# from Item, find owning Controller type
|
||||
my $controller_id = $xpc->findvalue('rasd:Parent', $item_node);
|
||||
my $xpath_find_parent_type = sprintf(
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/\
|
||||
ovf:Item[rasd:InstanceID='%s']/rasd:ResourceType", $controller_id,
|
||||
);
|
||||
my $controller_type = $xpc->findvalue($xpath_find_parent_type);
|
||||
if (!$controller_type) {
|
||||
warn "invalid or missing controller: $controller_type, skipping\n";
|
||||
next;
|
||||
}
|
||||
print "owning controller type: $controller_type\n" if $debug;
|
||||
|
||||
# extract corresponding Controller node details
|
||||
my $adress_on_controller = $xpc->findvalue('rasd:AddressOnParent', $item_node);
|
||||
my $pve_disk_address = id_to_pve($controller_type) . $adress_on_controller;
|
||||
# extract corresponding Controller node details
|
||||
my $adress_on_controller = $xpc->findvalue('rasd:AddressOnParent', $item_node);
|
||||
my $pve_disk_address = id_to_pve($controller_type) . $adress_on_controller;
|
||||
|
||||
# from Disk Node, find corresponding filepath
|
||||
my $xpath_find_filepath = sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref);
|
||||
my $filepath = $xpc->findvalue($xpath_find_filepath);
|
||||
if (!$filepath) {
|
||||
warn "invalid file reference $fileref, skipping\n";
|
||||
next;
|
||||
}
|
||||
print "file path: $filepath\n" if $debug;
|
||||
my $original_filepath = $filepath;
|
||||
($filepath) = $filepath =~ m|^(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$|; # untaint & check no sub/parent dirs
|
||||
die "referenced path '$original_filepath' is invalid\n" if !$filepath || $filepath eq "." || $filepath eq "..";
|
||||
# from Disk Node, find corresponding filepath
|
||||
my $xpath_find_filepath =
|
||||
sprintf("/ovf:Envelope/ovf:References/ovf:File[\@ovf:id='%s']/\@ovf:href", $fileref);
|
||||
my $filepath = $xpc->findvalue($xpath_find_filepath);
|
||||
if (!$filepath) {
|
||||
warn "invalid file reference $fileref, skipping\n";
|
||||
next;
|
||||
}
|
||||
print "file path: $filepath\n" if $debug;
|
||||
my $original_filepath = $filepath;
|
||||
($filepath) = $filepath =~ m|^(${PVE::Storage::SAFE_CHAR_WITH_WHITESPACE_CLASS_RE}+)$|; # untaint & check no sub/parent dirs
|
||||
die "referenced path '$original_filepath' is invalid\n"
|
||||
if !$filepath || $filepath eq "." || $filepath eq "..";
|
||||
|
||||
# resolve symlinks and relative path components
|
||||
# and die if the diskimage is not somewhere under the $ovf path
|
||||
my $ovf_dir = realpath(dirname(File::Spec->rel2abs($ovf)))
|
||||
or die "could not get absolute path of $ovf: $!\n";
|
||||
my $backing_file_path = realpath(join ('/', $ovf_dir, $filepath))
|
||||
or die "could not get absolute path of $filepath: $!\n";
|
||||
if ($backing_file_path !~ /^\Q${ovf_dir}\E/) {
|
||||
die "error parsing $filepath, are you using a symlink ?\n";
|
||||
}
|
||||
# resolve symlinks and relative path components
|
||||
# and die if the diskimage is not somewhere under the $ovf path
|
||||
my $ovf_dir = realpath(dirname(File::Spec->rel2abs($ovf)))
|
||||
or die "could not get absolute path of $ovf: $!\n";
|
||||
my $backing_file_path = realpath(join('/', $ovf_dir, $filepath))
|
||||
or die "could not get absolute path of $filepath: $!\n";
|
||||
if ($backing_file_path !~ /^\Q${ovf_dir}\E/) {
|
||||
die "error parsing $filepath, are you using a symlink ?\n";
|
||||
}
|
||||
|
||||
($backing_file_path) = $backing_file_path =~ m|^(/.*)|; # untaint
|
||||
($backing_file_path) = $backing_file_path =~ m|^(/.*)|; # untaint
|
||||
|
||||
if (!-e $backing_file_path && !$isOva) {
|
||||
die "error parsing $filepath, file seems not to exist at $backing_file_path\n";
|
||||
}
|
||||
if (!-e $backing_file_path && !$isOva) {
|
||||
die "error parsing $filepath, file seems not to exist at $backing_file_path\n";
|
||||
}
|
||||
|
||||
if (!$isOva) {
|
||||
my $size = PVE::Storage::file_size_info($backing_file_path, undef, 'auto-detect');
|
||||
die "error parsing $backing_file_path, cannot determine file size\n"
|
||||
if !$size;
|
||||
if (!$isOva) {
|
||||
my $size = PVE::Storage::file_size_info($backing_file_path, undef, 'auto-detect');
|
||||
die "error parsing $backing_file_path, cannot determine file size\n"
|
||||
if !$size;
|
||||
|
||||
$virtual_size = $size;
|
||||
}
|
||||
$pve_disk = {
|
||||
disk_address => $pve_disk_address,
|
||||
backing_file => $backing_file_path,
|
||||
virtual_size => $virtual_size,
|
||||
relative_path => $filepath,
|
||||
};
|
||||
$pve_disk->{virtual_size} = $virtual_size if defined($virtual_size);
|
||||
push @disks, $pve_disk;
|
||||
push @$boot_order, $pve_disk_address;
|
||||
$virtual_size = $size;
|
||||
}
|
||||
$pve_disk = {
|
||||
disk_address => $pve_disk_address,
|
||||
backing_file => $backing_file_path,
|
||||
virtual_size => $virtual_size,
|
||||
relative_path => $filepath,
|
||||
};
|
||||
$pve_disk->{virtual_size} = $virtual_size if defined($virtual_size);
|
||||
push @disks, $pve_disk;
|
||||
push @$boot_order, $pve_disk_address;
|
||||
}
|
||||
|
||||
$qm->{boot} = "order=" . join(';', @$boot_order) if scalar(@$boot_order) > 0;
|
||||
|
||||
my $nic_id = dtmf_name_to_id('Ethernet Adapter');
|
||||
my $xpath_find_nics = "/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]";
|
||||
my $xpath_find_nics =
|
||||
"/ovf:Envelope/ovf:VirtualSystem/ovf:VirtualHardwareSection/ovf:Item[rasd:ResourceType=${nic_id}]";
|
||||
my @nic_items = $xpc->findnodes($xpath_find_nics);
|
||||
|
||||
my $net = {};
|
||||
|
||||
my $net_count = 0;
|
||||
for my $item_node (@nic_items) {
|
||||
my $model = $xpc->findvalue('rasd:ResourceSubType', $item_node);
|
||||
$model = lc($model);
|
||||
$model = 'e1000' if ! grep { $_ eq $model } @$allowed_nic_models;
|
||||
$net->{"net${net_count}"} = { model => $model };
|
||||
$net_count++;
|
||||
my $model = $xpc->findvalue('rasd:ResourceSubType', $item_node);
|
||||
$model = lc($model);
|
||||
$model = 'e1000' if !grep { $_ eq $model } @$allowed_nic_models;
|
||||
$net->{"net${net_count}"} = { model => $model };
|
||||
$net_count++;
|
||||
}
|
||||
|
||||
return {qm => $qm, disks => \@disks, net => $net};
|
||||
return { qm => $qm, disks => \@disks, net => $net };
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
1593 src/PVE/Storage.pm
File diff suppressed because it is too large
@ -16,7 +16,7 @@ use base qw(PVE::Storage::Plugin);
|
||||
sub cifs_is_mounted : prototype($$) {
|
||||
my ($scfg, $mountdata) = @_;
|
||||
|
||||
my ($mountpoint, $server, $share) = $scfg->@{'path', 'server', 'share'};
|
||||
my ($mountpoint, $server, $share) = $scfg->@{ 'path', 'server', 'share' };
|
||||
my $subdir = $scfg->{subdir} // '';
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
@ -24,9 +24,9 @@ sub cifs_is_mounted : prototype($$) {
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ /^cifs/ &&
|
||||
$_->[0] =~ m|^\Q$source\E/?$| &&
|
||||
$_->[1] eq $mountpoint
|
||||
$_->[2] =~ /^cifs/
|
||||
&& $_->[0] =~ m|^\Q$source\E/?$|
|
||||
&& $_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
@ -40,7 +40,7 @@ sub cifs_delete_credentials {
|
||||
my ($storeid) = @_;
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
unlink($cred_file) or warn "removing cifs credientials '$cred_file' failed: $!\n";
|
||||
unlink($cred_file) or warn "removing cifs credientials '$cred_file' failed: $!\n";
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,7 +61,7 @@ sub get_cred_file {
|
||||
my $cred_file = cifs_cred_file_name($storeid);
|
||||
|
||||
if (-e $cred_file) {
|
||||
return $cred_file;
|
||||
return $cred_file;
|
||||
}
|
||||
return undef;
|
||||
}
|
||||
@ -69,7 +69,7 @@ sub get_cred_file {
|
||||
sub cifs_mount : prototype($$$$$) {
|
||||
my ($scfg, $storeid, $smbver, $user, $domain) = @_;
|
||||
|
||||
my ($mountpoint, $server, $share, $options) = $scfg->@{'path', 'server', 'share', 'options'};
|
||||
my ($mountpoint, $server, $share, $options) = $scfg->@{ 'path', 'server', 'share', 'options' };
|
||||
my $subdir = $scfg->{subdir} // '';
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
@ -78,10 +78,10 @@ sub cifs_mount : prototype($$$$$) {
|
||||
my $cmd = ['/bin/mount', '-t', 'cifs', $source, $mountpoint, '-o', 'soft', '-o'];
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
push @$cmd, "username=$user", '-o', "credentials=$cred_file";
|
||||
push @$cmd, '-o', "domain=$domain" if defined($domain);
|
||||
push @$cmd, "username=$user", '-o', "credentials=$cred_file";
|
||||
push @$cmd, '-o', "domain=$domain" if defined($domain);
|
||||
} else {
|
||||
push @$cmd, 'guest,username=guest';
|
||||
push @$cmd, 'guest,username=guest';
|
||||
}
|
||||
|
||||
push @$cmd, '-o', defined($smbver) ? "vers=$smbver" : "vers=default";
|
||||
@ -98,69 +98,79 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1,
|
||||
backup => 1, snippets => 1, import => 1}, { images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
'sensitive-properties' => { password => 1 },
|
||||
content => [
|
||||
{
|
||||
images => 1,
|
||||
rootdir => 1,
|
||||
vztmpl => 1,
|
||||
iso => 1,
|
||||
backup => 1,
|
||||
snippets => 1,
|
||||
import => 1,
|
||||
},
|
||||
{ images => 1 },
|
||||
],
|
||||
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
|
||||
'sensitive-properties' => { password => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
share => {
|
||||
description => "CIFS share.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "Password for accessing the share/datastore.",
|
||||
type => 'string',
|
||||
maxLength => 256,
|
||||
},
|
||||
domain => {
|
||||
description => "CIFS domain.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
maxLength => 256,
|
||||
},
|
||||
smbversion => {
|
||||
description => "SMB protocol version. 'default' if not set, negotiates the highest SMB2+"
|
||||
." version supported by both the client and server.",
|
||||
type => 'string',
|
||||
default => 'default',
|
||||
enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'],
|
||||
optional => 1,
|
||||
},
|
||||
share => {
|
||||
description => "CIFS share.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "Password for accessing the share/datastore.",
|
||||
type => 'string',
|
||||
maxLength => 256,
|
||||
},
|
||||
domain => {
|
||||
description => "CIFS domain.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
maxLength => 256,
|
||||
},
|
||||
smbversion => {
|
||||
description =>
|
||||
"SMB protocol version. 'default' if not set, negotiates the highest SMB2+"
|
||||
. " version supported by both the client and server.",
|
||||
type => 'string',
|
||||
default => 'default',
|
||||
enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'],
|
||||
optional => 1,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
share => { fixed => 1 },
|
||||
subdir => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1},
|
||||
domain => { optional => 1},
|
||||
smbversion => { optional => 1},
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
share => { fixed => 1 },
|
||||
subdir => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1 },
|
||||
domain => { optional => 1 },
|
||||
smbversion => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
@ -175,12 +185,12 @@ sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %sensitive) = @_;
|
||||
|
||||
if (defined($sensitive{password})) {
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
} else {
|
||||
cifs_delete_credentials($storeid);
|
||||
cifs_delete_credentials($storeid);
|
||||
}
|
||||
|
||||
return;
|
||||
@ -192,12 +202,12 @@ sub on_update_hook {
|
||||
return if !exists($sensitive{password});
|
||||
|
||||
if (defined($sensitive{password})) {
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
} else {
|
||||
cifs_delete_credentials($storeid);
|
||||
cifs_delete_credentials($storeid);
|
||||
}
|
||||
|
||||
return;
|
||||
@ -215,10 +225,10 @@ sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
return undef
|
||||
if !cifs_is_mounted($scfg, $cache->{mountdata});
|
||||
if !cifs_is_mounted($scfg, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
@ -227,19 +237,18 @@ sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
if (!cifs_is_mounted($scfg, $cache->{mountdata})) {
|
||||
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
|
||||
if !-d $path;
|
||||
|
||||
cifs_mount($scfg, $storeid, $scfg->{smbversion},
|
||||
$scfg->{username}, $scfg->{domain});
|
||||
cifs_mount($scfg, $storeid, $scfg->{smbversion}, $scfg->{username}, $scfg->{domain});
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
@ -249,45 +258,48 @@ sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
if (cifs_is_mounted($scfg, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
sub check_connection {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
my $servicename = '//'.$scfg->{server}.'/'.$scfg->{share};
|
||||
my $servicename = '//' . $scfg->{server} . '/' . $scfg->{share};
|
||||
|
||||
my $cmd = ['/usr/bin/smbclient', $servicename, '-d', '0'];
|
||||
|
||||
if (defined($scfg->{smbversion}) && $scfg->{smbversion} ne 'default') {
|
||||
# max-protocol version, so basically only relevant for smb2 vs smb3
|
||||
push @$cmd, '-m', "smb" . int($scfg->{smbversion});
|
||||
# max-protocol version, so basically only relevant for smb2 vs smb3
|
||||
push @$cmd, '-m', "smb" . int($scfg->{smbversion});
|
||||
}
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
push @$cmd, '-U', $scfg->{username}, '-A', $cred_file;
|
||||
push @$cmd, '-W', $scfg->{domain} if $scfg->{domain};
|
||||
push @$cmd, '-U', $scfg->{username}, '-A', $cred_file;
|
||||
push @$cmd, '-W', $scfg->{domain} if $scfg->{domain};
|
||||
} else {
|
||||
push @$cmd, '-U', 'Guest','-N';
|
||||
push @$cmd, '-U', 'Guest', '-N';
|
||||
}
|
||||
push @$cmd, '-c', 'echo 1 0';
|
||||
|
||||
my $out_str;
|
||||
my $out = sub { $out_str .= shift };
|
||||
|
||||
eval { run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub {}) };
|
||||
eval {
|
||||
run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub { });
|
||||
};
|
||||
|
||||
if (my $err = $@) {
|
||||
die "$out_str\n" if defined($out_str) &&
|
||||
($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/);
|
||||
return 0;
|
||||
die "$out_str\n"
|
||||
if defined($out_str)
|
||||
&& ($out_str =~ m/NT_STATUS_(ACCESS_DENIED|INVALID_PARAMETER|LOGON_FAILURE)/);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
||||
@ -27,13 +27,13 @@ sub cephfs_is_mounted {
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ m#^ceph|fuse\.ceph-fuse# &&
|
||||
$_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$# &&
|
||||
$_->[1] eq $mountpoint
|
||||
$_->[2] =~ m#^ceph|fuse\.ceph-fuse#
|
||||
&& $_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$#
|
||||
&& $_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
|
||||
warn "A filesystem is already mounted on $mountpoint\n"
|
||||
if grep { $_->[1] eq $mountpoint } @$mountdata;
|
||||
if grep { $_->[1] eq $mountpoint } @$mountdata;
|
||||
|
||||
return undef;
|
||||
}
|
||||
@ -42,12 +42,12 @@ sub cephfs_is_mounted {
|
||||
sub systemd_netmount {
|
||||
my ($where, $type, $what, $opts) = @_;
|
||||
|
||||
# don't do default deps, systemd v241 generator produces ordering deps on both
|
||||
# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev
|
||||
# option. Over three corners this gets us an ordering cycle on shutdown, which
|
||||
# may make shutdown hang if the random cycle breaking hits the "wrong" unit to
|
||||
# delete.
|
||||
my $unit = <<"EOF";
|
||||
# don't do default deps, systemd v241 generator produces ordering deps on both
|
||||
# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev
|
||||
# option. Over three corners this gets us an ordering cycle on shutdown, which
|
||||
# may make shutdown hang if the random cycle breaking hits the "wrong" unit to
|
||||
# delete.
|
||||
my $unit = <<"EOF";
|
||||
[Unit]
|
||||
Description=${where}
|
||||
DefaultDependencies=no
|
||||
@ -71,7 +71,7 @@ EOF
|
||||
file_set_contents($unit_path, $unit);
|
||||
|
||||
run_command(['systemctl', 'daemon-reload'], errmsg => "daemon-reload error")
|
||||
if $daemon_needs_reload;
|
||||
if $daemon_needs_reload;
|
||||
run_command(['systemctl', 'start', $unit_fn], errmsg => "mount error");
|
||||
|
||||
}
|
||||
@ -91,16 +91,16 @@ sub cephfs_mount {
|
||||
|
||||
my @opts = ();
|
||||
if ($scfg->{fuse}) {
|
||||
$type = 'fuse.ceph';
|
||||
push @opts, "ceph.id=$cmd_option->{userid}";
|
||||
push @opts, "ceph.keyfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "ceph.conf=$configfile" if defined($configfile);
|
||||
push @opts, "ceph.client_fs=$fs_name" if defined($fs_name);
|
||||
$type = 'fuse.ceph';
|
||||
push @opts, "ceph.id=$cmd_option->{userid}";
|
||||
push @opts, "ceph.keyfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "ceph.conf=$configfile" if defined($configfile);
|
||||
push @opts, "ceph.client_fs=$fs_name" if defined($fs_name);
|
||||
} else {
|
||||
push @opts, "name=$cmd_option->{userid}";
|
||||
push @opts, "secretfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "conf=$configfile" if defined($configfile);
|
||||
push @opts, "fs=$fs_name" if defined($fs_name);
|
||||
push @opts, "name=$cmd_option->{userid}";
|
||||
push @opts, "secretfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "conf=$configfile" if defined($configfile);
|
||||
push @opts, "fs=$fs_name" if defined($fs_name);
|
||||
}
|
||||
|
||||
push @opts, $scfg->{options} if $scfg->{options};
|
||||
@ -116,47 +116,48 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
|
||||
{ backup => 1 }],
|
||||
'sensitive-properties' => { keyring => 1 },
|
||||
content =>
|
||||
[{ vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 }, { backup => 1 }],
|
||||
'sensitive-properties' => { keyring => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
fuse => {
|
||||
description => "Mount CephFS through FUSE.",
|
||||
type => 'boolean',
|
||||
},
|
||||
'fs-name' => {
|
||||
description => "The Ceph filesystem name.",
|
||||
type => 'string', format => 'pve-configid',
|
||||
},
|
||||
fuse => {
|
||||
description => "Mount CephFS through FUSE.",
|
||||
type => 'boolean',
|
||||
},
|
||||
'fs-name' => {
|
||||
description => "The Ceph filesystem name.",
|
||||
type => 'string',
|
||||
format => 'pve-configid',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
monhost => { optional => 1},
|
||||
nodes => { optional => 1 },
|
||||
subdir => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
fuse => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
keyring => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
'fs-name' => { optional => 1 },
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
monhost => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
subdir => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
fuse => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
keyring => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
'fs-name' => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
@ -182,11 +183,11 @@ sub on_update_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
if (exists($param{keyring})) {
|
||||
if (defined($param{keyring})) {
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
} else {
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
}
|
||||
if (defined($param{keyring})) {
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
} else {
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
@@ -215,14 +216,14 @@ sub activate_storage {

    # NOTE: mkpath may hang if storage is mounted but not reachable
    if (!cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
        my $path = $scfg->{path};

        $class->config_aware_base_mkdir($scfg, $path);

        die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
            if !-d $path;

        cephfs_mount($scfg, $storeid);
    }

    $class->SUPER::activate_storage($storeid, $scfg, $cache);
@@ -236,7 +237,7 @@ sub deactivate_storage {
    my $path = $scfg->{path};

    if (cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
        run_command(['/bin/umount', $path], errmsg => 'umount error');
    }
}
@@ -50,11 +50,14 @@ Possible formats a guest image can have.
# Those formats should either be allowed here or support for them should be phased out (at least in
# the storage layer). Can still be added again in the future, should any plugin provider request it.

PVE::JSONSchema::register_standard_option(
    'pve-storage-image-format',
    {
        type => 'string',
        enum => ['raw', 'qcow2', 'subvol', 'vmdk'],
        description => "Format of the image.",
    },
);

=pod
@@ -80,7 +83,7 @@ sub align_size_up : prototype($$) {
    my $padding = ($granularity - $size % $granularity) % $granularity;
    my $aligned_size = $size + $padding;
    print "size $size is not aligned to granularity $granularity, rounding up to $aligned_size\n"
        if $aligned_size != $size;
    return $aligned_size;
}
@@ -103,7 +106,7 @@ sub deallocate : prototype($$$) {
    $length = int($length);

    if (syscall(PVE::Syscall::fallocate, fileno($file_handle), $mode, $offset, $length) != 0) {
        die "fallocate: punch hole failed (offset: $offset, length: $length) - $!\n";
    }
}
@@ -24,66 +24,78 @@ sub type {

sub plugindata {
    return {
        content => [
            {
                images => 1,
                rootdir => 1,
                vztmpl => 1,
                iso => 1,
                backup => 1,
                snippets => 1,
                none => 1,
                import => 1,
            },
            { images => 1, rootdir => 1 },
        ],
        format => [{ raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 }, 'raw'],
        'sensitive-properties' => {},
    };
}

sub properties {
    return {
        path => {
            description => "File system path.",
            type => 'string',
            format => 'pve-storage-path',
        },
        mkdir => {
            description =>
                "Create the directory if it doesn't exist and populate it with default sub-dirs."
                . " NOTE: Deprecated, use the 'create-base-path' and 'create-subdirs' options instead.",
            type => 'boolean',
            default => 'yes',
        },
        'create-base-path' => {
            description => "Create the base directory if it doesn't exist.",
            type => 'boolean',
            default => 'yes',
        },
        'create-subdirs' => {
            description => "Populate the directory with the default structure.",
            type => 'boolean',
            default => 'yes',
        },
        is_mountpoint => {
            description => "Assume the given path is an externally managed mountpoint "
                . "and consider the storage offline if it is not mounted. "
                . "Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.",
            type => 'string',
            default => 'no',
        },
        bwlimit => get_standard_option('bwlimit'),
    };
}

sub options {
    return {
        path => { fixed => 1 },
        'content-dirs' => { optional => 1 },
        nodes => { optional => 1 },
        shared => { optional => 1 },
        disable => { optional => 1 },
        maxfiles => { optional => 1 },
        'prune-backups' => { optional => 1 },
        'max-protected-backups' => { optional => 1 },
        content => { optional => 1 },
        format => { optional => 1 },
        mkdir => { optional => 1 },
        'create-base-path' => { optional => 1 },
        'create-subdirs' => { optional => 1 },
        is_mountpoint => { optional => 1 },
        bwlimit => { optional => 1 },
        preallocation => { optional => 1 },
    };
}

# Storage implementation
@ -106,7 +118,7 @@ sub parse_is_mountpoint {
|
||||
my $is_mp = $scfg->{is_mountpoint};
|
||||
return undef if !defined $is_mp;
|
||||
if (defined(my $bool = PVE::JSONSchema::parse_boolean($is_mp))) {
|
||||
return $bool ? $scfg->{path} : undef;
|
||||
return $bool ? $scfg->{path} : undef;
|
||||
}
|
||||
return $is_mp; # contains a path
|
||||
}
|
||||
@ -122,8 +134,8 @@ my $get_volume_notes_impl = sub {
|
||||
$path .= $class->SUPER::NOTES_EXT;
|
||||
|
||||
if (-f $path) {
|
||||
my $data = PVE::Tools::file_get_contents($path);
|
||||
return eval { decode('UTF-8', $data, 1) } // $data;
|
||||
my $data = PVE::Tools::file_get_contents($path);
|
||||
return eval { decode('UTF-8', $data, 1) } // $data;
|
||||
}
|
||||
|
||||
return '';
|
||||
@ -147,10 +159,10 @@ my $update_volume_notes_impl = sub {
|
||||
$path .= $class->SUPER::NOTES_EXT;
|
||||
|
||||
if (defined($notes) && $notes ne '') {
|
||||
my $encoded = encode('UTF-8', $notes);
|
||||
PVE::Tools::file_set_contents($path, $encoded);
|
||||
my $encoded = encode('UTF-8', $notes);
|
||||
PVE::Tools::file_set_contents($path, $encoded);
|
||||
} else {
|
||||
unlink $path or $! == ENOENT or die "could not delete notes - $!\n";
|
||||
unlink $path or $! == ENOENT or die "could not delete notes - $!\n";
|
||||
}
|
||||
return;
|
||||
};
|
||||
@ -166,15 +178,15 @@ sub get_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $get_volume_notes_impl->($class, $scfg, $storeid, $volname);
|
||||
return $get_volume_notes_impl->($class, $scfg, $storeid, $volname);
|
||||
}
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
return if $vtype ne 'backup';
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
return -e PVE::Storage::protection_file_path($path) ? 1 : 0;
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
return -e PVE::Storage::protection_file_path($path) ? 1 : 0;
|
||||
}
|
||||
|
||||
return;
|
||||
@ -184,28 +196,29 @@ sub update_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $value);
|
||||
return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $value);
|
||||
}
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
die "only backups support attribute '$attribute'\n" if $vtype ne 'backup';
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $protection_path = PVE::Storage::protection_file_path($path);
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $protection_path = PVE::Storage::protection_file_path($path);
|
||||
|
||||
return if !((-e $protection_path) xor $value); # protection status already correct
|
||||
return if !((-e $protection_path) xor $value); # protection status already correct
|
||||
|
||||
if ($value) {
|
||||
my $fh = IO::File->new($protection_path, O_CREAT, 0644)
|
||||
or die "unable to create protection file '$protection_path' - $!\n";
|
||||
close($fh);
|
||||
} else {
|
||||
unlink $protection_path or $! == ENOENT
|
||||
or die "could not delete protection file '$protection_path' - $!\n";
|
||||
}
|
||||
if ($value) {
|
||||
my $fh = IO::File->new($protection_path, O_CREAT, 0644)
|
||||
or die "unable to create protection file '$protection_path' - $!\n";
|
||||
close($fh);
|
||||
} else {
|
||||
unlink $protection_path
|
||||
or $! == ENOENT
|
||||
or die "could not delete protection file '$protection_path' - $!\n";
|
||||
}
|
||||
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n";
|
||||
@ -215,16 +228,15 @@ sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
if (defined(my $mp = parse_is_mountpoint($scfg))) {
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
return undef if !path_is_mounted($mp, $cache->{mountdata});
|
||||
return undef if !path_is_mounted($mp, $cache->{mountdata});
|
||||
}
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
@ -232,8 +244,8 @@ sub activate_storage {
|
||||
|
||||
my $mp = parse_is_mountpoint($scfg);
|
||||
if (defined($mp) && !path_is_mounted($mp, $cache->{mountdata})) {
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory is expected to be a mount point but is not mounted: '$mp'\n";
|
||||
die "unable to activate storage '$storeid' - "
|
||||
. "directory is expected to be a mount point but is not mounted: '$mp'\n";
|
||||
}
|
||||
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
@ -242,10 +254,11 @@ sub activate_storage {
|
||||
|
||||
sub check_config {
|
||||
my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
my $opts = PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
|
||||
my $opts =
|
||||
PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
|
||||
return $opts if !$create;
|
||||
if ($opts->{path} !~ m|^/[-/a-zA-Z0-9_.@]+$|) {
|
||||
die "illegal path for directory storage: $opts->{path}\n";
|
||||
die "illegal path for directory storage: $opts->{path}\n";
|
||||
}
|
||||
# remove trailing slashes from path
|
||||
$opts->{path} = File::Spec->canonpath($opts->{path});
|
||||
@ -264,40 +277,40 @@ sub get_import_metadata {
|
||||
|
||||
my $isOva = 0;
|
||||
if ($fmt =~ m/^ova/) {
|
||||
$isOva = 1;
|
||||
push @$warnings, { type => 'ova-needs-extracting' };
|
||||
$isOva = 1;
|
||||
push @$warnings, { type => 'ova-needs-extracting' };
|
||||
}
|
||||
my $path = $class->path($scfg, $volname, $storeid, undef);
|
||||
my $res = PVE::GuestImport::OVF::parse_ovf($path, $isOva);
|
||||
my $disks = {};
|
||||
for my $disk ($res->{disks}->@*) {
|
||||
my $id = $disk->{disk_address};
|
||||
my $size = $disk->{virtual_size};
|
||||
my $path = $disk->{relative_path};
|
||||
my $volid;
|
||||
if ($isOva) {
|
||||
$volid = "$storeid:$volname/$path";
|
||||
} else {
|
||||
$volid = "$storeid:import/$path",
|
||||
}
|
||||
$disks->{$id} = {
|
||||
volid => $volid,
|
||||
defined($size) ? (size => $size) : (),
|
||||
};
|
||||
my $id = $disk->{disk_address};
|
||||
my $size = $disk->{virtual_size};
|
||||
my $path = $disk->{relative_path};
|
||||
my $volid;
|
||||
if ($isOva) {
|
||||
$volid = "$storeid:$volname/$path";
|
||||
} else {
|
||||
$volid = "$storeid:import/$path",;
|
||||
}
|
||||
$disks->{$id} = {
|
||||
volid => $volid,
|
||||
defined($size) ? (size => $size) : (),
|
||||
};
|
||||
}
|
||||
|
||||
if (defined($res->{qm}->{bios}) && $res->{qm}->{bios} eq 'ovmf') {
|
||||
$disks->{efidisk0} = 1;
|
||||
push @$warnings, { type => 'efi-state-lost', key => 'bios', value => 'ovmf' };
|
||||
$disks->{efidisk0} = 1;
|
||||
push @$warnings, { type => 'efi-state-lost', key => 'bios', value => 'ovmf' };
|
||||
}
|
||||
|
||||
return {
|
||||
type => 'vm',
|
||||
source => $volname,
|
||||
'create-args' => $res->{qm},
|
||||
'disks' => $disks,
|
||||
warnings => $warnings,
|
||||
net => $res->{net},
|
||||
type => 'vm',
|
||||
source => $volname,
|
||||
'create-args' => $res->{qm},
|
||||
'disks' => $disks,
|
||||
warnings => $warnings,
|
||||
net => $res->{net},
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@ -22,43 +22,48 @@ my $get_active_server = sub {
|
||||
my $defaultserver = $scfg->{server} ? $scfg->{server} : 'localhost';
|
||||
|
||||
if ($return_default_if_offline && !defined($scfg->{server2})) {
|
||||
# avoid delays (there is no backup server anyways)
|
||||
return $defaultserver;
|
||||
# avoid delays (there is no backup server anyways)
|
||||
return $defaultserver;
|
||||
}
|
||||
|
||||
my $serverlist = [ $defaultserver ];
|
||||
my $serverlist = [$defaultserver];
|
||||
push @$serverlist, $scfg->{server2} if $scfg->{server2};
|
||||
|
||||
my $ctime = time();
|
||||
foreach my $server (@$serverlist) {
|
||||
my $stat = $server_test_results->{$server};
|
||||
return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2);
|
||||
my $stat = $server_test_results->{$server};
|
||||
return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2);
|
||||
}
|
||||
|
||||
foreach my $server (@$serverlist) {
|
||||
my $status = 0;
|
||||
my $status = 0;
|
||||
|
||||
if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') {
|
||||
# ping the gluster daemon default port (24007) as heuristic
|
||||
$status = PVE::Network::tcp_ping($server, 24007, 2);
|
||||
if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') {
|
||||
# ping the gluster daemon default port (24007) as heuristic
|
||||
$status = PVE::Network::tcp_ping($server, 24007, 2);
|
||||
|
||||
} else {
|
||||
} else {
|
||||
|
||||
my $parser = sub {
|
||||
my $line = shift;
|
||||
my $parser = sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ m/Status: Started$/) {
|
||||
$status = 1;
|
||||
}
|
||||
};
|
||||
if ($line =~ m/Status: Started$/) {
|
||||
$status = 1;
|
||||
}
|
||||
};
|
||||
|
||||
my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];
|
||||
my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];
|
||||
|
||||
run_command($cmd, errmsg => "glusterfs error", errfunc => sub {}, outfunc => $parser);
|
||||
}
|
||||
run_command(
|
||||
$cmd,
|
||||
errmsg => "glusterfs error",
|
||||
errfunc => sub { },
|
||||
outfunc => $parser,
|
||||
);
|
||||
}
|
||||
|
||||
$server_test_results->{$server} = { time => time(), active => $status };
|
||||
return $server if $status;
|
||||
$server_test_results->{$server} = { time => time(), active => $status };
|
||||
return $server if $status;
|
||||
}
|
||||
|
||||
return $defaultserver if $return_default_if_offline;
|
||||
@@ -72,9 +77,9 @@ sub glusterfs_is_mounted {
    $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;

    return $mountpoint if grep {
        $_->[2] eq 'fuse.glusterfs'
            && $_->[0] =~ /^\S+:\Q$volume\E$/
            && $_->[1] eq $mountpoint
    } @$mountdata;
    return undef;
}
@ -97,55 +102,57 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1},
|
||||
{ images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
'sensitive-properties' => {},
|
||||
content => [
|
||||
{ images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
|
||||
{ images => 1 },
|
||||
],
|
||||
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
|
||||
'sensitive-properties' => {},
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
volume => {
|
||||
description => "Glusterfs Volume.",
|
||||
type => 'string',
|
||||
},
|
||||
server2 => {
|
||||
description => "Backup volfile server IP or DNS name.",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
requires => 'server',
|
||||
},
|
||||
transport => {
|
||||
description => "Gluster transport: tcp or rdma",
|
||||
type => 'string',
|
||||
enum => ['tcp', 'rdma', 'unix'],
|
||||
},
|
||||
volume => {
|
||||
description => "Glusterfs Volume.",
|
||||
type => 'string',
|
||||
},
|
||||
server2 => {
|
||||
description => "Backup volfile server IP or DNS name.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
requires => 'server',
|
||||
},
|
||||
transport => {
|
||||
description => "Gluster transport: tcp or rdma",
|
||||
type => 'string',
|
||||
enum => ['tcp', 'rdma', 'unix'],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
server => { optional => 1 },
|
||||
server2 => { optional => 1 },
|
||||
volume => { fixed => 1 },
|
||||
transport => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
path => { fixed => 1 },
|
||||
server => { optional => 1 },
|
||||
server2 => { optional => 1 },
|
||||
volume => { fixed => 1 },
|
||||
transport => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
@ -169,31 +176,30 @@ sub parse_name_dir {
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname);
|
||||
|
||||
# Note: qcow2/qed has internal snapshot, so path is always
|
||||
# the same (with or without snapshot => same file).
|
||||
die "can't snapshot this image format\n"
|
||||
if defined($snapname) && $format !~ m/^(qcow2|qed)$/;
|
||||
die "can't snapshot this image format\n"
|
||||
if defined($snapname) && $format !~ m/^(qcow2|qed)$/;
|
||||
|
||||
my $path = undef;
|
||||
if ($vtype eq 'images') {
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $transport = $scfg->{transport};
|
||||
my $protocol = "gluster";
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $transport = $scfg->{transport};
|
||||
my $protocol = "gluster";
|
||||
|
||||
if ($transport) {
|
||||
$protocol = "gluster+$transport";
|
||||
}
|
||||
if ($transport) {
|
||||
$protocol = "gluster+$transport";
|
||||
}
|
||||
|
||||
$path = "$protocol://$server/$glustervolume/images/$vmid/$name";
|
||||
$path = "$protocol://$server/$glustervolume/images/$vmid/$name";
|
||||
|
||||
} else {
|
||||
my $dir = $class->get_subdir($scfg, $vtype);
|
||||
$path = "$dir/$name";
|
||||
my $dir = $class->get_subdir($scfg, $vtype);
|
||||
$path = "$dir/$name";
|
||||
}
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
@ -205,7 +211,7 @@ sub clone_image {
|
||||
die "storage definition has no path\n" if !$scfg->{path};
|
||||
|
||||
my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images';
|
||||
|
||||
@ -232,8 +238,17 @@ sub clone_image {
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";
|
||||
|
||||
my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename",
|
||||
'-F', $format, '-f', 'qcow2', $volumepath];
|
||||
my $cmd = [
|
||||
'/usr/bin/qemu-img',
|
||||
'create',
|
||||
'-b',
|
||||
"../$basevmid/$basename",
|
||||
'-F',
|
||||
$format,
|
||||
'-f',
|
||||
'qcow2',
|
||||
$volumepath,
|
||||
];
|
||||
|
||||
run_command($cmd, errmsg => "unable to create image");
|
||||
|
||||
@ -272,9 +287,9 @@ sub alloc_image {
|
||||
|
||||
eval { run_command($cmd, errmsg => "unable to create image"); };
|
||||
if ($@) {
|
||||
unlink $path;
|
||||
rmdir $imagedir;
|
||||
die "$@";
|
||||
unlink $path;
|
||||
rmdir $imagedir;
|
||||
die "$@";
|
||||
}
|
||||
|
||||
return "$vmid/$name";
|
||||
@ -284,7 +299,7 @@ sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
@ -299,20 +314,20 @@ sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $volume = $scfg->{volume};
|
||||
|
||||
if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
|
||||
if !-d $path;
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
|
||||
glusterfs_mount($server, $volume, $path);
|
||||
glusterfs_mount($server, $volume, $path);
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
@ -322,14 +337,14 @@ sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $volume = $scfg->{volume};
|
||||
|
||||
if (glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -18,30 +18,36 @@ sub iscsi_ls {
|
||||
my ($scfg) = @_;
|
||||
|
||||
my $portal = $scfg->{portal};
|
||||
my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://'.$portal ];
|
||||
my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://' . $portal];
|
||||
my $list = {};
|
||||
my %unittobytes = (
|
||||
"k" => 1024,
|
||||
"M" => 1024*1024,
|
||||
"G" => 1024*1024*1024,
|
||||
"T" => 1024*1024*1024*1024
|
||||
"k" => 1024,
|
||||
"M" => 1024 * 1024,
|
||||
"G" => 1024 * 1024 * 1024,
|
||||
"T" => 1024 * 1024 * 1024 * 1024,
|
||||
);
|
||||
eval {
|
||||
run_command($cmd, errmsg => "iscsi error", errfunc => sub {}, outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
if( $line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/ ) {
|
||||
my $image = "lun".$1;
|
||||
my $size = $3;
|
||||
my $unit = $4;
|
||||
run_command(
|
||||
$cmd,
|
||||
errmsg => "iscsi error",
|
||||
errfunc => sub { },
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
if ($line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/
|
||||
) {
|
||||
my $image = "lun" . $1;
|
||||
my $size = $3;
|
||||
my $unit = $4;
|
||||
|
||||
$list->{$image} = {
|
||||
name => $image,
|
||||
size => $size * $unittobytes{$unit},
|
||||
format => 'raw',
|
||||
};
|
||||
}
|
||||
});
|
||||
$list->{$image} = {
|
||||
name => $image,
|
||||
size => $size * $unittobytes{$unit},
|
||||
format => 'raw',
|
||||
};
|
||||
}
|
||||
},
|
||||
);
|
||||
};
|
||||
|
||||
my $err = $@;
|
||||
@@ -58,9 +64,9 @@ sub type {

sub plugindata {
    return {
        content => [{ images => 1, none => 1 }, { images => 1 }],
        select_existing => 1,
        'sensitive-properties' => {},
    };
}
@@ -68,9 +74,9 @@ sub options {
    return {
        portal => { fixed => 1 },
        target => { fixed => 1 },
        nodes => { optional => 1 },
        disable => { optional => 1 },
        content => { optional => 1 },
        bwlimit => { optional => 1 },
    };
}
@@ -80,9 +86,8 @@ sub options {
sub parse_volname {
    my ($class, $volname) = @_;

    if ($volname =~ m/^lun(\d+)$/) {
        return ('images', $1, undef, undef, undef, undef, 'raw');
    }

    die "unable to parse iscsi volume name '$volname'\n";
@ -93,7 +98,7 @@ sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
die "volume snapshot is not possible on iscsi device\n"
|
||||
if defined($snapname);
|
||||
if defined($snapname);
|
||||
|
||||
my ($vtype, $lun, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
@ -138,20 +143,20 @@ sub list_images {
|
||||
|
||||
my $dat = iscsi_ls($scfg);
|
||||
foreach my $volname (keys %$dat) {
|
||||
my $volid = "$storeid:$volname";
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
|
||||
push @$res, $info;
|
||||
push @$res, $info;
|
||||
}
|
||||
|
||||
return $res;
|
||||
@@ -164,7 +169,7 @@ sub status {
    my $free = 0;
    my $used = 0;
    my $active = 1;
    return ($total, $free, $used, $active);

    return undef;
}
@ -228,17 +233,16 @@ sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { current => 1},
|
||||
copy => { current => 1 },
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if($snapname){
|
||||
$key = 'snap';
|
||||
}else{
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
@ -256,15 +260,15 @@ sub volume_export_formats {
|
||||
|
||||
sub volume_export {
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
) = @_;
|
||||
|
||||
die "volume export format $format not available for $class\n" if $format ne 'raw+size';
|
||||
@ -276,8 +280,8 @@ sub volume_export {
|
||||
|
||||
my $json = '';
|
||||
run_command(
|
||||
['/usr/bin/qemu-img', 'info', '-f', 'raw', '--output=json', $file],
|
||||
outfunc => sub { $json .= shift },
|
||||
['/usr/bin/qemu-img', 'info', '-f', 'raw', '--output=json', $file],
|
||||
outfunc => sub { $json .= shift },
|
||||
);
|
||||
die "failed to query size information for '$file' with qemu-img\n" if !$json;
|
||||
my $info = eval { decode_json($json) };
|
||||
@ -289,8 +293,8 @@ sub volume_export {
|
||||
|
||||
PVE::Storage::Plugin::write_common_header($fh, $size);
|
||||
run_command(
|
||||
['qemu-img', 'dd', 'bs=64k', "if=$file", '-f', 'raw', '-O', 'raw'],
|
||||
output => '>&'.fileno($fh),
|
||||
['qemu-img', 'dd', 'bs=64k', "if=$file", '-f', 'raw', '-O', 'raw'],
|
||||
output => '>&' . fileno($fh),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -9,7 +9,8 @@ use IO::File;

use PVE::JSONSchema qw(get_standard_option);
use PVE::Storage::Plugin;
use PVE::Tools
    qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE);

use base qw(PVE::Storage::Plugin);
@@ -25,8 +26,8 @@ my sub assert_iscsi_support {
    $found_iscsi_adm_exe = -x $ISCSIADM;

    if (!$found_iscsi_adm_exe) {
        die "error: no iscsi support - please install open-iscsi\n" if !$noerr;
        warn "warning: no iscsi support - please install open-iscsi\n";
    }
    return $found_iscsi_adm_exe;
}
@ -41,18 +42,24 @@ sub iscsi_session_list {
|
||||
|
||||
my $res = {};
|
||||
eval {
|
||||
run_command($cmd, errmsg => 'iscsi session scan failed', outfunc => sub {
|
||||
my $line = shift;
|
||||
# example: tcp: [1] 192.168.122.252:3260,1 iqn.2003-01.org.linux-iscsi.proxmox-nfs.x8664:sn.00567885ba8f (non-flash)
|
||||
if ($line =~ m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/) {
|
||||
my ($session_id, $portal, $target) = ($1, $2, $3);
|
||||
# there can be several sessions per target (multipath)
|
||||
push @{$res->{$target}}, { session_id => $session_id, portal => $portal };
|
||||
}
|
||||
});
|
||||
run_command(
|
||||
$cmd,
|
||||
errmsg => 'iscsi session scan failed',
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
# example: tcp: [1] 192.168.122.252:3260,1 iqn.2003-01.org.linux-iscsi.proxmox-nfs.x8664:sn.00567885ba8f (non-flash)
|
||||
if ($line =~
|
||||
m/^tcp:\s+\[(\S+)\]\s+((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s+\S+?\s*$/
|
||||
) {
|
||||
my ($session_id, $portal, $target) = ($1, $2, $3);
|
||||
# there can be several sessions per target (multipath)
|
||||
push @{ $res->{$target} }, { session_id => $session_id, portal => $portal };
|
||||
}
|
||||
},
|
||||
);
|
||||
};
|
||||
if (my $err = $@) {
|
||||
die $err if $err !~ m/: No active sessions.$/i;
|
||||
die $err if $err !~ m/: No active sessions.$/i;
|
||||
}
|
||||
|
||||
return $res;
|
||||
@@ -62,7 +69,7 @@ sub iscsi_test_session {
    my ($sid) = @_;

    if ($sid !~ m/^[0-9]+$/) {
        die "session_id: '$sid' is not a number\n";
    }
    my $state = file_read_firstline("/sys/class/iscsi_session/session${sid}/state");
    return defined($state) && $state eq 'LOGGED_IN';
@ -73,13 +80,13 @@ sub iscsi_test_portal {
|
||||
$cache //= {};
|
||||
|
||||
if (defined($target)) {
|
||||
# check session state instead if available
|
||||
my $sessions = iscsi_session($cache, $target);
|
||||
for my $session ($sessions->@*) {
|
||||
next if $session->{portal} ne $portal;
|
||||
my $state = iscsi_test_session($session->{session_id});
|
||||
return $state if $state;
|
||||
}
|
||||
# check session state instead if available
|
||||
my $sessions = iscsi_session($cache, $target);
|
||||
for my $session ($sessions->@*) {
|
||||
next if $session->{portal} ne $portal;
|
||||
my $state = iscsi_test_session($session->{session_id});
|
||||
return $state if $state;
|
||||
}
|
||||
}
|
||||
# check portal via tcp
|
||||
my ($server, $port) = PVE::Tools::parse_host_and_port($portal);
|
||||
@ -95,25 +102,28 @@ sub iscsi_portals {
|
||||
my $res = [];
|
||||
my $cmd = [$ISCSIADM, '--mode', 'node'];
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ $ISCSI_TARGET_RE) {
|
||||
my ($portal, $portal_target) = ($1, $2);
|
||||
if ($portal_target eq $target) {
|
||||
push @{$res}, $portal;
|
||||
}
|
||||
}
|
||||
});
|
||||
if ($line =~ $ISCSI_TARGET_RE) {
|
||||
my ($portal, $portal_target) = ($1, $2);
|
||||
if ($portal_target eq $target) {
|
||||
push @{$res}, $portal;
|
||||
}
|
||||
}
|
||||
},
|
||||
);
|
||||
};
|
||||
|
||||
my $err = $@;
|
||||
warn $err if $err;
|
||||
|
||||
if ($err || !scalar(@$res)) {
|
||||
return [ $portal_in ];
|
||||
return [$portal_in];
|
||||
} else {
|
||||
return $res;
|
||||
return $res;
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,24 +134,27 @@ sub iscsi_discovery {
|
||||
|
||||
my $res = {};
|
||||
for my $portal ($portals->@*) {
|
||||
next if !iscsi_test_portal($target_in, $portal, $cache); # fixme: raise exception here?
|
||||
next if !iscsi_test_portal($target_in, $portal, $cache); # fixme: raise exception here?
|
||||
|
||||
my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal];
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal];
|
||||
eval {
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ $ISCSI_TARGET_RE) {
|
||||
my ($portal, $target) = ($1, $2);
|
||||
# one target can have more than one portal (multipath)
|
||||
# and sendtargets should return all of them in single call
|
||||
push @{$res->{$target}}, $portal;
|
||||
}
|
||||
});
|
||||
};
|
||||
if ($line =~ $ISCSI_TARGET_RE) {
|
||||
my ($portal, $target) = ($1, $2);
|
||||
# one target can have more than one portal (multipath)
|
||||
# and sendtargets should return all of them in single call
|
||||
push @{ $res->{$target} }, $portal;
|
||||
}
|
||||
},
|
||||
);
|
||||
};
|
||||
|
||||
# In case of multipath we can stop after receiving targets from any available portal
|
||||
last if scalar(keys %$res) > 0;
|
||||
# In case of multipath we can stop after receiving targets from any available portal
|
||||
last if scalar(keys %$res) > 0;
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -157,19 +170,24 @@ sub iscsi_login {
|
||||
|
||||
# Disable retries to avoid blocking pvestatd for too long, next iteration will retry anyway
|
||||
eval {
|
||||
my $cmd = [
|
||||
$ISCSIADM,
|
||||
'--mode', 'node',
|
||||
'--targetname', $target,
|
||||
'--op', 'update',
|
||||
'--name', 'node.session.initial_login_retry_max',
|
||||
'--value', '0',
|
||||
];
|
||||
run_command($cmd);
|
||||
my $cmd = [
|
||||
$ISCSIADM,
|
||||
'--mode',
|
||||
'node',
|
||||
'--targetname',
|
||||
$target,
|
||||
'--op',
|
||||
'update',
|
||||
'--name',
|
||||
'node.session.initial_login_retry_max',
|
||||
'--value',
|
||||
'0',
|
||||
];
|
||||
run_command($cmd);
|
||||
};
|
||||
warn $@ if $@;
|
||||
|
||||
run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--login']);
|
||||
run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--login']);
|
||||
}
|
||||
|
||||
sub iscsi_logout {
|
||||
@ -190,22 +208,24 @@ sub iscsi_session_rescan {
|
||||
my $rstat = stat($rescan_filename);
|
||||
|
||||
if (!$rstat) {
|
||||
if (my $fh = IO::File->new($rescan_filename, "a")) {
|
||||
utime undef, undef, $fh;
|
||||
close($fh);
|
||||
}
|
||||
if (my $fh = IO::File->new($rescan_filename, "a")) {
|
||||
utime undef, undef, $fh;
|
||||
close($fh);
|
||||
}
|
||||
} else {
|
||||
my $atime = $rstat->atime;
|
||||
my $tdiff = time() - $atime;
|
||||
# avoid frequent rescans
|
||||
return if !($tdiff < 0 || $tdiff > 10);
|
||||
utime undef, undef, $rescan_filename;
|
||||
my $atime = $rstat->atime;
|
||||
my $tdiff = time() - $atime;
|
||||
# avoid frequent rescans
|
||||
return if !($tdiff < 0 || $tdiff > 10);
|
||||
utime undef, undef, $rescan_filename;
|
||||
}
|
||||
|
||||
foreach my $session (@$session_list) {
|
||||
my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session->{session_id}, '--rescan'];
|
||||
eval { run_command($cmd, outfunc => sub {}); };
|
||||
warn $@ if $@;
|
||||
my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session->{session_id}, '--rescan'];
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub { });
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
}
|
||||
|
||||
@ -216,19 +236,19 @@ sub load_stable_scsi_paths {
|
||||
my $stabledir = "/dev/disk/by-id";
|
||||
|
||||
if (my $dh = IO::Dir->new($stabledir)) {
|
||||
foreach my $tmp (sort $dh->read) {
|
||||
# exclude filenames with part in name (same disk but partitions)
|
||||
# use only filenames with scsi(with multipath i have the same device
|
||||
# with dm-uuid-mpath , dm-name and scsi in name)
|
||||
if($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) {
|
||||
my $path = "$stabledir/$tmp";
|
||||
my $bdevdest = readlink($path);
|
||||
if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) {
|
||||
$stable_paths->{$1}=$tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
$dh->close;
|
||||
foreach my $tmp (sort $dh->read) {
|
||||
# exclude filenames with part in name (same disk but partitions)
|
||||
# use only filenames with scsi(with multipath i have the same device
|
||||
# with dm-uuid-mpath , dm-name and scsi in name)
|
||||
if ($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) {
|
||||
my $path = "$stabledir/$tmp";
|
||||
my $bdevdest = readlink($path);
|
||||
if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) {
|
||||
$stable_paths->{$1} = $tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
$dh->close;
|
||||
}
|
||||
return $stable_paths;
|
||||
}
|
||||
@ -241,56 +261,67 @@ sub iscsi_device_list {
|
||||
|
||||
my $stable_paths = load_stable_scsi_paths();
|
||||
|
||||
dir_glob_foreach($dirname, 'session(\d+)', sub {
|
||||
my ($ent, $session) = @_;
|
||||
dir_glob_foreach(
|
||||
$dirname,
|
||||
'session(\d+)',
|
||||
sub {
|
||||
my ($ent, $session) = @_;
|
||||
|
||||
my $target = file_read_firstline("$dirname/$ent/targetname");
|
||||
return if !$target;
|
||||
my $target = file_read_firstline("$dirname/$ent/targetname");
|
||||
return if !$target;
|
||||
|
||||
my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*');
|
||||
return if !defined($host);
|
||||
my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*');
|
||||
return if !defined($host);
|
||||
|
||||
dir_glob_foreach("/sys/bus/scsi/devices", "$host:" . '(\d+):(\d+):(\d+)', sub {
|
||||
my ($tmp, $channel, $id, $lun) = @_;
|
||||
dir_glob_foreach(
|
||||
"/sys/bus/scsi/devices",
|
||||
"$host:" . '(\d+):(\d+):(\d+)',
|
||||
sub {
|
||||
my ($tmp, $channel, $id, $lun) = @_;
|
||||
|
||||
my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type");
|
||||
return if !defined($type) || $type ne '0'; # list disks only
|
||||
my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type");
|
||||
return if !defined($type) || $type ne '0'; # list disks only
|
||||
|
||||
my $bdev;
|
||||
if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels
|
||||
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)');
|
||||
} else {
|
||||
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)');
|
||||
}
|
||||
return if !$bdev;
|
||||
my $bdev;
|
||||
if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels
|
||||
(undef, $bdev) =
|
||||
dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)');
|
||||
} else {
|
||||
(undef, $bdev) =
|
||||
dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)');
|
||||
}
|
||||
return if !$bdev;
|
||||
|
||||
#check multipath
|
||||
if (-d "/sys/block/$bdev/holders") {
|
||||
my $multipathdev = dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*');
|
||||
$bdev = $multipathdev if $multipathdev;
|
||||
}
|
||||
#check multipath
|
||||
if (-d "/sys/block/$bdev/holders") {
|
||||
my $multipathdev =
|
||||
dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*');
|
||||
$bdev = $multipathdev if $multipathdev;
|
||||
}
|
||||
|
||||
my $blockdev = $stable_paths->{$bdev};
|
||||
return if !$blockdev;
|
||||
my $blockdev = $stable_paths->{$bdev};
|
||||
return if !$blockdev;
|
||||
|
||||
my $size = file_read_firstline("/sys/block/$bdev/size");
|
||||
return if !$size;
|
||||
my $size = file_read_firstline("/sys/block/$bdev/size");
|
||||
return if !$size;
|
||||
|
||||
my $volid = "$channel.$id.$lun.$blockdev";
|
||||
my $volid = "$channel.$id.$lun.$blockdev";
|
||||
|
||||
$res->{$target}->{$volid} = {
|
||||
'format' => 'raw',
|
||||
'size' => int($size * 512),
|
||||
'vmid' => 0, # not assigned to any vm
|
||||
'channel' => int($channel),
|
||||
'id' => int($id),
|
||||
'lun' => int($lun),
|
||||
};
|
||||
$res->{$target}->{$volid} = {
|
||||
'format' => 'raw',
|
||||
'size' => int($size * 512),
|
||||
'vmid' => 0, # not assigned to any vm
|
||||
'channel' => int($channel),
|
||||
'id' => int($id),
|
||||
'lun' => int($lun),
|
||||
};
|
||||
|
||||
#print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n";
|
||||
});
|
||||
#print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n";
|
||||
},
|
||||
);
|
||||
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
return $res;
|
||||
}
|
||||
@ -303,22 +334,23 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, none => 1}, { images => 1 }],
|
||||
select_existing => 1,
|
||||
'sensitive-properties' => {},
|
||||
content => [{ images => 1, none => 1 }, { images => 1 }],
|
||||
select_existing => 1,
|
||||
'sensitive-properties' => {},
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
target => {
|
||||
description => "iSCSI target.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string', format => 'pve-storage-portal-dns',
|
||||
},
|
||||
target => {
|
||||
description => "iSCSI target.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string',
|
||||
format => 'pve-storage-portal-dns',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@ -326,10 +358,10 @@ sub options {
|
||||
return {
|
||||
portal => { fixed => 1 },
|
||||
target => { fixed => 1 },
|
||||
nodes => { optional => 1},
|
||||
disable => { optional => 1},
|
||||
content => { optional => 1},
|
||||
bwlimit => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
@ -339,7 +371,7 @@ sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m!^\d+\.\d+\.\d+\.([^/\s]+)$!) {
|
||||
return ('images', $1, undef, undef, undef, undef, 'raw');
|
||||
return ('images', $1, undef, undef, undef, undef, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse iscsi volume name '$volname'\n";
|
||||
@ -389,7 +421,7 @@ sub list_volumes {
|
||||
my $res = $class->list_images($storeid, $scfg, $vmid);
|
||||
|
||||
for my $item (@$res) {
|
||||
$item->{content} = 'images'; # we only have images
|
||||
$item->{content} = 'images'; # we only have images
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -408,23 +440,23 @@ sub list_images {
|
||||
|
||||
if (my $dat = $cache->{iscsi_devices}->{$target}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
push @$res, $info;
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -455,23 +487,23 @@ sub activate_storage {
|
||||
my $do_login = !defined($sessions);
|
||||
|
||||
if (!$do_login) {
|
||||
# We should check that sessions for all portals are available
|
||||
my $session_portals = [ map { $_->{portal} } (@$sessions) ];
|
||||
# We should check that sessions for all portals are available
|
||||
my $session_portals = [map { $_->{portal} } (@$sessions)];
|
||||
|
||||
for my $portal (@$portals) {
|
||||
if (!grep(/^\Q$portal\E$/, @$session_portals)) {
|
||||
$do_login = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
for my $portal (@$portals) {
|
||||
if (!grep(/^\Q$portal\E$/, @$session_portals)) {
|
||||
$do_login = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ($do_login) {
|
||||
eval { iscsi_login($scfg->{target}, $portals, $cache); };
|
||||
warn $@ if $@;
|
||||
eval { iscsi_login($scfg->{target}, $portals, $cache); };
|
||||
warn $@ if $@;
|
||||
} else {
|
||||
# make sure we get all devices
|
||||
iscsi_session_rescan($sessions);
|
||||
# make sure we get all devices
|
||||
iscsi_session_rescan($sessions);
|
||||
}
|
||||
}
|
||||
|
||||
@ -481,7 +513,7 @@ sub deactivate_storage {
|
||||
return if !assert_iscsi_support(1);
|
||||
|
||||
if (defined(iscsi_session($cache, $scfg->{target}))) {
|
||||
iscsi_logout($scfg->{target});
|
||||
iscsi_logout($scfg->{target});
|
||||
}
|
||||
}
|
||||
|
||||
@ -490,17 +522,17 @@ my $check_devices_part_of_target = sub {
|
||||
|
||||
my $found = 0;
|
||||
for my $path (@$device_paths) {
|
||||
if ($path =~ m!^/devices/platform/host\d+/session(\d+)/target\d+:\d:\d!) {
|
||||
my $session_id = $1;
|
||||
if ($path =~ m!^/devices/platform/host\d+/session(\d+)/target\d+:\d:\d!) {
|
||||
my $session_id = $1;
|
||||
|
||||
my $targetname = file_read_firstline(
|
||||
"/sys/class/iscsi_session/session$session_id/targetname",
|
||||
);
|
||||
if ($targetname && ($targetname eq $target)) {
|
||||
$found = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
my $targetname = file_read_firstline(
|
||||
"/sys/class/iscsi_session/session$session_id/targetname",
|
||||
);
|
||||
if ($targetname && ($targetname eq $target)) {
|
||||
$found = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
return $found;
|
||||
};
|
||||
@ -514,15 +546,15 @@ my $udev_query_path = sub {
|
||||
|
||||
my $device_path;
|
||||
my $cmd = [
|
||||
'udevadm',
|
||||
'info',
|
||||
'--query=path',
|
||||
$dev,
|
||||
'udevadm', 'info', '--query=path', $dev,
|
||||
];
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub {
|
||||
$device_path = shift;
|
||||
});
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
$device_path = shift;
|
||||
},
|
||||
);
|
||||
};
|
||||
die "failed to query device path for '$dev': $@\n" if $@;
|
||||
|
||||
@ -540,23 +572,27 @@ $resolve_virtual_devices = sub {
|
||||
|
||||
my $resolved = [];
|
||||
if ($dev =~ m!^/devices/virtual/block/!) {
|
||||
dir_glob_foreach("/sys/$dev/slaves", '([^.].+)', sub {
|
||||
my ($slave) = @_;
|
||||
dir_glob_foreach(
|
||||
"/sys/$dev/slaves",
|
||||
'([^.].+)',
|
||||
sub {
|
||||
my ($slave) = @_;
|
||||
|
||||
# don't check devices multiple times
|
||||
return if $visited->{$slave};
|
||||
$visited->{$slave} = 1;
|
||||
# don't check devices multiple times
|
||||
return if $visited->{$slave};
|
||||
$visited->{$slave} = 1;
|
||||
|
||||
my $path;
|
||||
eval { $path = $udev_query_path->("/dev/$slave"); };
|
||||
return if $@;
|
||||
my $path;
|
||||
eval { $path = $udev_query_path->("/dev/$slave"); };
|
||||
return if $@;
|
||||
|
||||
my $nested_resolved = $resolve_virtual_devices->($path, $visited);
|
||||
my $nested_resolved = $resolve_virtual_devices->($path, $visited);
|
||||
|
||||
push @$resolved, @$nested_resolved;
|
||||
});
|
||||
push @$resolved, @$nested_resolved;
|
||||
},
|
||||
);
|
||||
} else {
|
||||
push @$resolved, $dev;
|
||||
push @$resolved, $dev;
|
||||
}
|
||||
|
||||
return $resolved;
|
||||
@ -570,7 +606,7 @@ sub activate_volume {
|
||||
die "failed to get realpath for '$path': $!\n" if !$real_path;
|
||||
# in case $path does not exist or is not a symlink, check if the returned
|
||||
# $real_path is a block device
|
||||
die "resolved realpath '$real_path' is not a block device\n" if ! -b $real_path;
|
||||
die "resolved realpath '$real_path' is not a block device\n" if !-b $real_path;
|
||||
|
||||
my $device_path = $udev_query_path->($real_path);
|
||||
my $resolved_paths = $resolve_virtual_devices->($device_path);
|
||||
@ -585,8 +621,8 @@ sub check_connection {
|
||||
my $portals = iscsi_portals($scfg->{target}, $scfg->{portal});
|
||||
|
||||
for my $portal (@$portals) {
|
||||
my $result = iscsi_test_portal($scfg->{target}, $portal, $cache);
|
||||
return $result if $result;
|
||||
my $result = iscsi_test_portal($scfg->{target}, $portal, $cache);
|
||||
return $result if $result;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -601,17 +637,16 @@ sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { current => 1},
|
||||
copy => { current => 1 },
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname){
|
||||
$key = 'snap';
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
@ -629,15 +664,15 @@ sub volume_export_formats {
|
||||
|
||||
sub volume_export {
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
) = @_;
|
||||
|
||||
die "volume export format $format not available for $class\n" if $format ne 'raw+size';
|
||||
@ -647,13 +682,16 @@ sub volume_export {
|
||||
|
||||
my $file = $class->filesystem_path($scfg, $volname, $snapshot);
|
||||
my $size;
|
||||
run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
|
||||
$size = int($1);
|
||||
});
|
||||
run_command(
|
||||
['/sbin/blockdev', '--getsize64', $file],
|
||||
outfunc => sub {
|
||||
my ($line) = @_;
|
||||
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
|
||||
$size = int($1);
|
||||
},
|
||||
);
|
||||
PVE::Storage::Plugin::write_common_header($fh, $size);
|
||||
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh));
|
||||
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . fileno($fh));
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ my $ignore_no_medium_warnings = sub {
    # ignore those, most of the time they're from (virtual) IPMI/iKVM devices
    # and just spam the log..
    if ($line !~ /open failed: No medium found/) {
        print STDERR "$line\n";
    }
};
@ -32,35 +32,51 @@ sub lvm_pv_info {
|
||||
my $has_label = 0;
|
||||
|
||||
my $cmd = ['/usr/bin/file', '-L', '-s', $device];
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
$has_label = 1 if $line =~ m/LVM2/;
|
||||
});
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
$has_label = 1 if $line =~ m/LVM2/;
|
||||
},
|
||||
);
|
||||
|
||||
return undef if !$has_label;
|
||||
|
||||
$cmd = ['/sbin/pvs', '--separator', ':', '--noheadings', '--units', 'k',
|
||||
'--unbuffered', '--nosuffix', '--options',
|
||||
'pv_name,pv_size,vg_name,pv_uuid', $device];
|
||||
$cmd = [
|
||||
'/sbin/pvs',
|
||||
'--separator',
|
||||
':',
|
||||
'--noheadings',
|
||||
'--units',
|
||||
'k',
|
||||
'--unbuffered',
|
||||
'--nosuffix',
|
||||
'--options',
|
||||
'pv_name,pv_size,vg_name,pv_uuid',
|
||||
$device,
|
||||
];
|
||||
|
||||
my $pvinfo;
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
$line = trim($line);
|
||||
$line = trim($line);
|
||||
|
||||
my ($pvname, $size, $vgname, $uuid) = split(':', $line);
|
||||
my ($pvname, $size, $vgname, $uuid) = split(':', $line);
|
||||
|
||||
die "found multiple pvs entries for device '$device'\n"
|
||||
if $pvinfo;
|
||||
die "found multiple pvs entries for device '$device'\n"
|
||||
if $pvinfo;
|
||||
|
||||
$pvinfo = {
|
||||
pvname => $pvname,
|
||||
size => int($size),
|
||||
vgname => $vgname,
|
||||
uuid => $uuid,
|
||||
};
|
||||
});
|
||||
$pvinfo = {
|
||||
pvname => $pvname,
|
||||
size => int($size),
|
||||
vgname => $vgname,
|
||||
uuid => $uuid,
|
||||
};
|
||||
},
|
||||
);
|
||||
|
||||
return $pvinfo;
|
||||
}
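
# A condensed sketch of the parsing pattern this file uses for all LVM tools:
# the command is run with '--separator :' and '--noheadings', and each output
# line is split on ':' inside an outfunc callback. The field names mirror the
# pv_name,pv_size,vg_name,pv_uuid options requested above; the sub name is
# only illustrative.
use strict;
use warnings;
use PVE::Tools qw(run_command trim);

sub pvs_parse_sketch {
    my ($device) = @_;

    my $pvinfo;
    run_command(
        [
            '/sbin/pvs', '--separator', ':', '--noheadings', '--units', 'k',
            '--unbuffered', '--nosuffix',
            '--options', 'pv_name,pv_size,vg_name,pv_uuid', $device,
        ],
        outfunc => sub {
            my ($line) = @_;
            my ($pvname, $size, $vgname, $uuid) = split(':', trim($line));
            die "found multiple pvs entries for device '$device'\n" if $pvinfo;
            $pvinfo = { pvname => $pvname, size => int($size), vgname => $vgname, uuid => $uuid };
        },
    );
    return $pvinfo;
}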
|
||||
@ -69,9 +85,9 @@ sub clear_first_sector {
|
||||
my ($dev) = shift;
|
||||
|
||||
if (my $fh = IO::File->new($dev, "w")) {
|
||||
my $buf = 0 x 512;
|
||||
syswrite $fh, $buf;
|
||||
$fh->close();
|
||||
my $buf = 0 x 512;
|
||||
syswrite $fh, $buf;
|
||||
$fh->close();
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,8 +97,8 @@ sub lvm_create_volume_group {
|
||||
my $res = lvm_pv_info($device);
|
||||
|
||||
if ($res->{vgname}) {
|
||||
return if $res->{vgname} eq $vgname; # already created
|
||||
die "device '$device' is already used by volume group '$res->{vgname}'\n";
|
||||
return if $res->{vgname} eq $vgname; # already created
|
||||
die "device '$device' is already used by volume group '$res->{vgname}'\n";
|
||||
}
|
||||
|
||||
clear_first_sector($device); # else pvcreate fails
|
||||
@ -96,58 +112,76 @@ sub lvm_create_volume_group {
|
||||
$cmd = ['/sbin/vgcreate', $vgname, $device];
|
||||
# push @$cmd, '-c', 'y' if $shared; # we do not use this yet
|
||||
|
||||
run_command($cmd, errmsg => "vgcreate $vgname $device error", errfunc => $ignore_no_medium_warnings, outfunc => $ignore_no_medium_warnings);
|
||||
run_command(
|
||||
$cmd,
|
||||
errmsg => "vgcreate $vgname $device error",
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
outfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
}
|
||||
|
||||
sub lvm_destroy_volume_group {
|
||||
my ($vgname) = @_;
|
||||
|
||||
run_command(
|
||||
['vgremove', '-y', $vgname],
|
||||
errmsg => "unable to remove volume group $vgname",
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
outfunc => $ignore_no_medium_warnings,
|
||||
['vgremove', '-y', $vgname],
|
||||
errmsg => "unable to remove volume group $vgname",
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
outfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
}
|
||||
|
||||
sub lvm_vgs {
|
||||
my ($includepvs) = @_;
|
||||
|
||||
my $cmd = ['/sbin/vgs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix', '--options'];
|
||||
my $cmd = [
|
||||
'/sbin/vgs',
|
||||
'--separator',
|
||||
':',
|
||||
'--noheadings',
|
||||
'--units',
|
||||
'b',
|
||||
'--unbuffered',
|
||||
'--nosuffix',
|
||||
'--options',
|
||||
];
|
||||
|
||||
my $cols = [qw(vg_name vg_size vg_free lv_count)];
|
||||
|
||||
if ($includepvs) {
|
||||
push @$cols, qw(pv_name pv_size pv_free);
|
||||
push @$cols, qw(pv_name pv_size pv_free);
|
||||
}
|
||||
|
||||
push @$cmd, join(',', @$cols);
|
||||
|
||||
my $vgs = {};
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
|
||||
my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) = split (':', $line);
|
||||
my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) =
|
||||
split(':', $line);
|
||||
|
||||
$vgs->{$name} //= {
|
||||
size => int ($size),
|
||||
free => int ($free),
|
||||
lvcount => int($lvcount)
|
||||
};
|
||||
$vgs->{$name} //= {
|
||||
size => int($size),
|
||||
free => int($free),
|
||||
lvcount => int($lvcount),
|
||||
};
|
||||
|
||||
if (defined($pvname) && defined($pvsize) && defined($pvfree)) {
|
||||
push @{$vgs->{$name}->{pvs}}, {
|
||||
name => $pvname,
|
||||
size => int($pvsize),
|
||||
free => int($pvfree),
|
||||
};
|
||||
}
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
if (defined($pvname) && defined($pvsize) && defined($pvfree)) {
|
||||
push @{ $vgs->{$name}->{pvs} },
|
||||
{
|
||||
name => $pvname,
|
||||
size => int($pvsize),
|
||||
free => int($pvfree),
|
||||
};
|
||||
}
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
@ -161,49 +195,73 @@ sub lvm_vgs {
|
||||
sub lvm_list_volumes {
|
||||
my ($vgname) = @_;
|
||||
|
||||
my $option_list = 'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time';
|
||||
my $option_list =
|
||||
'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time';
|
||||
|
||||
my $cmd = [
|
||||
'/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix',
|
||||
'--config', 'report/time_format="%s"',
|
||||
'--options', $option_list,
|
||||
'/sbin/lvs',
|
||||
'--separator',
|
||||
':',
|
||||
'--noheadings',
|
||||
'--units',
|
||||
'b',
|
||||
'--unbuffered',
|
||||
'--nosuffix',
|
||||
'--config',
|
||||
'report/time_format="%s"',
|
||||
'--options',
|
||||
$option_list,
|
||||
];
|
||||
|
||||
push @$cmd, $vgname if $vgname;
|
||||
|
||||
my $lvs = {};
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
run_command(
|
||||
$cmd,
|
||||
outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
$line = trim($line);
|
||||
$line = trim($line);
|
||||
|
||||
my ($vg_name, $lv_name, $lv_size, $lv_attr, $pool_lv, $data_percent, $meta_percent, $snap_percent, $uuid, $tags, $meta_size, $ctime) = split(':', $line);
|
||||
return if !$vg_name;
|
||||
return if !$lv_name;
|
||||
my (
|
||||
$vg_name,
|
||||
$lv_name,
|
||||
$lv_size,
|
||||
$lv_attr,
|
||||
$pool_lv,
|
||||
$data_percent,
|
||||
$meta_percent,
|
||||
$snap_percent,
|
||||
$uuid,
|
||||
$tags,
|
||||
$meta_size,
|
||||
$ctime,
|
||||
) = split(':', $line);
|
||||
return if !$vg_name;
|
||||
return if !$lv_name;
|
||||
|
||||
my $lv_type = substr($lv_attr, 0, 1);
|
||||
my $lv_type = substr($lv_attr, 0, 1);
|
||||
|
||||
my $d = {
|
||||
lv_size => int($lv_size),
|
||||
lv_state => substr($lv_attr, 4, 1),
|
||||
lv_type => $lv_type,
|
||||
};
|
||||
$d->{pool_lv} = $pool_lv if $pool_lv;
|
||||
$d->{tags} = $tags if $tags;
|
||||
$d->{ctime} = $ctime;
|
||||
my $d = {
|
||||
lv_size => int($lv_size),
|
||||
lv_state => substr($lv_attr, 4, 1),
|
||||
lv_type => $lv_type,
|
||||
};
|
||||
$d->{pool_lv} = $pool_lv if $pool_lv;
|
||||
$d->{tags} = $tags if $tags;
|
||||
$d->{ctime} = $ctime;
|
||||
|
||||
if ($lv_type eq 't') {
|
||||
$data_percent ||= 0;
|
||||
$meta_percent ||= 0;
|
||||
$snap_percent ||= 0;
|
||||
$d->{metadata_size} = int($meta_size);
|
||||
$d->{metadata_used} = int(($meta_percent * $meta_size)/100);
|
||||
$d->{used} = int(($data_percent * $lv_size)/100);
|
||||
}
|
||||
$lvs->{$vg_name}->{$lv_name} = $d;
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
if ($lv_type eq 't') {
|
||||
$data_percent ||= 0;
|
||||
$meta_percent ||= 0;
|
||||
$snap_percent ||= 0;
|
||||
$d->{metadata_size} = int($meta_size);
|
||||
$d->{metadata_used} = int(($meta_percent * $meta_size) / 100);
|
||||
$d->{used} = int(($data_percent * $lv_size) / 100);
|
||||
}
|
||||
$lvs->{$vg_name}->{$lv_name} = $d;
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
|
||||
return $lvs;
|
||||
@ -217,48 +275,50 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, { images => 1 }],
|
||||
'sensitive-properties' => {},
|
||||
content => [{ images => 1, rootdir => 1 }, { images => 1 }],
|
||||
'sensitive-properties' => {},
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
vgname => {
|
||||
description => "Volume group name.",
|
||||
type => 'string', format => 'pve-storage-vgname',
|
||||
},
|
||||
base => {
|
||||
description => "Base volume. This volume is automatically activated.",
|
||||
type => 'string', format => 'pve-volume-id',
|
||||
},
|
||||
saferemove => {
|
||||
description => "Zero-out data when removing LVs.",
|
||||
type => 'boolean',
|
||||
},
|
||||
saferemove_throughput => {
|
||||
description => "Wipe throughput (cstream -t parameter value).",
|
||||
type => 'string',
|
||||
},
|
||||
tagged_only => {
|
||||
description => "Only use logical volumes tagged with 'pve-vm-ID'.",
|
||||
type => 'boolean',
|
||||
}
|
||||
vgname => {
|
||||
description => "Volume group name.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-vgname',
|
||||
},
|
||||
base => {
|
||||
description => "Base volume. This volume is automatically activated.",
|
||||
type => 'string',
|
||||
format => 'pve-volume-id',
|
||||
},
|
||||
saferemove => {
|
||||
description => "Zero-out data when removing LVs.",
|
||||
type => 'boolean',
|
||||
},
|
||||
saferemove_throughput => {
|
||||
description => "Wipe throughput (cstream -t parameter value).",
|
||||
type => 'string',
|
||||
},
|
||||
tagged_only => {
|
||||
description => "Only use logical volumes tagged with 'pve-vm-ID'.",
|
||||
type => 'boolean',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
vgname => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
shared => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
saferemove => { optional => 1 },
|
||||
saferemove_throughput => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
base => { fixed => 1, optional => 1 },
|
||||
tagged_only => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
vgname => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
shared => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
saferemove => { optional => 1 },
|
||||
saferemove_throughput => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
base => { fixed => 1, optional => 1 },
|
||||
tagged_only => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
@ -268,21 +328,21 @@ sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
if (my $base = $scfg->{base}) {
|
||||
my ($baseid, $volname) = PVE::Storage::parse_volume_id($base);
|
||||
my ($baseid, $volname) = PVE::Storage::parse_volume_id($base);
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $basecfg = PVE::Storage::storage_config ($cfg, $baseid, 1);
|
||||
die "base storage ID '$baseid' does not exist\n" if !$basecfg;
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $basecfg = PVE::Storage::storage_config($cfg, $baseid, 1);
|
||||
die "base storage ID '$baseid' does not exist\n" if !$basecfg;
|
||||
|
||||
# we only support iscsi for now
|
||||
die "unsupported base type '$basecfg->{type}'"
|
||||
if $basecfg->{type} ne 'iscsi';
|
||||
# we only support iscsi for now
|
||||
die "unsupported base type '$basecfg->{type}'"
|
||||
if $basecfg->{type} ne 'iscsi';
|
||||
|
||||
my $path = PVE::Storage::path($cfg, $base);
|
||||
my $path = PVE::Storage::path($cfg, $base);
|
||||
|
||||
PVE::Storage::activate_storage($cfg, $baseid);
|
||||
PVE::Storage::activate_storage($cfg, $baseid);
|
||||
|
||||
lvm_create_volume_group($path, $scfg->{vgname}, $scfg->{shared});
|
||||
lvm_create_volume_group($path, $scfg->{vgname}, $scfg->{shared});
|
||||
}
|
||||
|
||||
return;
|
||||
@ -294,7 +354,7 @@ sub parse_volname {
|
||||
PVE::Storage::Plugin::parse_lvm_name($volname);
|
||||
|
||||
if ($volname =~ m/^(vm-(\d+)-\S+)$/) {
|
||||
return ('images', $1, $2, undef, undef, undef, 'raw');
|
||||
return ('images', $1, $2, undef, undef, undef, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse lvm volume name '$volname'\n";
|
||||
@ -303,7 +363,7 @@ sub parse_volname {
|
||||
sub filesystem_path {
|
||||
my ($class, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
die "lvm snapshot is not implemented"if defined($snapname);
|
||||
die "lvm snapshot is not implemented" if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
@ -333,7 +393,7 @@ sub find_free_diskname {
|
||||
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
|
||||
my $disk_list = [ keys %{$lvs->{$vg}} ];
|
||||
my $disk_list = [keys %{ $lvs->{$vg} }];
|
||||
|
||||
return PVE::Storage::Plugin::get_next_vm_diskname($disk_list, $storeid, $vmid, undef, $scfg);
|
||||
}
|
||||
@ -342,12 +402,12 @@ sub lvcreate {
|
||||
my ($vg, $name, $size, $tags) = @_;
|
||||
|
||||
if ($size =~ m/\d$/) { # no unit is given
|
||||
$size .= "k"; # default to kilobytes
|
||||
$size .= "k"; # default to kilobytes
|
||||
}
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-aly', '-Wy', '--yes', '--size', $size, '--name', $name];
|
||||
for my $tag (@$tags) {
|
||||
push @$cmd, '--addtag', $tag;
|
||||
push @$cmd, '--addtag', $tag;
|
||||
}
|
||||
push @$cmd, $vg;
|
||||
|
||||
@ -358,8 +418,8 @@ sub lvrename {
|
||||
my ($vg, $oldname, $newname) = @_;
|
||||
|
||||
run_command(
|
||||
['/sbin/lvrename', $vg, $oldname, $newname],
|
||||
errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error",
|
||||
['/sbin/lvrename', $vg, $oldname, $newname],
|
||||
errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error",
|
||||
);
|
||||
}
|
||||
|
||||
@ -369,20 +429,20 @@ sub alloc_image {
|
||||
die "unsupported format '$fmt'" if $fmt ne 'raw';
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
my $vgs = lvm_vgs();
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
|
||||
die "no such volume group '$vg'\n" if !defined($vgs->{$vg});
|
||||
|
||||
my $free = int($vgs->{$vg}->{free});
|
||||
|
||||
die "not enough free space ($free < $size)\n" if $free < $size;
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid)
|
||||
if !$name;
|
||||
if !$name;
|
||||
|
||||
lvcreate($vg, $name, $size, ["pve-vm-$vmid"]);
|
||||
|
||||
@ -398,31 +458,47 @@ sub free_image {
|
||||
# and to allow thin provisioning
|
||||
|
||||
my $zero_out_worker = sub {
|
||||
print "zero-out data on image $volname (/dev/$vg/del-$volname)\n";
|
||||
print "zero-out data on image $volname (/dev/$vg/del-$volname)\n";
|
||||
|
||||
# wipe throughput up to 10MB/s by default; may be overwritten with saferemove_throughput
|
||||
my $throughput = '-10485760';
|
||||
if ($scfg->{saferemove_throughput}) {
|
||||
$throughput = $scfg->{saferemove_throughput};
|
||||
}
|
||||
# wipe throughput up to 10MB/s by default; may be overwritten with saferemove_throughput
|
||||
my $throughput = '-10485760';
|
||||
if ($scfg->{saferemove_throughput}) {
|
||||
$throughput = $scfg->{saferemove_throughput};
|
||||
}
|
||||
|
||||
my $cmd = [
|
||||
'/usr/bin/cstream',
|
||||
'-i', '/dev/zero',
|
||||
'-o', "/dev/$vg/del-$volname",
|
||||
'-T', '10',
|
||||
'-v', '1',
|
||||
'-b', '1048576',
|
||||
'-t', "$throughput"
|
||||
];
|
||||
eval { run_command($cmd, errmsg => "zero out finished (note: 'No space left on device' is ok here)"); };
|
||||
warn $@ if $@;
|
||||
my $cmd = [
|
||||
'/usr/bin/cstream',
|
||||
'-i',
|
||||
'/dev/zero',
|
||||
'-o',
|
||||
"/dev/$vg/del-$volname",
|
||||
'-T',
|
||||
'10',
|
||||
'-v',
|
||||
'1',
|
||||
'-b',
|
||||
'1048576',
|
||||
'-t',
|
||||
"$throughput",
|
||||
];
|
||||
eval {
|
||||
run_command(
|
||||
$cmd,
|
||||
errmsg => "zero out finished (note: 'No space left on device' is ok here)",
|
||||
);
|
||||
};
|
||||
warn $@ if $@;
|
||||
|
||||
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error");
|
||||
});
|
||||
print "successfully removed volume $volname ($vg/del-$volname)\n";
|
||||
$class->cluster_lock_storage(
|
||||
$storeid,
|
||||
$scfg->{shared},
|
||||
undef,
|
||||
sub {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error");
|
||||
},
|
||||
);
|
||||
print "successfully removed volume $volname ($vg/del-$volname)\n";
|
||||
};
|
||||
|
||||
my $cmd = ['/sbin/lvchange', '-aly', "$vg/$volname"];
|
||||
@ -431,14 +507,14 @@ sub free_image {
|
||||
run_command($cmd, errmsg => "can't refresh LV '$vg/$volname' to zero-out its data");
|
||||
|
||||
if ($scfg->{saferemove}) {
|
||||
# avoid long running task, so we only rename here
|
||||
$cmd = ['/sbin/lvrename', $vg, $volname, "del-$volname"];
|
||||
run_command($cmd, errmsg => "lvrename '$vg/$volname' error");
|
||||
return $zero_out_worker;
|
||||
# avoid long running task, so we only rename here
|
||||
$cmd = ['/sbin/lvrename', $vg, $volname, "del-$volname"];
|
||||
run_command($cmd, errmsg => "lvrename '$vg/$volname' error");
|
||||
return $zero_out_worker;
|
||||
} else {
|
||||
my $tmpvg = $scfg->{vgname};
|
||||
$cmd = ['/sbin/lvremove', '-f', "$tmpvg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$tmpvg/$volname' error");
|
||||
my $tmpvg = $scfg->{vgname};
|
||||
$cmd = ['/sbin/lvremove', '-f', "$tmpvg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$tmpvg/$volname' error");
|
||||
}
|
||||
|
||||
return undef;
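
# A condensed sketch of the saferemove control flow implemented above
# (cluster locking omitted for brevity): only the cheap lvrename happens
# synchronously, and the returned closure is meant to run later in a worker,
# where it zeroes the renamed LV with cstream and finally removes it. The
# '-t' value falls back to the '-10485760' default from the code above when
# saferemove_throughput is not configured; the sub name is illustrative.
use strict;
use warnings;
use PVE::Tools qw(run_command);

sub saferemove_flow_sketch {
    my ($scfg, $vg, $volname) = @_;

    # fast part: hide the volume under a del- prefix
    run_command(['/sbin/lvrename', $vg, $volname, "del-$volname"],
        errmsg => "lvrename '$vg/$volname' error");

    # slow part, returned as a worker closure
    return sub {
        my $throughput = $scfg->{saferemove_throughput} || '-10485760';
        my $cmd = [
            '/usr/bin/cstream', '-i', '/dev/zero', '-o', "/dev/$vg/del-$volname",
            '-T', '10', '-v', '1', '-b', '1048576', '-t', $throughput,
        ];
        eval { run_command($cmd, errmsg => "zero out finished (note: 'No space left on device' is ok here)") };
        warn $@ if $@;
        run_command(['/sbin/lvremove', '-f', "$vg/del-$volname"],
            errmsg => "lvremove '$vg/del-$volname' error");
    };
}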
|
||||
@ -461,32 +537,36 @@ sub list_images {
|
||||
|
||||
if (my $dat = $cache->{lvs}->{$vgname}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
next if $volname !~ m/^vm-(\d+)-/;
|
||||
my $owner = $1;
|
||||
next if $volname !~ m/^vm-(\d+)-/;
|
||||
my $owner = $1;
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
my $info = $dat->{$volname};
|
||||
|
||||
next if $scfg->{tagged_only} && !&$check_tags($info->{tags});
|
||||
next if $scfg->{tagged_only} && !&$check_tags($info->{tags});
|
||||
|
||||
# Allow mirrored and RAID LVs
|
||||
next if $info->{lv_type} !~ m/^[-mMrR]$/;
|
||||
# Allow mirrored and RAID LVs
|
||||
next if $info->{lv_type} !~ m/^[-mMrR]$/;
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
|
||||
push @$res, {
|
||||
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
push @$res,
|
||||
{
|
||||
volid => $volid,
|
||||
format => 'raw',
|
||||
size => $info->{lv_size},
|
||||
vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -499,8 +579,8 @@ sub status {
|
||||
|
||||
my $vgname = $scfg->{vgname};
|
||||
|
||||
if (my $info = $cache->{vgs}->{$vgname}) {
|
||||
return ($info->{size}, $info->{free}, $info->{size} - $info->{free}, 1);
|
||||
if (my $info = $cache->{vgs}->{$vgname}) {
|
||||
return ($info->{size}, $info->{free}, $info->{size} - $info->{free}, 1);
|
||||
}
|
||||
|
||||
return undef;
|
||||
@ -513,12 +593,17 @@ sub activate_storage {
|
||||
|
||||
# In LVM2, vgscans take place automatically;
|
||||
# this is just to be sure
|
||||
if ($cache->{vgs} && !$cache->{vgscaned} &&
|
||||
!$cache->{vgs}->{$scfg->{vgname}}) {
|
||||
$cache->{vgscaned} = 1;
|
||||
my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes'];
|
||||
eval { run_command($cmd, outfunc => sub {}); };
|
||||
warn $@ if $@;
|
||||
if (
|
||||
$cache->{vgs}
|
||||
&& !$cache->{vgscaned}
|
||||
&& !$cache->{vgs}->{ $scfg->{vgname} }
|
||||
) {
|
||||
$cache->{vgscaned} = 1;
|
||||
my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes'];
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub { });
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
# we do not activate any volumes here ('vgchange -aly')
|
||||
@ -549,7 +634,7 @@ sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
my $path = $class->path($scfg, $volname, $storeid, $snapname);
|
||||
return if ! -b $path;
|
||||
return if !-b $path;
|
||||
|
||||
my $cmd = ['/sbin/lvchange', '-aln', $path];
|
||||
run_command($cmd, errmsg => "can't deactivate LV '$path'");
|
||||
@ -558,14 +643,19 @@ sub deactivate_volume {
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
|
||||
$size = ($size/1024/1024) . "M";
|
||||
$size = ($size / 1024 / 1024) . "M";
|
||||
|
||||
my $path = $class->path($scfg, $volname);
|
||||
my $cmd = ['/sbin/lvextend', '-L', $size, $path];
|
||||
|
||||
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
|
||||
run_command($cmd, errmsg => "error resizing volume '$path'");
|
||||
});
|
||||
$class->cluster_lock_storage(
|
||||
$storeid,
|
||||
$scfg->{shared},
|
||||
undef,
|
||||
sub {
|
||||
run_command($cmd, errmsg => "error resizing volume '$path'");
|
||||
},
|
||||
);
|
||||
|
||||
return 1;
|
||||
}
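
# Worked example for the size conversion used in volume_resize above: the
# API passes $size in bytes, while lvextend -L expects a unit suffix, so the
# value is divided down to mebibytes and suffixed with "M".
use strict;
use warnings;

my $size = 34359738368;                        # 32 GiB in bytes
my $lv_size = ($size / 1024 / 1024) . "M";     # "32768M"
print "$lv_size\n";                            # prints: 32768M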
|
||||
@ -574,14 +664,29 @@ sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
|
||||
my $cmd = ['/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix', '--options', 'lv_size', $path];
|
||||
my $cmd = [
|
||||
'/sbin/lvs',
|
||||
'--separator',
|
||||
':',
|
||||
'--noheadings',
|
||||
'--units',
|
||||
'b',
|
||||
'--unbuffered',
|
||||
'--nosuffix',
|
||||
'--options',
|
||||
'lv_size',
|
||||
$path,
|
||||
];
|
||||
|
||||
my $size;
|
||||
run_command($cmd, timeout => $timeout, errmsg => "can't get size of '$path'",
|
||||
outfunc => sub {
|
||||
$size = int(shift);
|
||||
});
|
||||
run_command(
|
||||
$cmd,
|
||||
timeout => $timeout,
|
||||
errmsg => "can't get size of '$path'",
|
||||
outfunc => sub {
|
||||
$size = int(shift);
|
||||
},
|
||||
);
|
||||
return wantarray ? ($size, 'raw', 0, undef) : $size;
|
||||
}
|
||||
|
||||
@ -607,18 +712,17 @@ sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { base => 1, current => 1},
|
||||
rename => {current => 1},
|
||||
copy => { base => 1, current => 1 },
|
||||
rename => { current => 1 },
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if($snapname){
|
||||
$key = 'snap';
|
||||
}else{
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
@ -628,27 +732,33 @@ sub volume_has_feature {
|
||||
sub volume_export_formats {
|
||||
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
return () if defined($snapshot); # lvm-thin only
|
||||
return volume_import_formats($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
|
||||
return volume_import_formats(
|
||||
$class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
|
||||
);
|
||||
}
|
||||
|
||||
sub volume_export {
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
|
||||
= @_;
|
||||
die "volume export format $format not available for $class\n"
|
||||
if $format ne 'raw+size';
|
||||
if $format ne 'raw+size';
|
||||
die "cannot export volumes together with their snapshots in $class\n"
|
||||
if $with_snapshots;
|
||||
if $with_snapshots;
|
||||
die "cannot export a snapshot in $class\n" if defined($snapshot);
|
||||
die "cannot export an incremental stream in $class\n" if defined($base_snapshot);
|
||||
my $file = $class->path($scfg, $volname, $storeid);
|
||||
my $size;
|
||||
# should be faster than querying LVM, also checks for the device file's availability
|
||||
run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
|
||||
$size = int($1);
|
||||
});
|
||||
run_command(
|
||||
['/sbin/blockdev', '--getsize64', $file],
|
||||
outfunc => sub {
|
||||
my ($line) = @_;
|
||||
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
|
||||
$size = int($1);
|
||||
},
|
||||
);
|
||||
PVE::Storage::Plugin::write_common_header($fh, $size);
|
||||
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&'.fileno($fh));
|
||||
run_command(['dd', "if=$file", "bs=64k", "status=progress"], output => '>&' . fileno($fh));
|
||||
}
|
||||
|
||||
sub volume_import_formats {
|
||||
@ -659,53 +769,64 @@ sub volume_import_formats {
|
||||
}
|
||||
|
||||
sub volume_import {
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename,
|
||||
) = @_;
|
||||
die "volume import format $format not available for $class\n"
|
||||
if $format ne 'raw+size';
|
||||
if $format ne 'raw+size';
|
||||
die "cannot import volumes together with their snapshots in $class\n"
|
||||
if $with_snapshots;
|
||||
if $with_snapshots;
|
||||
die "cannot import an incremental stream in $class\n" if defined($base_snapshot);
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) =
|
||||
$class->parse_volname($volname);
|
||||
$class->parse_volname($volname);
|
||||
die "cannot import format $format into a file of format $file_format\n"
|
||||
if $file_format ne 'raw';
|
||||
if $file_format ne 'raw';
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
if ($lvs->{$vg}->{$volname}) {
|
||||
die "volume $vg/$volname already exists\n" if !$allow_rename;
|
||||
warn "volume $vg/$volname already exists - importing with a different name\n";
|
||||
$name = undef;
|
||||
die "volume $vg/$volname already exists\n" if !$allow_rename;
|
||||
warn "volume $vg/$volname already exists - importing with a different name\n";
|
||||
$name = undef;
|
||||
}
|
||||
|
||||
my ($size) = PVE::Storage::Plugin::read_common_header($fh);
|
||||
$size = PVE::Storage::Common::align_size_up($size, 1024) / 1024;
|
||||
|
||||
eval {
|
||||
my $allocname = $class->alloc_image($storeid, $scfg, $vmid, 'raw', $name, $size);
|
||||
my $oldname = $volname;
|
||||
$volname = $allocname;
|
||||
if (defined($name) && $allocname ne $oldname) {
|
||||
die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n";
|
||||
}
|
||||
my $file = $class->path($scfg, $volname, $storeid)
|
||||
or die "internal error: failed to get path to newly allocated volume $volname\n";
|
||||
my $allocname = $class->alloc_image($storeid, $scfg, $vmid, 'raw', $name, $size);
|
||||
my $oldname = $volname;
|
||||
$volname = $allocname;
|
||||
if (defined($name) && $allocname ne $oldname) {
|
||||
die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n";
|
||||
}
|
||||
my $file = $class->path($scfg, $volname, $storeid)
|
||||
or die "internal error: failed to get path to newly allocated volume $volname\n";
|
||||
|
||||
$class->volume_import_write($fh, $file);
|
||||
$class->volume_import_write($fh, $file);
|
||||
};
|
||||
if (my $err = $@) {
|
||||
my $cleanup_worker = eval { $class->free_image($storeid, $scfg, $volname, 0) };
|
||||
warn $@ if $@;
|
||||
my $cleanup_worker = eval { $class->free_image($storeid, $scfg, $volname, 0) };
|
||||
warn $@ if $@;
|
||||
|
||||
if ($cleanup_worker) {
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
if ($cleanup_worker) {
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
$rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker);
|
||||
}
|
||||
$rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker);
|
||||
}
|
||||
|
||||
die $err;
|
||||
die $err;
|
||||
}
|
||||
|
||||
return "$storeid:$volname";
|
||||
@ -713,29 +834,22 @@ sub volume_import {
|
||||
|
||||
sub volume_import_write {
|
||||
my ($class, $input_fh, $output_file) = @_;
|
||||
run_command(['dd', "of=$output_file", 'bs=64k'],
|
||||
input => '<&'.fileno($input_fh));
|
||||
run_command(['dd', "of=$output_file", 'bs=64k'], input => '<&' . fileno($input_fh));
|
||||
}
|
||||
|
||||
sub rename_volume {
|
||||
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
|
||||
|
||||
my (
|
||||
undef,
|
||||
$source_image,
|
||||
$source_vmid,
|
||||
$base_name,
|
||||
$base_vmid,
|
||||
undef,
|
||||
$format
|
||||
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
|
||||
) = $class->parse_volname($source_volname);
|
||||
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
|
||||
if !$target_volname;
|
||||
if !$target_volname;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
die "target volume '${target_volname}' already exists\n"
|
||||
if ($lvs->{$vg}->{$target_volname});
|
||||
if ($lvs->{$vg}->{$target_volname});
|
||||
|
||||
lvrename($vg, $source_volname, $target_volname);
|
||||
return "${storeid}:${target_volname}";
|
||||
|
||||
@ -17,12 +17,12 @@ my $get_lun_cmd_map = sub {
|
||||
my $sbdadmcmd = "/usr/sbin/sbdadm";
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $stmfadmcmd, method => 'create-lu' },
|
||||
delete_lu => { cmd => $stmfadmcmd, method => 'delete-lu' },
|
||||
import_lu => { cmd => $stmfadmcmd, method => 'import-lu' },
|
||||
modify_lu => { cmd => $stmfadmcmd, method => 'modify-lu' },
|
||||
add_view => { cmd => $stmfadmcmd, method => 'add-view' },
|
||||
list_view => { cmd => $stmfadmcmd, method => 'list-view' },
|
||||
create_lu => { cmd => $stmfadmcmd, method => 'create-lu' },
|
||||
delete_lu => { cmd => $stmfadmcmd, method => 'delete-lu' },
|
||||
import_lu => { cmd => $stmfadmcmd, method => 'import-lu' },
|
||||
modify_lu => { cmd => $stmfadmcmd, method => 'modify-lu' },
|
||||
add_view => { cmd => $stmfadmcmd, method => 'add-view' },
|
||||
list_view => { cmd => $stmfadmcmd, method => 'list-view' },
|
||||
list_lu => { cmd => $sbdadmcmd, method => 'list-lu' },
|
||||
};
|
||||
|
||||
@ -45,15 +45,15 @@ sub run_lun_command {
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
if ($method eq 'create_lu') {
|
||||
my $wcd = 'false';
|
||||
my $wcd = 'false';
|
||||
if ($scfg->{nowritecache}) {
|
||||
$wcd = 'true';
|
||||
}
|
||||
$wcd = 'true';
|
||||
}
|
||||
my $prefix = '600144f';
|
||||
my $digest = md5_hex($params[0]);
|
||||
$digest =~ /(\w{7}(.*))/;
|
||||
@ -68,13 +68,13 @@ sub run_lun_command {
|
||||
@params = undef;
|
||||
} elsif ($method eq 'add_view') {
|
||||
if ($scfg->{comstar_tg}) {
|
||||
unshift @params, $scfg->{comstar_tg};
|
||||
unshift @params, '--target-group';
|
||||
}
|
||||
unshift @params, $scfg->{comstar_tg};
|
||||
unshift @params, '--target-group';
|
||||
}
|
||||
if ($scfg->{comstar_hg}) {
|
||||
unshift @params, $scfg->{comstar_hg};
|
||||
unshift @params, '--host-group';
|
||||
}
|
||||
unshift @params, $scfg->{comstar_hg};
|
||||
unshift @params, '--host-group';
|
||||
}
|
||||
}
|
||||
|
||||
my $cmdmap = $get_lun_cmd_map->($method);
|
||||
@ -83,7 +83,15 @@ sub run_lun_command {
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $lunmethod, @params];
|
||||
my $cmd = [
|
||||
@ssh_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
$target,
|
||||
$luncmd,
|
||||
$lunmethod,
|
||||
@params,
|
||||
];
|
||||
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
|
||||
|
||||
@ -48,36 +48,42 @@ my $execute_command = sub {
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
my $errfunc = sub {
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
};
|
||||
|
||||
if ($exec eq 'scp') {
|
||||
$target = 'root@[' . $scfg->{portal} . ']';
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", '--', $method, "$target:$params[0]"];
|
||||
$cmd = [
|
||||
@scp_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
'--',
|
||||
$method,
|
||||
"$target:$params[0]",
|
||||
];
|
||||
} else {
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method, @params];
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method,
|
||||
@params];
|
||||
}
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
|
||||
if ($@) {
|
||||
$res = {
|
||||
result => 0,
|
||||
msg => $err,
|
||||
}
|
||||
};
|
||||
} else {
|
||||
$res = {
|
||||
result => 1,
|
||||
msg => $msg,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return $res;
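
# A minimal sketch of the remote-command convention used by this LUN helper:
# stdout and stderr are collected via outfunc/errfunc callbacks, and the
# caller gets back a hash with a boolean 'result' and the captured text in
# 'msg'. The plain '/usr/bin/ssh' invocation and the sub name are
# illustrative stand-ins for the @ssh_cmd setup used in this file.
use strict;
use warnings;
use PVE::Tools qw(run_command);

sub remote_command_sketch {
    my ($target, @command) = @_;

    my ($msg, $err) = ('', '');
    eval {
        run_command(
            ['/usr/bin/ssh', $target, '--', @command],
            outfunc => sub { $msg .= "$_[0]\n" },
            errfunc => sub { $err .= "$_[0]" },
            timeout => 10,
        );
    };
    return $@
        ? { result => 0, msg => $err }
        : { result => 1, msg => $msg };
}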
|
||||
@ -104,10 +110,9 @@ my $read_config = sub {
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
my $cmd =
|
||||
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE];
|
||||
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
|
||||
if ($@) {
|
||||
die $err if ($err !~ /No such file or directory/);
|
||||
die "No configuration found. Install iet on $scfg->{portal}" if $msg eq '';
|
||||
@ -141,7 +146,7 @@ my $parser = sub {
|
||||
foreach (@cfgfile) {
|
||||
$line++;
|
||||
if ($_ =~ /^\s*Target\s*([\w\-\:\.]+)\s*$/) {
|
||||
if ($1 eq $scfg->{target} && ! $cfg_target) {
|
||||
if ($1 eq $scfg->{target} && !$cfg_target) {
|
||||
# start collecting info
|
||||
die "$line: Parse error [$_]" if $SETTINGS;
|
||||
$SETTINGS->{target} = $1;
|
||||
@ -157,7 +162,7 @@ my $parser = sub {
|
||||
} else {
|
||||
if ($cfg_target) {
|
||||
$SETTINGS->{text} .= "$_\n";
|
||||
next if ($_ =~ /^\s*#/ || ! $_);
|
||||
next if ($_ =~ /^\s*#/ || !$_);
|
||||
my $option = $_;
|
||||
if ($_ =~ /^(\w+)\s*#/) {
|
||||
$option = $1;
|
||||
@ -176,7 +181,7 @@ my $parser = sub {
|
||||
foreach (@lun) {
|
||||
my @lun_opt = split '=', $_;
|
||||
die "$line: Parse error [$option]" unless (scalar(@lun_opt) == 2);
|
||||
$conf->{$lun_opt[0]} = $lun_opt[1];
|
||||
$conf->{ $lun_opt[0] } = $lun_opt[1];
|
||||
}
|
||||
if ($conf->{Path} && $conf->{Path} =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$conf->{include} = 1;
|
||||
@ -184,7 +189,7 @@ my $parser = sub {
|
||||
$conf->{include} = 0;
|
||||
}
|
||||
$conf->{lun} = $num;
|
||||
push @{$SETTINGS->{luns}}, $conf;
|
||||
push @{ $SETTINGS->{luns} }, $conf;
|
||||
} else {
|
||||
die "$line: Parse error [$option]";
|
||||
}
|
||||
@ -202,19 +207,24 @@ my $update_config = sub {
|
||||
my $config = '';
|
||||
|
||||
while ((my $option, my $value) = each(%$SETTINGS)) {
|
||||
next if ($option eq 'include' || $option eq 'luns' || $option eq 'Path' || $option eq 'text' || $option eq 'used');
|
||||
next
|
||||
if ($option eq 'include'
|
||||
|| $option eq 'luns'
|
||||
|| $option eq 'Path'
|
||||
|| $option eq 'text'
|
||||
|| $option eq 'used');
|
||||
if ($option eq 'target') {
|
||||
$config = "\n\nTarget " . $SETTINGS->{target} . "\n" . $config;
|
||||
} else {
|
||||
$config .= "\t$option\t\t\t$value\n";
|
||||
}
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
my $lun_opt = '';
|
||||
while ((my $option, my $value) = each(%$lun)) {
|
||||
next if ($option eq 'include' || $option eq 'lun' || $option eq 'Path');
|
||||
if ($lun_opt eq '') {
|
||||
$lun_opt = $option . '=' . $value;
|
||||
$lun_opt = $option . '=' . $value;
|
||||
} else {
|
||||
$lun_opt .= ',' . $option . '=' . $value;
|
||||
}
|
||||
@ -260,12 +270,12 @@ my $get_lu_name = sub {
|
||||
my $used = ();
|
||||
my $i;
|
||||
|
||||
if (! exists $SETTINGS->{used}) {
|
||||
if (!exists $SETTINGS->{used}) {
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
$used->{$lun->{lun}} = 1;
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
$used->{ $lun->{lun} } = 1;
|
||||
}
|
||||
$SETTINGS->{used} = $used;
|
||||
}
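
# A self-contained sketch of the free-LUN lookup above: mark every LUN id
# already present in the configuration as used, then hand out the lowest
# unused id. $max_luns and the list of existing ids stand in for the parsed
# $SETTINGS state; the sub name is illustrative.
use strict;
use warnings;

sub next_free_lun {
    my ($max_luns, @used_ids) = @_;

    my %used = map { $_ => 1 } @used_ids;
    for my $i (0 .. $max_luns - 1) {
        return $i if !$used{$i};
    }
    die "no free LUN left (maximum is $max_luns)\n";
}

print next_free_lun(16384, 0, 1, 3), "\n";   # prints: 2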
|
||||
@ -282,14 +292,14 @@ my $get_lu_name = sub {
|
||||
my $init_lu_name = sub {
|
||||
my $used = ();
|
||||
|
||||
if (! exists($SETTINGS->{used})) {
|
||||
if (!exists($SETTINGS->{used})) {
|
||||
for (my $i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
$SETTINGS->{used} = $used;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
$SETTINGS->{used}->{$lun->{lun}} = 1;
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
$SETTINGS->{used}->{ $lun->{lun} } = 1;
|
||||
}
|
||||
};
|
||||
|
||||
@ -297,7 +307,7 @@ my $free_lu_name = sub {
|
||||
my ($lu_name) = @_;
|
||||
my $new;
|
||||
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
if ($lun->{lun} != $lu_name) {
|
||||
push @$new, $lun;
|
||||
}
|
||||
@ -310,7 +320,8 @@ my $free_lu_name = sub {
|
||||
my $make_lun = sub {
|
||||
my ($scfg, $path) = @_;
|
||||
|
||||
die 'Maximum number of LUNs per target is 16384' if scalar @{$SETTINGS->{luns}} >= $MAX_LUNS;
|
||||
die 'Maximum number of LUNs per target is 16384'
|
||||
if scalar @{ $SETTINGS->{luns} } >= $MAX_LUNS;
|
||||
|
||||
my $lun = $get_lu_name->();
|
||||
my $conf = {
|
||||
@ -319,7 +330,7 @@ my $make_lun = sub {
|
||||
Type => 'blockio',
|
||||
include => 1,
|
||||
};
|
||||
push @{$SETTINGS->{luns}}, $conf;
|
||||
push @{ $SETTINGS->{luns} }, $conf;
|
||||
|
||||
return $conf;
|
||||
};
|
||||
@ -329,7 +340,7 @@ my $list_view = sub {
|
||||
my $lun = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
next unless $lun->{include} == 1;
|
||||
if ($lun->{Path} =~ /^$object$/) {
|
||||
return $lun->{lun} if (defined($lun->{lun}));
|
||||
@ -345,7 +356,7 @@ my $list_lun = sub {
|
||||
my $name = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
next unless $lun->{include} == 1;
|
||||
if ($lun->{Path} =~ /^$object$/) {
|
||||
return $lun->{Path};
|
||||
@ -381,12 +392,12 @@ my $create_lun = sub {
|
||||
|
||||
my $delete_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = {msg => undef};
|
||||
my $res = { msg => undef };
|
||||
|
||||
my $path = $params[0];
|
||||
my $tid = $get_target_tid->($scfg);
|
||||
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{luns} }) {
|
||||
if ($lun->{Path} eq $path) {
|
||||
@params = ('--op', 'delete', "--tid=$tid", "--lun=$lun->{lun}");
|
||||
$res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
|
||||
@ -417,7 +428,7 @@ my $modify_lun = sub {
|
||||
my $path = $params[1];
|
||||
my $tid = $get_target_tid->($scfg);
|
||||
|
||||
foreach my $cfg (@{$SETTINGS->{luns}}) {
|
||||
foreach my $cfg (@{ $SETTINGS->{luns} }) {
|
||||
if ($cfg->{Path} eq $path) {
|
||||
$lun = $cfg;
|
||||
last;
|
||||
@ -446,13 +457,13 @@ my $get_lun_cmd_map = sub {
|
||||
my ($method) = @_;
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
};
|
||||
|
||||
die "unknown command '$method'" unless exists $cmdmap->{$method};
|
||||
|
||||
@ -10,12 +10,12 @@ use warnings;
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach);
|
||||
|
||||
my @CONFIG_FILES = (
|
||||
'/usr/local/etc/istgt/istgt.conf', # FreeBSD, FreeNAS
|
||||
'/var/etc/iscsi/istgt.conf' # NAS4Free
|
||||
'/usr/local/etc/istgt/istgt.conf', # FreeBSD, FreeNAS
|
||||
'/var/etc/iscsi/istgt.conf' # NAS4Free
|
||||
);
|
||||
my @DAEMONS = (
|
||||
'/usr/local/etc/rc.d/istgt', # FreeBSD, FreeNAS
|
||||
'/var/etc/rc.d/istgt' # NAS4Free
|
||||
'/usr/local/etc/rc.d/istgt', # FreeBSD, FreeNAS
|
||||
'/var/etc/rc.d/istgt' # NAS4Free
|
||||
);
|
||||
|
||||
# A logical unit can max have 63 LUNs
|
||||
@ -69,13 +69,13 @@ my $read_config = sub {
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
my $errfunc = sub {
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
@ -83,7 +83,8 @@ my $read_config = sub {
|
||||
my $daemon = 0;
|
||||
foreach my $config (@CONFIG_FILES) {
|
||||
$err = undef;
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config];
|
||||
my $cmd =
|
||||
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
@ -119,17 +120,17 @@ my $parse_size = sub {
|
||||
return 0 if !$text;
|
||||
|
||||
if ($text =~ m/^(\d+(\.\d+)?)([TGMK]B)?$/) {
|
||||
my ($size, $reminder, $unit) = ($1, $2, $3);
|
||||
return $size if !$unit;
|
||||
if ($unit eq 'KB') {
|
||||
$size *= 1024;
|
||||
} elsif ($unit eq 'MB') {
|
||||
$size *= 1024*1024;
|
||||
} elsif ($unit eq 'GB') {
|
||||
$size *= 1024*1024*1024;
|
||||
} elsif ($unit eq 'TB') {
|
||||
$size *= 1024*1024*1024*1024;
|
||||
}
|
||||
my ($size, $reminder, $unit) = ($1, $2, $3);
|
||||
return $size if !$unit;
|
||||
if ($unit eq 'KB') {
|
||||
$size *= 1024;
|
||||
} elsif ($unit eq 'MB') {
|
||||
$size *= 1024 * 1024;
|
||||
} elsif ($unit eq 'GB') {
|
||||
$size *= 1024 * 1024 * 1024;
|
||||
} elsif ($unit eq 'TB') {
|
||||
$size *= 1024 * 1024 * 1024 * 1024;
|
||||
}
|
||||
if ($reminder) {
|
||||
$size = ceil($size);
|
||||
}
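
# Worked example for the size parsing above: the unit suffix selects a
# power-of-two multiplier, and inputs with a fractional part are rounded up
# with ceil after scaling.
use strict;
use warnings;
use POSIX qw(ceil);

my %mult = (KB => 1024, MB => 1024**2, GB => 1024**3, TB => 1024**4);
printf "%d\n", ceil(1.5 * $mult{GB});   # "1.5GB" -> 1610612736
printf "%d\n", ceil(1.1 * $mult{MB});   # "1.1MB" -> 1153434 (rounded up)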
|
||||
@ -151,9 +152,9 @@ my $size_with_unit = sub {
|
||||
if ($size =~ m/^\d+$/) {
|
||||
++$n and $size /= 1024 until $size < 1024;
|
||||
if ($size =~ /\./) {
|
||||
return sprintf "%.2f%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
|
||||
return sprintf "%.2f%s", $size, (qw[bytes KB MB GB TB])[$n];
|
||||
} else {
|
||||
return sprintf "%d%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
|
||||
return sprintf "%d%s", $size, (qw[bytes KB MB GB TB])[$n];
|
||||
}
|
||||
}
|
||||
die "$size: Not a number";
|
||||
@ -164,18 +165,18 @@ my $lun_dumper = sub {
|
||||
my $config = '';
|
||||
|
||||
$config .= "\n[$lun]\n";
|
||||
$config .= 'TargetName ' . $SETTINGS->{$lun}->{TargetName} . "\n";
|
||||
$config .= 'Mapping ' . $SETTINGS->{$lun}->{Mapping} . "\n";
|
||||
$config .= 'AuthGroup ' . $SETTINGS->{$lun}->{AuthGroup} . "\n";
|
||||
$config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n";
|
||||
$config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n";
|
||||
$config .= 'TargetName ' . $SETTINGS->{$lun}->{TargetName} . "\n";
|
||||
$config .= 'Mapping ' . $SETTINGS->{$lun}->{Mapping} . "\n";
|
||||
$config .= 'AuthGroup ' . $SETTINGS->{$lun}->{AuthGroup} . "\n";
|
||||
$config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n";
|
||||
$config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n";
|
||||
|
||||
foreach my $conf (@{$SETTINGS->{$lun}->{luns}}) {
|
||||
$config .= "$conf->{lun} Storage " . $conf->{Storage};
|
||||
foreach my $conf (@{ $SETTINGS->{$lun}->{luns} }) {
|
||||
$config .= "$conf->{lun} Storage " . $conf->{Storage};
|
||||
$config .= ' ' . $size_with_unit->($conf->{Size}) . "\n";
|
||||
foreach ($conf->{options}) {
|
||||
if ($_) {
|
||||
$config .= "$conf->{lun} Option " . $_ . "\n";
|
||||
$config .= "$conf->{lun} Option " . $_ . "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -189,11 +190,11 @@ my $get_lu_name = sub {
|
||||
my $used = ();
|
||||
my $i;
|
||||
|
||||
if (! exists $SETTINGS->{$target}->{used}) {
|
||||
if (!exists $SETTINGS->{$target}->{used}) {
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) {
|
||||
$lun->{lun} =~ /^LUN(\d+)$/;
|
||||
$used->{$1} = 1;
|
||||
}
|
||||
@ -213,13 +214,13 @@ my $init_lu_name = sub {
|
||||
my ($target) = @_;
|
||||
my $used = ();
|
||||
|
||||
if (! exists($SETTINGS->{$target}->{used})) {
|
||||
if (!exists($SETTINGS->{$target}->{used})) {
|
||||
for (my $i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
$SETTINGS->{$target}->{used} = $used;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{$target}->{luns} }) {
|
||||
$lun->{lun} =~ /^LUN(\d+)$/;
|
||||
$SETTINGS->{$target}->{used}->{$1} = 1;
|
||||
}
|
||||
@ -236,7 +237,8 @@ my $make_lun = sub {
|
||||
my ($scfg, $path) = @_;
|
||||
|
||||
my $target = $SETTINGS->{current};
|
||||
die 'Maximum number of LUNs per target is 63' if scalar @{$SETTINGS->{$target}->{luns}} >= $MAX_LUNS;
|
||||
die 'Maximum number of LUNs per target is 63'
|
||||
if scalar @{ $SETTINGS->{$target}->{luns} } >= $MAX_LUNS;
|
||||
|
||||
my @options = ();
|
||||
my $lun = $get_lu_name->($target);
|
||||
@ -249,7 +251,7 @@ my $make_lun = sub {
|
||||
Size => 'AUTO',
|
||||
options => @options,
|
||||
};
|
||||
push @{$SETTINGS->{$target}->{luns}}, $conf;
|
||||
push @{ $SETTINGS->{$target}->{luns} }, $conf;
|
||||
|
||||
return $conf->{lun};
|
||||
};
|
||||
@ -290,7 +292,7 @@ my $parser = sub {
|
||||
if ($arg2 =~ /^Storage\s*(.+)/i) {
|
||||
$SETTINGS->{$lun}->{$arg1}->{storage} = $1;
|
||||
} elsif ($arg2 =~ /^Option\s*(.+)/i) {
|
||||
push @{$SETTINGS->{$lun}->{$arg1}->{options}}, $1;
|
||||
push @{ $SETTINGS->{$lun}->{$arg1}->{options} }, $1;
|
||||
} else {
|
||||
$SETTINGS->{$lun}->{$arg1} = $arg2;
|
||||
}
|
||||
@ -307,10 +309,10 @@ my $parser = sub {
|
||||
my $base = get_base;
|
||||
|
||||
for (my $i = 1; $i <= $max; $i++) {
|
||||
my $target = $SETTINGS->{nodebase}.':'.$SETTINGS->{"LogicalUnit$i"}->{TargetName};
|
||||
my $target = $SETTINGS->{nodebase} . ':' . $SETTINGS->{"LogicalUnit$i"}->{TargetName};
|
||||
if ($target eq $scfg->{target}) {
|
||||
my $lu = ();
|
||||
while ((my $key, my $val) = each(%{$SETTINGS->{"LogicalUnit$i"}})) {
|
||||
while ((my $key, my $val) = each(%{ $SETTINGS->{"LogicalUnit$i"} })) {
|
||||
if ($key =~ /^LUN\d+/) {
|
||||
$val->{storage} =~ /^([\w\/\-]+)\s+(\w+)/;
|
||||
my $storage = $1;
|
||||
@ -318,7 +320,7 @@ my $parser = sub {
|
||||
my $conf = undef;
|
||||
my @options = ();
|
||||
if ($val->{options}) {
|
||||
@options = @{$val->{options}};
|
||||
@options = @{ $val->{options} };
|
||||
}
|
||||
if ($storage =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$conf = {
|
||||
@ -326,7 +328,7 @@ my $parser = sub {
|
||||
Storage => $storage,
|
||||
Size => $size,
|
||||
options => @options,
|
||||
}
|
||||
};
|
||||
}
|
||||
push @$lu, $conf if $conf;
|
||||
delete $SETTINGS->{"LogicalUnit$i"}->{$key};
|
||||
@ -349,9 +351,9 @@ my $list_lun = sub {
|
||||
my $name = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
for my $key (keys %$SETTINGS) {
|
||||
for my $key (keys %$SETTINGS) {
|
||||
next unless $key =~ /^LogicalUnit\d+$/;
|
||||
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) {
|
||||
if ($lun->{Storage} =~ /^$object$/) {
|
||||
return $lun->{Storage};
|
||||
}
|
||||
@ -399,7 +401,7 @@ my $delete_lun = sub {
|
||||
my $target = $SETTINGS->{current};
|
||||
my $luns = ();
|
||||
|
||||
foreach my $conf (@{$SETTINGS->{$target}->{luns}}) {
|
||||
foreach my $conf (@{ $SETTINGS->{$target}->{luns} }) {
|
||||
if ($conf->{Storage} =~ /^$params[0]$/) {
|
||||
$free_lu_name->($target, $conf->{lun});
|
||||
} else {
|
||||
@ -448,7 +450,7 @@ my $add_view = sub {
|
||||
params => \@params,
|
||||
};
|
||||
} else {
|
||||
@params = ('-HUP', '`cat '. "$SETTINGS->{pidfile}`");
|
||||
@params = ('-HUP', '`cat ' . "$SETTINGS->{pidfile}`");
|
||||
$cmdmap = {
|
||||
cmd => 'ssh',
|
||||
method => 'kill',
|
||||
@ -477,9 +479,9 @@ my $list_view = sub {
|
||||
my $lun = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
for my $key (keys %$SETTINGS) {
|
||||
for my $key (keys %$SETTINGS) {
|
||||
next unless $key =~ /^LogicalUnit\d+$/;
|
||||
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
|
||||
foreach my $lun (@{ $SETTINGS->{$key}->{luns} }) {
|
||||
if ($lun->{Storage} =~ /^$object$/) {
|
||||
if ($lun->{lun} =~ /^LUN(\d+)/) {
|
||||
return $1;
|
||||
@ -496,13 +498,13 @@ my $get_lun_cmd_map = sub {
|
||||
my ($method) = @_;
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
};
|
||||
|
||||
die "unknown command '$method'" unless exists $cmdmap->{$method};
|
||||
@ -522,8 +524,8 @@ sub run_lun_command {
|
||||
my $is_add_view = 0;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
@ -531,18 +533,31 @@ sub run_lun_command {
|
||||
$parser->($scfg) unless $SETTINGS;
|
||||
my $cmdmap = $get_lun_cmd_map->($method);
|
||||
if ($method eq 'add_view') {
|
||||
$is_add_view = 1 ;
|
||||
$is_add_view = 1;
|
||||
$timeout = 15;
|
||||
}
|
||||
if (ref $cmdmap->{cmd} eq 'CODE') {
|
||||
$res = $cmdmap->{cmd}->($scfg, $timeout, $method, @params);
|
||||
if (ref $res) {
|
||||
$method = $res->{method};
|
||||
@params = @{$res->{params}};
|
||||
@params = @{ $res->{params} };
|
||||
if ($res->{cmd} eq 'scp') {
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $method, "$target:$params[0]"];
|
||||
$cmd = [
|
||||
@scp_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
$method,
|
||||
"$target:$params[0]",
|
||||
];
|
||||
} else {
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $method, @params];
|
||||
$cmd = [
|
||||
@ssh_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
$target,
|
||||
$method,
|
||||
@params,
|
||||
];
|
||||
}
|
||||
} else {
|
||||
return $res;
|
||||
@ -550,12 +565,18 @@ sub run_lun_command {
|
||||
} else {
|
||||
$luncmd = $cmdmap->{cmd};
|
||||
$method = $cmdmap->{method};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $method, @params];
|
||||
$cmd = [
|
||||
@ssh_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
$target,
|
||||
$luncmd,
|
||||
$method,
|
||||
@params,
|
||||
];
|
||||
}
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $output, timeout => $timeout); };
|
||||
if ($@ && $is_add_view) {
|
||||
my $err = $@;
|
||||
if ($OLD_CONFIG) {
|
||||
@ -565,15 +586,11 @@ sub run_lun_command {
|
||||
print $fh $OLD_CONFIG;
|
||||
close $fh;
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $file, $CONFIG_FILE];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $output, timeout => $timeout); };
|
||||
$err1 = $@ if $@;
|
||||
unlink $file;
|
||||
die "$err\n$err1" if $err1;
|
||||
eval {
|
||||
run_lun_command($scfg, undef, 'add_view', 'restart');
|
||||
};
|
||||
eval { run_lun_command($scfg, undef, 'add_view', 'restart'); };
|
||||
die "$err\n$@" if ($@);
|
||||
}
|
||||
die $err;
|
||||
|
||||
@ -29,8 +29,8 @@ sub get_base;
|
||||
# targetcli constants
|
||||
# config file location differs from distro to distro
|
||||
my @CONFIG_FILES = (
|
||||
'/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al
|
||||
'/etc/target/saveconfig.json' , # ArchLinux, CentOS
|
||||
'/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al
|
||||
'/etc/target/saveconfig.json', # ArchLinux, CentOS
|
||||
);
|
||||
my $BACKSTORE = '/backstores/block';
|
||||
|
||||
@ -58,21 +58,27 @@ my $execute_remote_command = sub {
|
||||
my $errfunc = sub { $err .= "$_[0]\n" };
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $remote_command, @params];
|
||||
$cmd = [
|
||||
@ssh_cmd,
|
||||
'-i',
|
||||
"$id_rsa_path/$scfg->{portal}_id_rsa",
|
||||
$target,
|
||||
'--',
|
||||
$remote_command,
|
||||
@params,
|
||||
];
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout); };
|
||||
if ($@) {
|
||||
$res = {
|
||||
result => 0,
|
||||
msg => $err,
|
||||
}
|
||||
$res = {
|
||||
result => 0,
|
||||
msg => $err,
|
||||
};
|
||||
} else {
|
||||
$res = {
|
||||
result => 1,
|
||||
msg => $msg,
|
||||
}
|
||||
$res = {
|
||||
result => 1,
|
||||
msg => $msg,
|
||||
};
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -96,14 +102,15 @@ my $read_config = sub {
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
foreach my $oneFile (@CONFIG_FILES) {
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
die $err if ($err !~ /No such file or directory/);
|
||||
}
|
||||
return $msg if $msg ne '';
|
||||
my $cmd =
|
||||
[@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
die $err if ($err !~ /No such file or directory/);
|
||||
}
|
||||
return $msg if $msg ne '';
|
||||
}
|
||||
|
||||
die "No configuration found. Install targetcli on $scfg->{portal}\n" if $msg eq '';
|
||||
@ -123,11 +130,11 @@ my $get_config = sub {
|
||||
|
||||
# Return settings of a specific target
|
||||
my $get_target_settings = sub {
|
||||
my ($scfg) = @_;
|
||||
my ($scfg) = @_;
|
||||
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
return undef if !$SETTINGS;
|
||||
return $SETTINGS->{$id};
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
return undef if !$SETTINGS;
|
||||
return $SETTINGS->{$id};
|
||||
};
|
||||
|
||||
# fetches and parses targetcli config from the portal
|
||||
@ -137,46 +144,47 @@ my $parser = sub {
|
||||
my $tpg_tag;
|
||||
|
||||
if ($tpg =~ /^tpg(\d+)$/) {
|
||||
$tpg_tag = $1;
|
||||
$tpg_tag = $1;
|
||||
} else {
|
||||
die "Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n";
|
||||
die
|
||||
"Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n";
|
||||
}
|
||||
|
||||
my $config = $get_config->($scfg);
|
||||
my $jsonconfig = JSON->new->utf8->decode($config);
|
||||
|
||||
my $haveTarget = 0;
|
||||
foreach my $target (@{$jsonconfig->{targets}}) {
|
||||
# only interested in iSCSI targets
|
||||
next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target});
|
||||
# find correct TPG
|
||||
foreach my $tpg (@{$target->{tpgs}}) {
|
||||
if ($tpg->{tag} == $tpg_tag) {
|
||||
my $res = [];
|
||||
foreach my $lun (@{$tpg->{luns}}) {
|
||||
my ($idx, $storage_object);
|
||||
if ($lun->{index} =~ /^(\d+)$/) {
|
||||
$idx = $1;
|
||||
}
|
||||
if ($lun->{storage_object} =~ m|^($BACKSTORE/.*)$|) {
|
||||
$storage_object = $1;
|
||||
}
|
||||
die "Invalid lun definition in config!\n"
|
||||
if !(defined($idx) && defined($storage_object));
|
||||
push @$res, { index => $idx, storage_object => $storage_object };
|
||||
}
|
||||
foreach my $target (@{ $jsonconfig->{targets} }) {
|
||||
# only interested in iSCSI targets
|
||||
next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target});
|
||||
# find correct TPG
|
||||
foreach my $tpg (@{ $target->{tpgs} }) {
|
||||
if ($tpg->{tag} == $tpg_tag) {
|
||||
my $res = [];
|
||||
foreach my $lun (@{ $tpg->{luns} }) {
|
||||
my ($idx, $storage_object);
|
||||
if ($lun->{index} =~ /^(\d+)$/) {
|
||||
$idx = $1;
|
||||
}
|
||||
if ($lun->{storage_object} =~ m|^($BACKSTORE/.*)$|) {
|
||||
$storage_object = $1;
|
||||
}
|
||||
die "Invalid lun definition in config!\n"
|
||||
if !(defined($idx) && defined($storage_object));
|
||||
push @$res, { index => $idx, storage_object => $storage_object };
|
||||
}
|
||||
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
$SETTINGS->{$id}->{luns} = $res;
|
||||
$haveTarget = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
$SETTINGS->{$id}->{luns} = $res;
|
||||
$haveTarget = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# seriously unhappy if the target server lacks iSCSI target configuration ...
|
||||
if (!$haveTarget) {
|
||||
die "target portal group tpg$tpg_tag not found!\n";
|
||||
die "target portal group tpg$tpg_tag not found!\n";
|
||||
}
|
||||
};
|
||||
|
||||
@ -194,10 +202,10 @@ my $free_lu_name = sub {
|
||||
|
||||
my $new = [];
|
||||
my $target = $get_target_settings->($scfg);
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") {
|
||||
push @$new, $lun;
|
||||
}
|
||||
foreach my $lun (@{ $target->{luns} }) {
|
||||
if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") {
|
||||
push @$new, $lun;
|
||||
}
|
||||
}
|
||||
|
||||
$target->{luns} = $new;
|
||||
@ -208,12 +216,12 @@ my $register_lun = sub {
|
||||
my ($scfg, $idx, $volname) = @_;
|
||||
|
||||
my $conf = {
|
||||
index => $idx,
|
||||
storage_object => "$BACKSTORE/$volname",
|
||||
is_new => 1,
|
||||
index => $idx,
|
||||
storage_object => "$BACKSTORE/$volname",
|
||||
is_new => 1,
|
||||
};
|
||||
my $target = $get_target_settings->($scfg);
|
||||
push @{$target->{luns}}, $conf;
|
||||
push @{ $target->{luns} }, $conf;
|
||||
|
||||
return $conf;
|
||||
};
|
||||
@ -225,17 +233,17 @@ my $extract_volname = sub {
|
||||
|
||||
my $base = get_base;
|
||||
if ($lunpath =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$volname = $1;
|
||||
my $prefix = $get_backstore_prefix->($scfg);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
# If we have a lun with the pool prefix matching this vol, then return this one
|
||||
# like pool-pve-vm-100-disk-0
|
||||
# Else, just fallback to the old name scheme which is vm-100-disk-0
|
||||
if ($lun->{storage_object} =~ /^$BACKSTORE\/($prefix$volname)$/) {
|
||||
return $1;
|
||||
}
|
||||
}
|
||||
$volname = $1;
|
||||
my $prefix = $get_backstore_prefix->($scfg);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
foreach my $lun (@{ $target->{luns} }) {
|
||||
# If we have a lun with the pool prefix matching this vol, then return this one
|
||||
# like pool-pve-vm-100-disk-0
|
||||
# Else, just fallback to the old name scheme which is vm-100-disk-0
|
||||
if ($lun->{storage_object} =~ /^$BACKSTORE\/($prefix$volname)$/) {
|
||||
return $1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $volname;
|
||||
@ -252,10 +260,10 @@ my $list_view = sub {
|
||||
|
||||
return undef if !defined($volname); # nothing to search for..
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $lun->{index};
|
||||
}
|
||||
foreach my $lun (@{ $target->{luns} }) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $lun->{index};
|
||||
}
|
||||
}
|
||||
|
||||
return $lun;
|
||||
@ -269,10 +277,10 @@ my $list_lun = sub {
|
||||
my $volname = $extract_volname->($scfg, $object);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $object;
|
||||
}
|
||||
foreach my $lun (@{ $target->{luns} }) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $object;
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
@ -283,7 +291,7 @@ my $create_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
if ($list_lun->($scfg, $timeout, $method, @params)) {
|
||||
die "$params[0]: LUN already exists!";
|
||||
die "$params[0]: LUN already exists!";
|
||||
}
|
||||
|
||||
my $device = $params[0];
|
||||
@ -294,18 +302,18 @@ my $create_lun = sub {
|
||||
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
|
||||
|
||||
# step 1: create backstore for device
|
||||
my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device" );
|
||||
my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device");
|
||||
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
# step 2: enable unmap support on the backstore
|
||||
@cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1' );
|
||||
@cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1');
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
# step 3: register lun with target
|
||||
# targetcli /iscsi/iqn.2018-04.at.bestsolution.somehost:target/tpg1/luns/ create /backstores/block/foobar
|
||||
@cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname" );
|
||||
@cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname");
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
@ -314,9 +322,9 @@ my $create_lun = sub {
|
||||
# changed without our knowledge, so relying on the number that targetcli returns
|
||||
my $lun_idx;
|
||||
if ($res->{msg} =~ /LUN (\d+)/) {
|
||||
$lun_idx = $1;
|
||||
$lun_idx = $1;
|
||||
} else {
|
||||
die "unable to determine new LUN index: $res->{msg}";
|
||||
die "unable to determine new LUN index: $res->{msg}";
|
||||
}
|
||||
|
||||
$register_lun->($scfg, $lun_idx, $volname);
|
||||
@ -330,7 +338,7 @@ my $create_lun = sub {
|
||||
|
||||
my $delete_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = {msg => undef};
|
||||
my $res = { msg => undef };
|
||||
|
||||
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
|
||||
|
||||
@ -338,30 +346,30 @@ my $delete_lun = sub {
|
||||
my $volname = $extract_volname->($scfg, $path);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
next if $lun->{storage_object} ne "$BACKSTORE/$volname";
|
||||
foreach my $lun (@{ $target->{luns} }) {
|
||||
next if $lun->{storage_object} ne "$BACKSTORE/$volname";
|
||||
|
||||
# step 1: delete the lun
|
||||
my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}" );
|
||||
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
# step 1: delete the lun
|
||||
my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}");
|
||||
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
|
||||
# step 2: delete the backstore
|
||||
@cliparams = ($BACKSTORE, 'delete', $volname);
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
# step 2: delete the backstore
|
||||
@cliparams = ($BACKSTORE, 'delete', $volname);
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
|
||||
# step 3: save to be safe ...
|
||||
$execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig');
|
||||
# step 3: save to be safe ...
|
||||
$execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig');
|
||||
|
||||
# update internal cache
|
||||
$free_lu_name->($scfg, $volname);
|
||||
# update internal cache
|
||||
$free_lu_name->($scfg, $volname);
|
||||
|
||||
last;
|
||||
last;
|
||||
}
|
||||
|
||||
return $res->{msg};
|
||||
@ -387,13 +395,13 @@ my $add_view = sub {
|
||||
};
|
||||
|
||||
my %lun_cmd_map = (
|
||||
create_lu => $create_lun,
|
||||
delete_lu => $delete_lun,
|
||||
import_lu => $import_lun,
|
||||
modify_lu => $modify_lun,
|
||||
add_view => $add_view,
|
||||
list_view => $list_view,
|
||||
list_lu => $list_lun,
|
||||
create_lu => $create_lun,
|
||||
delete_lu => $delete_lun,
|
||||
import_lu => $import_lun,
|
||||
modify_lu => $modify_lun,
|
||||
add_view => $add_view,
|
||||
list_view => $list_view,
|
||||
list_lu => $list_lun,
|
||||
);
|
||||
|
||||
sub run_lun_command {
|
||||
@ -403,8 +411,8 @@ sub run_lun_command {
|
||||
my $timediff = time - $SETTINGS_TIMESTAMP;
|
||||
my $target = $get_target_settings->($scfg);
|
||||
if (!$target || $timediff > $SETTINGS_MAXAGE) {
|
||||
$SETTINGS_TIMESTAMP = time;
|
||||
$parser->($scfg);
|
||||
$SETTINGS_TIMESTAMP = time;
|
||||
$parser->($scfg);
|
||||
}
|
||||
|
||||
die "unknown command '$method'" unless exists $lun_cmd_map{$method};
|
||||
|
||||
@ -30,28 +30,29 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, { images => 1, rootdir => 1}],
|
||||
'sensitive-properties' => {},
|
||||
content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }],
|
||||
'sensitive-properties' => {},
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
thinpool => {
|
||||
description => "LVM thin pool LV name.",
|
||||
type => 'string', format => 'pve-storage-vgname',
|
||||
},
|
||||
thinpool => {
|
||||
description => "LVM thin pool LV name.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-vgname',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
thinpool => { fixed => 1 },
|
||||
vgname => { fixed => 1 },
|
||||
thinpool => { fixed => 1 },
|
||||
vgname => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
@ -64,7 +65,7 @@ sub parse_volname {
|
||||
PVE::Storage::Plugin::parse_lvm_name($volname);
|
||||
|
||||
if ($volname =~ m/^((vm|base)-(\d+)-\S+)$/) {
|
||||
return ('images', $1, $3, undef, undef, $2 eq 'base', 'raw');
|
||||
return ('images', $1, $3, undef, undef, $2 eq 'base', 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse lvm volume name '$volname'\n";
|
||||
@ -77,7 +78,7 @@ sub filesystem_path {
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname": "/dev/$vg/$name";
|
||||
my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname" : "/dev/$vg/$name";
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
|
||||
@ -88,19 +89,27 @@ sub alloc_image {
|
||||
die "unsupported format '$fmt'" if $fmt ne 'raw';
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs();
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
|
||||
die "no such volume group '$vg'\n" if !defined($vgs->{$vg});
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid)
|
||||
if !$name;
|
||||
if !$name;
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-aly', '-V', "${size}k", '--name', $name,
|
||||
'--thinpool', "$vg/$scfg->{thinpool}" ];
|
||||
my $cmd = [
|
||||
'/sbin/lvcreate',
|
||||
'-aly',
|
||||
'-V',
|
||||
"${size}k",
|
||||
'--name',
|
||||
$name,
|
||||
'--thinpool',
|
||||
"$vg/$scfg->{thinpool}",
|
||||
];
|
||||
|
||||
run_command($cmd, errmsg => "lvcreate '$vg/$name' error");
|
||||
|
||||
@ -114,20 +123,20 @@ sub free_image {
|
||||
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
|
||||
if (my $dat = $lvs->{$scfg->{vgname}}) {
|
||||
if (my $dat = $lvs->{ $scfg->{vgname} }) {
|
||||
|
||||
# remove all volume snapshots first
|
||||
foreach my $lv (keys %$dat) {
|
||||
next if $lv !~ m/^snap_${volname}_${PVE::JSONSchema::CONFIGID_RE}$/;
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$lv"];
|
||||
run_command($cmd, errmsg => "lvremove snapshot '$vg/$lv' error");
|
||||
}
|
||||
# remove all volume snapshots first
|
||||
foreach my $lv (keys %$dat) {
|
||||
next if $lv !~ m/^snap_${volname}_${PVE::JSONSchema::CONFIGID_RE}$/;
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$lv"];
|
||||
run_command($cmd, errmsg => "lvremove snapshot '$vg/$lv' error");
|
||||
}
|
||||
|
||||
# finally remove original (if exists)
|
||||
if ($dat->{$volname}) {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/$volname' error");
|
||||
}
|
||||
# finally remove original (if exists)
|
||||
if ($dat->{$volname}) {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/$volname' error");
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
@ -144,31 +153,35 @@ sub list_images {
|
||||
|
||||
if (my $dat = $cache->{lvs}->{$vgname}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
next if $volname !~ m/^(vm|base)-(\d+)-/;
|
||||
my $owner = $2;
|
||||
next if $volname !~ m/^(vm|base)-(\d+)-/;
|
||||
my $owner = $2;
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
my $info = $dat->{$volname};
|
||||
|
||||
next if $info->{lv_type} ne 'V';
|
||||
next if $info->{lv_type} ne 'V';
|
||||
|
||||
next if $info->{pool_lv} ne $scfg->{thinpool};
|
||||
next if $info->{pool_lv} ne $scfg->{thinpool};
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
|
||||
push @$res, {
|
||||
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
push @$res,
|
||||
{
|
||||
volid => $volid,
|
||||
format => 'raw',
|
||||
size => $info->{lv_size},
|
||||
vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -181,13 +194,13 @@ sub list_thinpools {
|
||||
my $thinpools = [];
|
||||
|
||||
foreach my $vg (keys %$lvs) {
|
||||
foreach my $lvname (keys %{$lvs->{$vg}}) {
|
||||
next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't';
|
||||
my $lv = $lvs->{$vg}->{$lvname};
|
||||
$lv->{lv} = $lvname;
|
||||
$lv->{vg} = $vg;
|
||||
push @$thinpools, $lv;
|
||||
}
|
||||
foreach my $lvname (keys %{ $lvs->{$vg} }) {
|
||||
next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't';
|
||||
my $lv = $lvs->{$vg}->{$lvname};
|
||||
$lv->{lv} = $lvname;
|
||||
$lv->{vg} = $vg;
|
||||
push @$thinpools, $lv;
|
||||
}
|
||||
}
|
||||
|
||||
return $thinpools;
|
||||
@ -198,17 +211,17 @@ sub status {
|
||||
|
||||
my $lvs = $cache->{lvs} ||= PVE::Storage::LVMPlugin::lvm_list_volumes();
|
||||
|
||||
return if !$lvs->{$scfg->{vgname}};
|
||||
return if !$lvs->{ $scfg->{vgname} };
|
||||
|
||||
my $info = $lvs->{$scfg->{vgname}}->{$scfg->{thinpool}};
|
||||
my $info = $lvs->{ $scfg->{vgname} }->{ $scfg->{thinpool} };
|
||||
|
||||
return if !$info || $info->{lv_type} ne 't' || !$info->{lv_size};
|
||||
|
||||
return (
|
||||
$info->{lv_size},
|
||||
$info->{lv_size} - $info->{used},
|
||||
$info->{used},
|
||||
$info->{lv_state} eq 'a' ? 1 : 0,
|
||||
$info->{lv_size},
|
||||
$info->{lv_size} - $info->{used},
|
||||
$info->{used},
|
||||
$info->{lv_state} eq 'a' ? 1 : 0,
|
||||
);
|
||||
}
|
||||
|
||||
@ -221,7 +234,10 @@ my $activate_lv = sub {
|
||||
|
||||
return if $lvs->{$vg}->{$lv}->{lv_state} eq 'a';
|
||||
|
||||
run_command(['lvchange', '-ay', '-K', "$vg/$lv"], errmsg => "activating LV '$vg/$lv' failed");
|
||||
run_command(
|
||||
['lvchange', '-ay', '-K', "$vg/$lv"],
|
||||
errmsg => "activating LV '$vg/$lv' failed",
|
||||
);
|
||||
|
||||
$lvs->{$vg}->{$lv}->{lv_state} = 'a'; # update cache
|
||||
|
||||
@ -256,7 +272,7 @@ sub deactivate_volume {
|
||||
run_command(['lvchange', '-an', "$vg/$lv"], errmsg => "deactivate_volume '$vg/$lv' error");
|
||||
|
||||
$cache->{lvs}->{$vg}->{$lv}->{lv_state} = '-' # update cache
|
||||
if $cache->{lvs} && $cache->{lvs}->{$vg} && $cache->{lvs}->{$vg}->{$lv};
|
||||
if $cache->{lvs} && $cache->{lvs}->{$vg} && $cache->{lvs}->{$vg}->{$lv};
|
||||
|
||||
return;
|
||||
}
|
||||
@ -269,14 +285,13 @@ sub clone_image {
|
||||
my $lv;
|
||||
|
||||
if ($snap) {
|
||||
$lv = "$vg/snap_${volname}_$snap";
|
||||
$lv = "$vg/snap_${volname}_$snap";
|
||||
} else {
|
||||
my ($vtype, undef, undef, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, undef, undef, undef, undef, $isBase, $format) = $class->parse_volname($volname);
|
||||
|
||||
die "clone_image only works on base images\n" if !$isBase;
|
||||
die "clone_image only works on base images\n" if !$isBase;
|
||||
|
||||
$lv = "$vg/$volname";
|
||||
$lv = "$vg/$volname";
|
||||
}
|
||||
|
||||
my $name = $class->find_free_diskname($storeid, $scfg, $vmid);
|
||||
@ -290,8 +305,7 @@ sub clone_image {
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
die "create_base not possible with base image\n" if $isBase;
|
||||
|
||||
@ -299,11 +313,11 @@ sub create_base {
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
|
||||
if (my $dat = $lvs->{$vg}) {
|
||||
# to avoid confusion, reject if we find volume snapshots
|
||||
foreach my $lv (keys %$dat) {
|
||||
die "unable to create base volume - found snaphost '$lv'\n"
|
||||
if $lv =~ m/^snap_${volname}_(\w+)$/;
|
||||
}
|
||||
# to avoid confusion, reject if we find volume snapshots
|
||||
foreach my $lv (keys %$dat) {
|
||||
die "unable to create base volume - found snaphost '$lv'\n"
|
||||
if $lv =~ m/^snap_${volname}_(\w+)$/;
|
||||
}
|
||||
}
|
||||
|
||||
my $newname = $name;
|
||||
@ -362,22 +376,21 @@ sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
snapshot => { current => 1 },
|
||||
clone => { base => 1, snap => 1},
|
||||
template => { current => 1},
|
||||
copy => { base => 1, current => 1, snap => 1},
|
||||
sparseinit => { base => 1, current => 1},
|
||||
rename => {current => 1},
|
||||
snapshot => { current => 1 },
|
||||
clone => { base => 1, snap => 1 },
|
||||
template => { current => 1 },
|
||||
copy => { base => 1, current => 1, snap => 1 },
|
||||
sparseinit => { base => 1, current => 1 },
|
||||
rename => { current => 1 },
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if($snapname){
|
||||
$key = 'snap';
|
||||
}else{
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
@ -385,51 +398,62 @@ sub volume_has_feature {
|
||||
}
|
||||
|
||||
sub volume_import {
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename,
|
||||
) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) =
|
||||
$class->parse_volname($volname);
|
||||
$class->parse_volname($volname);
|
||||
|
||||
if (!$isBase) {
|
||||
return $class->SUPER::volume_import(
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename
|
||||
);
|
||||
return $class->SUPER::volume_import(
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename,
|
||||
);
|
||||
} else {
|
||||
my $tempname;
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
if ($lvs->{$vg}->{$volname}) {
|
||||
die "volume $vg/$volname already exists\n" if !$allow_rename;
|
||||
warn "volume $vg/$volname already exists - importing with a different name\n";
|
||||
my $tempname;
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
if ($lvs->{$vg}->{$volname}) {
|
||||
die "volume $vg/$volname already exists\n" if !$allow_rename;
|
||||
warn "volume $vg/$volname already exists - importing with a different name\n";
|
||||
|
||||
$tempname = $class->find_free_diskname($storeid, $scfg, $vmid);
|
||||
} else {
|
||||
$tempname = $volname;
|
||||
$tempname =~ s/base/vm/;
|
||||
}
|
||||
$tempname = $class->find_free_diskname($storeid, $scfg, $vmid);
|
||||
} else {
|
||||
$tempname = $volname;
|
||||
$tempname =~ s/base/vm/;
|
||||
}
|
||||
|
||||
my $newvolid = $class->SUPER::volume_import(
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$tempname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename
|
||||
);
|
||||
($storeid,my $newname) = PVE::Storage::parse_volume_id($newvolid);
|
||||
my $newvolid = $class->SUPER::volume_import(
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$tempname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename,
|
||||
);
|
||||
($storeid, my $newname) = PVE::Storage::parse_volume_id($newvolid);
|
||||
|
||||
$volname = $class->create_base($storeid, $scfg, $newname);
|
||||
$volname = $class->create_base($storeid, $scfg, $newname);
|
||||
}
|
||||
|
||||
return "$storeid:$volname";
|
||||
@ -438,8 +462,10 @@ sub volume_import {
|
||||
# used in LVMPlugin->volume_import
|
||||
sub volume_import_write {
|
||||
my ($class, $input_fh, $output_file) = @_;
|
||||
run_command(['dd', "of=$output_file", 'conv=sparse', 'bs=64k'],
|
||||
input => '<&'.fileno($input_fh));
|
||||
run_command(
|
||||
['dd', "of=$output_file", 'conv=sparse', 'bs=64k'],
|
||||
input => '<&' . fileno($input_fh),
|
||||
);
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
@ -24,9 +24,9 @@ sub nfs_is_mounted {
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ /^nfs/ &&
|
||||
$_->[0] =~ m|^\Q$source\E/?$| &&
|
||||
$_->[1] eq $mountpoint
|
||||
$_->[2] =~ /^nfs/
|
||||
&& $_->[0] =~ m|^\Q$source\E/?$|
|
||||
&& $_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
@ -39,8 +39,8 @@ sub nfs_mount {
|
||||
|
||||
my $cmd = ['/bin/mount', '-t', 'nfs', $source, $mountpoint];
|
||||
if ($options) {
|
||||
push @$cmd, '-o', $options;
|
||||
}
|
||||
push @$cmd, '-o', $options;
|
||||
}
|
||||
|
||||
run_command($cmd, errmsg => "mount error");
|
||||
}
|
||||
@ -53,49 +53,60 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1 },
|
||||
{ images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
'sensitive-properties' => {},
|
||||
content => [
|
||||
{
|
||||
images => 1,
|
||||
rootdir => 1,
|
||||
vztmpl => 1,
|
||||
iso => 1,
|
||||
backup => 1,
|
||||
snippets => 1,
|
||||
import => 1,
|
||||
},
|
||||
{ images => 1 },
|
||||
],
|
||||
format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
|
||||
'sensitive-properties' => {},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
export => {
|
||||
description => "NFS export path.",
|
||||
type => 'string', format => 'pve-storage-path',
|
||||
},
|
||||
server => {
|
||||
description => "Server IP or DNS name.",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
export => {
|
||||
description => "NFS export path.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-path',
|
||||
},
|
||||
server => {
|
||||
description => "Server IP or DNS name.",
|
||||
type => 'string',
|
||||
format => 'pve-storage-server',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
export => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
export => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
'create-base-path' => { optional => 1 },
|
||||
'create-subdirs' => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
@ -110,13 +121,13 @@ sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
return undef if !nfs_is_mounted($server, $export, $path, $cache->{mountdata});
|
||||
return undef if !nfs_is_mounted($server, $export, $path, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
@ -125,20 +136,20 @@ sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
if (!nfs_is_mounted($server, $export, $path, $cache->{mountdata})) {
|
||||
# NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
# NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline
|
||||
$class->config_aware_base_mkdir($scfg, $path);
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
|
||||
if !-d $path;
|
||||
|
||||
nfs_mount($server, $export, $path, $scfg->{options});
|
||||
nfs_mount($server, $export, $path, $scfg->{options});
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
@ -148,15 +159,15 @@ sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
if (nfs_is_mounted($server, $export, $path, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
if (nfs_is_mounted($server, $export, $path, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
@ -170,33 +181,35 @@ sub check_connection {
|
||||
|
||||
my $is_v4 = defined($opts) && $opts =~ /vers=4.*/;
|
||||
if ($is_v4) {
|
||||
my $ip = PVE::JSONSchema::pve_verify_ip($server, 1);
|
||||
if (!defined($ip)) {
|
||||
$ip = PVE::Network::get_ip_from_hostname($server);
|
||||
}
|
||||
my $ip = PVE::JSONSchema::pve_verify_ip($server, 1);
|
||||
if (!defined($ip)) {
|
||||
$ip = PVE::Network::get_ip_from_hostname($server);
|
||||
}
|
||||
|
||||
my $transport = PVE::JSONSchema::pve_verify_ipv4($ip, 1) ? 'tcp' : 'tcp6';
|
||||
my $transport = PVE::JSONSchema::pve_verify_ipv4($ip, 1) ? 'tcp' : 'tcp6';
|
||||
|
||||
# nfsv4 uses a pseudo-filesystem always beginning with /
|
||||
# no exports are listed
|
||||
$cmd = ['/usr/sbin/rpcinfo', '-T', $transport, $ip, 'nfs', '4'];
|
||||
# nfsv4 uses a pseudo-filesystem always beginning with /
|
||||
# no exports are listed
|
||||
$cmd = ['/usr/sbin/rpcinfo', '-T', $transport, $ip, 'nfs', '4'];
|
||||
} else {
|
||||
$cmd = ['/sbin/showmount', '--no-headers', '--exports', $server];
|
||||
$cmd = ['/sbin/showmount', '--no-headers', '--exports', $server];
|
||||
}
|
||||
|
||||
eval { run_command($cmd, timeout => 10, outfunc => sub {}, errfunc => sub {}) };
|
||||
eval {
|
||||
run_command($cmd, timeout => 10, outfunc => sub { }, errfunc => sub { });
|
||||
};
|
||||
if (my $err = $@) {
|
||||
if ($is_v4) {
|
||||
my $port = 2049;
|
||||
$port = $1 if defined($opts) && $opts =~ /port=(\d+)/;
|
||||
if ($is_v4) {
|
||||
my $port = 2049;
|
||||
$port = $1 if defined($opts) && $opts =~ /port=(\d+)/;
|
||||
|
||||
# rpcinfo is expected to work when the port is 0 (see 'man 5 nfs') and tcp_ping()
|
||||
# defaults to port 7 when passing in 0.
|
||||
return 0 if $port == 0;
|
||||
# rpcinfo is expected to work when the port is 0 (see 'man 5 nfs') and tcp_ping()
|
||||
# defaults to port 7 when passing in 0.
|
||||
return 0 if $port == 0;
|
||||
|
||||
return PVE::Network::tcp_ping($server, $port, 2);
|
||||
}
|
||||
return 0;
|
||||
return PVE::Network::tcp_ping($server, $port, 2);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
||||
@ -29,51 +29,53 @@ sub type {
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {backup => 1, none => 1}, { backup => 1 }],
|
||||
'sensitive-properties' => {
|
||||
'encryption-key' => 1,
|
||||
'master-pubkey' => 1,
|
||||
password => 1,
|
||||
},
|
||||
content => [{ backup => 1, none => 1 }, { backup => 1 }],
|
||||
'sensitive-properties' => {
|
||||
'encryption-key' => 1,
|
||||
'master-pubkey' => 1,
|
||||
password => 1,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
datastore => {
|
||||
description => "Proxmox Backup Server datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
# openssl s_client -connect <host>:8007 2>&1 |openssl x509 -fingerprint -sha256
|
||||
fingerprint => get_standard_option('fingerprint-sha256'),
|
||||
'encryption-key' => {
|
||||
description => "Encryption key. Use 'autogen' to generate one automatically without passphrase.",
|
||||
type => 'string',
|
||||
},
|
||||
'master-pubkey' => {
|
||||
description => "Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.",
|
||||
type => 'string',
|
||||
},
|
||||
datastore => {
|
||||
description => "Proxmox Backup Server datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
# openssl s_client -connect <host>:8007 2>&1 |openssl x509 -fingerprint -sha256
|
||||
fingerprint => get_standard_option('fingerprint-sha256'),
|
||||
'encryption-key' => {
|
||||
description =>
|
||||
"Encryption key. Use 'autogen' to generate one automatically without passphrase.",
|
||||
type => 'string',
|
||||
},
|
||||
'master-pubkey' => {
|
||||
description =>
|
||||
"Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.",
|
||||
type => 'string',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
server => { fixed => 1 },
|
||||
datastore => { fixed => 1 },
|
||||
namespace => { optional => 1 },
|
||||
port => { optional => 1 },
|
||||
nodes => { optional => 1},
|
||||
disable => { optional => 1},
|
||||
content => { optional => 1},
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1 },
|
||||
'encryption-key' => { optional => 1 },
|
||||
'master-pubkey' => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
fingerprint => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
datastore => { fixed => 1 },
|
||||
namespace => { optional => 1 },
|
||||
port => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1 },
|
||||
'encryption-key' => { optional => 1 },
|
||||
'master-pubkey' => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
fingerprint => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
@ -131,8 +133,8 @@ sub pbs_delete_encryption_key {
|
||||
my $pwfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
|
||||
if (!unlink $pwfile) {
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete encryption key! $!\n";
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete encryption key! $!\n";
|
||||
}
|
||||
delete $scfg->{'encryption-key'};
|
||||
}
|
||||
@ -153,13 +155,13 @@ sub pbs_open_encryption_key {
|
||||
|
||||
my $keyfd;
|
||||
if (!open($keyfd, '<', $encryption_key_file)) {
|
||||
if ($! == ENOENT) {
|
||||
my $encryption_fp = $scfg->{'encryption-key'};
|
||||
die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
|
||||
if $encryption_fp;
|
||||
return undef;
|
||||
}
|
||||
die "failed to open encryption key: $encryption_key_file: $!\n";
|
||||
if ($! == ENOENT) {
|
||||
my $encryption_fp = $scfg->{'encryption-key'};
|
||||
die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
|
||||
if $encryption_fp;
|
||||
return undef;
|
||||
}
|
||||
die "failed to open encryption key: $encryption_key_file: $!\n";
|
||||
}
|
||||
|
||||
return $keyfd;
|
||||
@ -186,8 +188,8 @@ sub pbs_delete_master_pubkey {
|
||||
my $pwfile = pbs_master_pubkey_file_name($scfg, $storeid);
|
||||
|
||||
if (!unlink $pwfile) {
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete master public key! $!\n";
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete master public key! $!\n";
|
||||
}
|
||||
delete $scfg->{'master-pubkey'};
|
||||
}
|
||||
@ -208,12 +210,12 @@ sub pbs_open_master_pubkey {
|
||||
|
||||
my $keyfd;
|
||||
if (!open($keyfd, '<', $master_pubkey_file)) {
|
||||
if ($! == ENOENT) {
|
||||
die "master public key configured but no key file found!\n"
|
||||
if $scfg->{'master-pubkey'};
|
||||
return undef;
|
||||
}
|
||||
die "failed to open master public key: $master_pubkey_file: $!\n";
|
||||
if ($! == ENOENT) {
|
||||
die "master public key configured but no key file found!\n"
|
||||
if $scfg->{'master-pubkey'};
|
||||
return undef;
|
||||
}
|
||||
die "failed to open master public key: $master_pubkey_file: $!\n";
|
||||
}
|
||||
|
||||
return $keyfd;
|
||||
@ -244,24 +246,24 @@ my sub api_param_from_volname : prototype($$$) {
|
||||
|
||||
my @tm = (POSIX::strptime($timestr, "%FT%TZ"));
|
||||
# expect sec, min, hour, mday, mon, year
|
||||
die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0..5];
|
||||
die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0 .. 5];
|
||||
|
||||
my $btime;
|
||||
{
|
||||
local $ENV{TZ} = 'UTC'; # $timestr is UTC
|
||||
local $ENV{TZ} = 'UTC'; # $timestr is UTC
|
||||
|
||||
# Fill in isdst to avoid undef warning. No daylight saving time for UTC.
|
||||
$tm[8] //= 0;
|
||||
# Fill in isdst to avoid undef warning. No daylight saving time for UTC.
|
||||
$tm[8] //= 0;
|
||||
|
||||
my $since_epoch = mktime(@tm) or die "error converting time from '$volname'\n";
|
||||
$btime = int($since_epoch);
|
||||
my $since_epoch = mktime(@tm) or die "error converting time from '$volname'\n";
|
||||
$btime = int($since_epoch);
|
||||
}
|
||||
|
||||
return {
|
||||
(ns($scfg, 'ns')),
|
||||
'backup-type' => $btype,
|
||||
'backup-id' => $bid,
|
||||
'backup-time' => $btime,
|
||||
(ns($scfg, 'ns')),
|
||||
'backup-type' => $btype,
|
||||
'backup-id' => $bid,
|
||||
'backup-time' => $btime,
|
||||
};
|
||||
}
|
||||
|
||||
@ -283,7 +285,7 @@ my sub do_raw_client_cmd {
|
||||
|
||||
my $client_exe = '/usr/bin/proxmox-backup-client';
|
||||
die "executable not found '$client_exe'! Proxmox backup client not installed?\n"
|
||||
if ! -x $client_exe;
|
||||
if !-x $client_exe;
|
||||
|
||||
my $repo = PVE::PBSClient::get_repository($scfg);
|
||||
|
||||
@ -298,29 +300,29 @@ my sub do_raw_client_cmd {
|
||||
# This must live in the top scope to not get closed before the `run_command`
|
||||
my ($keyfd, $master_fd);
|
||||
if ($use_crypto) {
|
||||
if (defined($keyfd = pbs_open_encryption_key($scfg, $storeid))) {
|
||||
my $flags = fcntl($keyfd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n";
|
||||
push @$cmd, '--crypt-mode=encrypt', '--keyfd='.fileno($keyfd);
|
||||
if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) {
|
||||
my $flags = fcntl($master_fd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from master public key file descriptor\n";
|
||||
push @$cmd, '--master-pubkey-fd='.fileno($master_fd);
|
||||
}
|
||||
} else {
|
||||
push @$cmd, '--crypt-mode=none';
|
||||
}
|
||||
if (defined($keyfd = pbs_open_encryption_key($scfg, $storeid))) {
|
||||
my $flags = fcntl($keyfd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n";
|
||||
push @$cmd, '--crypt-mode=encrypt', '--keyfd=' . fileno($keyfd);
|
||||
if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) {
|
||||
my $flags = fcntl($master_fd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from master public key file descriptor\n";
|
||||
push @$cmd, '--master-pubkey-fd=' . fileno($master_fd);
|
||||
}
|
||||
} else {
|
||||
push @$cmd, '--crypt-mode=none';
|
||||
}
|
||||
}
|
||||
|
||||
push @$cmd, @$param if defined($param);
|
||||
|
||||
push @$cmd, "--repository", $repo;
|
||||
if ($client_cmd ne 'status' && defined(my $ns = $scfg->{namespace})) {
|
||||
push @$cmd, '--ns', $ns;
|
||||
push @$cmd, '--ns', $ns;
|
||||
}
|
||||
|
||||
local $ENV{PBS_PASSWORD} = pbs_get_password($scfg, $storeid);
|
||||
@ -332,7 +334,7 @@ my sub do_raw_client_cmd {
|
||||
local $ENV{PROXMOX_OUTPUT_NO_HEADER} = 1;
|
||||
|
||||
if (my $logfunc = $opts{logfunc}) {
|
||||
$logfunc->("run: " . join(' ', @$cmd));
|
||||
$logfunc->("run: " . join(' ', @$cmd));
|
||||
}
|
||||
|
||||
run_command($cmd, %opts);
|
||||
@ -357,12 +359,15 @@ sub run_client_cmd {
|
||||
my $outfunc = sub { $json_str .= "$_[0]\n" };
|
||||
|
||||
$param = [] if !defined($param);
|
||||
$param = [ $param ] if !ref($param);
|
||||
$param = [$param] if !ref($param);
|
||||
|
||||
$param = [@$param, '--output-format=json'] if !$no_output;
|
||||
|
||||
do_raw_client_cmd($scfg, $storeid, $client_cmd, $param,
|
||||
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
|
||||
do_raw_client_cmd(
|
||||
$scfg, $storeid, $client_cmd, $param,
|
||||
outfunc => $outfunc,
|
||||
errmsg => 'proxmox-backup-client failed',
|
||||
);
|
||||
|
||||
return undef if $no_output;
|
||||
|
||||
@ -383,15 +388,18 @@ sub extract_vzdump_config {
|
||||
|
||||
my $config_name;
|
||||
if ($format eq 'pbs-vm') {
|
||||
$config_name = 'qemu-server.conf';
|
||||
} elsif ($format eq 'pbs-ct') {
|
||||
$config_name = 'pct.conf';
|
||||
$config_name = 'qemu-server.conf';
|
||||
} elsif ($format eq 'pbs-ct') {
|
||||
$config_name = 'pct.conf';
|
||||
} else {
|
||||
die "unable to extract configuration for backup format '$format'\n";
|
||||
die "unable to extract configuration for backup format '$format'\n";
|
||||
}
|
||||
|
||||
do_raw_client_cmd($scfg, $storeid, 'restore', [ $name, $config_name, '-' ],
|
||||
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
|
||||
do_raw_client_cmd(
|
||||
$scfg, $storeid, 'restore', [$name, $config_name, '-'],
|
||||
outfunc => $outfunc,
|
||||
errmsg => 'proxmox-backup-client failed',
|
||||
);
|
||||
|
||||
return $config;
|
||||
}
|
||||
@ -407,19 +415,19 @@ sub prune_backups {
|
||||
my $backup_groups = {};
|
||||
|
||||
if (defined($vmid) && defined($type)) {
|
||||
# no need to get the list of volumes, we only got a single backup group anyway
|
||||
$backup_groups->{"$type/$vmid"} = 1;
|
||||
# no need to get the list of volumes, we only got a single backup group anyway
|
||||
$backup_groups->{"$type/$vmid"} = 1;
|
||||
} else {
|
||||
my $backups = eval { $class->list_volumes($storeid, $scfg, $vmid, ['backup']) };
|
||||
die "failed to get list of all backups to prune - $@" if $@;
|
||||
my $backups = eval { $class->list_volumes($storeid, $scfg, $vmid, ['backup']) };
|
||||
die "failed to get list of all backups to prune - $@" if $@;
|
||||
|
||||
foreach my $backup (@{$backups}) {
|
||||
(my $backup_type = $backup->{format}) =~ s/^pbs-//;
|
||||
next if defined($type) && $backup_type ne $type;
|
||||
foreach my $backup (@{$backups}) {
|
||||
(my $backup_type = $backup->{format}) =~ s/^pbs-//;
|
||||
next if defined($type) && $backup_type ne $type;
|
||||
|
||||
my $backup_group = "$backup_type/$backup->{vmid}";
|
||||
$backup_groups->{$backup_group} = 1;
|
||||
}
|
||||
my $backup_group = "$backup_type/$backup->{vmid}";
|
||||
$backup_groups->{$backup_group} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
my @param;
|
||||
@ -427,13 +435,13 @@ sub prune_backups {
|
||||
my $keep_all = delete $keep->{'keep-all'};
|
||||
|
||||
if (!$keep_all) {
|
||||
foreach my $opt (keys %{$keep}) {
|
||||
next if $keep->{$opt} == 0;
|
||||
push @param, "--$opt";
|
||||
push @param, "$keep->{$opt}";
|
||||
}
|
||||
foreach my $opt (keys %{$keep}) {
|
||||
next if $keep->{$opt} == 0;
|
||||
push @param, "--$opt";
|
||||
push @param, "$keep->{$opt}";
|
||||
}
|
||||
} else { # no need to pass anything to PBS
|
||||
$keep = { 'keep-all' => 1 };
|
||||
$keep = { 'keep-all' => 1 };
|
||||
}
|
||||
|
||||
push @param, '--dry-run' if $dryrun;
|
||||
@ -442,39 +450,40 @@ sub prune_backups {
|
||||
my $failed;
|
||||
|
||||
foreach my $backup_group (keys %{$backup_groups}) {
|
||||
$logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'")
|
||||
if !$dryrun;
|
||||
eval {
|
||||
my $res = run_client_cmd($scfg, $storeid, 'prune', [ $backup_group, @param ]);
|
||||
$logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'")
|
||||
if !$dryrun;
|
||||
eval {
|
||||
my $res = run_client_cmd($scfg, $storeid, 'prune', [$backup_group, @param]);
|
||||
|
||||
foreach my $backup (@{$res}) {
|
||||
die "result from proxmox-backup-client is not as expected\n"
|
||||
if !defined($backup->{'backup-time'})
|
||||
|| !defined($backup->{'backup-type'})
|
||||
|| !defined($backup->{'backup-id'})
|
||||
|| !defined($backup->{'keep'});
|
||||
foreach my $backup (@{$res}) {
|
||||
die "result from proxmox-backup-client is not as expected\n"
|
||||
if !defined($backup->{'backup-time'})
|
||||
|| !defined($backup->{'backup-type'})
|
||||
|| !defined($backup->{'backup-id'})
|
||||
|| !defined($backup->{'keep'});
|
||||
|
||||
my $ctime = $backup->{'backup-time'};
|
||||
my $type = $backup->{'backup-type'};
|
||||
my $vmid = $backup->{'backup-id'};
|
||||
my $volid = print_volid($storeid, $type, $vmid, $ctime);
|
||||
my $ctime = $backup->{'backup-time'};
|
||||
my $type = $backup->{'backup-type'};
|
||||
my $vmid = $backup->{'backup-id'};
|
||||
my $volid = print_volid($storeid, $type, $vmid, $ctime);
|
||||
|
||||
my $mark = $backup->{keep} ? 'keep' : 'remove';
|
||||
$mark = 'protected' if $backup->{protected};
|
||||
my $mark = $backup->{keep} ? 'keep' : 'remove';
|
||||
$mark = 'protected' if $backup->{protected};
|
||||
|
||||
push @{$prune_list}, {
|
||||
ctime => $ctime,
|
||||
mark => $mark,
|
||||
type => $type eq 'vm' ? 'qemu' : 'lxc',
|
||||
vmid => $vmid,
|
||||
volid => $volid,
|
||||
};
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
$logfunc->('err', "prune '$backup_group': $err\n");
|
||||
$failed = 1;
|
||||
}
|
||||
push @{$prune_list},
|
||||
{
|
||||
ctime => $ctime,
|
||||
mark => $mark,
|
||||
type => $type eq 'vm' ? 'qemu' : 'lxc',
|
||||
vmid => $vmid,
|
||||
volid => $volid,
|
||||
};
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
$logfunc->('err', "prune '$backup_group': $err\n");
|
||||
$failed = 1;
|
||||
}
|
||||
}
|
||||
die "error pruning backups - check log\n" if $failed;
|
||||
|
||||
@ -485,7 +494,7 @@ my $autogen_encryption_key = sub {
|
||||
my ($scfg, $storeid) = @_;
|
||||
my $encfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
if (-f $encfile) {
|
||||
rename $encfile, "$encfile.old";
|
||||
rename $encfile, "$encfile.old";
|
||||
}
|
||||
my $cmd = ['proxmox-backup-client', 'key', 'create', '--kdf', 'none', $encfile];
|
||||
run_command($cmd, errmsg => 'failed to create encryption key');
|
||||
@ -498,38 +507,38 @@ sub on_add_hook {
|
||||
my $res = {};
|
||||
|
||||
if (defined(my $password = $param{password})) {
|
||||
pbs_set_password($scfg, $storeid, $password);
|
||||
pbs_set_password($scfg, $storeid, $password);
|
||||
} else {
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
}
|
||||
|
||||
if (defined(my $encryption_key = $param{'encryption-key'})) {
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
} else {
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
}
|
||||
|
||||
if (defined(my $master_key = delete $param{'master-pubkey'})) {
|
||||
die "'master-pubkey' can only be used together with 'encryption-key'\n"
|
||||
if !defined($scfg->{'encryption-key'});
|
||||
die "'master-pubkey' can only be used together with 'encryption-key'\n"
|
||||
if !defined($scfg->{'encryption-key'});
|
||||
|
||||
my $decoded = decode_base64($master_key);
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
my $decoded = decode_base64($master_key);
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
} else {
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -541,43 +550,43 @@ sub on_update_hook {
|
||||
my $res = {};
|
||||
|
||||
if (exists($param{password})) {
|
||||
if (defined($param{password})) {
|
||||
pbs_set_password($scfg, $storeid, $param{password});
|
||||
} else {
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
}
|
||||
if (defined($param{password})) {
|
||||
pbs_set_password($scfg, $storeid, $param{password});
|
||||
} else {
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
if (exists($param{'encryption-key'})) {
|
||||
if (defined(my $encryption_key = delete($param{'encryption-key'}))) {
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
} else {
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
delete $scfg->{'encryption-key'};
|
||||
}
|
||||
if (defined(my $encryption_key = delete($param{'encryption-key'}))) {
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
} else {
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
delete $scfg->{'encryption-key'};
|
||||
}
|
||||
}
|
||||
|
||||
if (exists($param{'master-pubkey'})) {
|
||||
if (defined(my $master_key = delete($param{'master-pubkey'}))) {
|
||||
my $decoded = decode_base64($master_key);
|
||||
if (defined(my $master_key = delete($param{'master-pubkey'}))) {
|
||||
my $decoded = decode_base64($master_key);
|
||||
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
} else {
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
}
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
} else {
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
@ -596,19 +605,21 @@ sub on_delete_hook {
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!) {
|
||||
my $btype = $1;
|
||||
my $bid = $2;
|
||||
my $btime = $3;
|
||||
my $format = "pbs-$btype";
|
||||
if ($volname =~
|
||||
m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!
|
||||
) {
|
||||
my $btype = $1;
|
||||
my $bid = $2;
|
||||
my $btime = $3;
|
||||
my $format = "pbs-$btype";
|
||||
|
||||
my $name = "$btype/$bid/$btime";
|
||||
my $name = "$btype/$bid/$btime";
|
||||
|
||||
if ($bid =~ m/^\d+$/) {
|
||||
return ('backup', $name, $bid, undef, undef, undef, $format);
|
||||
} else {
|
||||
return ('backup', $name, undef, undef, undef, undef, $format);
|
||||
}
|
||||
if ($bid =~ m/^\d+$/) {
|
||||
return ('backup', $name, $bid, undef, undef, undef, $format);
|
||||
} else {
|
||||
return ('backup', $name, undef, undef, undef, undef, $format);
|
||||
}
|
||||
}
|
||||
|
||||
die "unable to parse PBS volume name '$volname'\n";
|
||||
@ -618,7 +629,7 @@ sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
die "volume snapshot is not possible on pbs storage"
|
||||
if defined($snapname);
|
||||
if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
@ -627,8 +638,8 @@ sub path {
|
||||
# artificial url - we currently do not use that anywhere
|
||||
my $path = "pbs://$repo/$name";
|
||||
if (defined(my $ns = $scfg->{namespace})) {
|
||||
$ns =~ s|/|%2f|g; # other characters to escape aren't allowed in the namespace schema
|
||||
$path .= "?ns=$ns";
|
||||
$ns =~ s|/|%2f|g; # other characters to escape aren't allowed in the namespace schema
|
||||
$path .= "?ns=$ns";
|
||||
}
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
@ -657,12 +668,11 @@ sub free_image {
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
run_client_cmd($scfg, $storeid, "forget", [ $name ], 1);
|
||||
run_client_cmd($scfg, $storeid, "forget", [$name], 1);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
@ -678,13 +688,13 @@ my sub snapshot_files_encrypted {
|
||||
my $any;
|
||||
my $all = 1;
|
||||
for my $file (@$files) {
|
||||
my $fn = $file->{filename};
|
||||
next if $fn eq 'client.log.blob' || $fn eq 'index.json.blob';
|
||||
my $fn = $file->{filename};
|
||||
next if $fn eq 'client.log.blob' || $fn eq 'index.json.blob';
|
||||
|
||||
my $crypt = $file->{'crypt-mode'};
|
||||
my $crypt = $file->{'crypt-mode'};
|
||||
|
||||
$all = 0 if !$crypt || $crypt ne 'encrypt';
|
||||
$any ||= defined($crypt) && $crypt eq 'encrypt';
|
||||
$all = 0 if !$crypt || $crypt ne 'encrypt';
|
||||
$any ||= defined($crypt) && $crypt eq 'encrypt';
|
||||
}
|
||||
return $any && $all;
|
||||
}
|
||||
@ -699,22 +709,22 @@ my sub pbs_api_connect {
my $user = $scfg->{username} // 'root@pam';

if (my $tokenid = PVE::AccessControl::pve_verify_tokenid($user, 1)) {
$params->{apitoken} = "PBSAPIToken=${tokenid}:${password}";
$params->{apitoken} = "PBSAPIToken=${tokenid}:${password}";
} else {
$params->{password} = $password;
$params->{username} = $user;
$params->{password} = $password;
$params->{username} = $user;
}

if (my $fp = $scfg->{fingerprint}) {
$params->{cached_fingerprints}->{uc($fp)} = 1;
$params->{cached_fingerprints}->{ uc($fp) } = 1;
}

my $conn = PVE::APIClient::LWP->new(
%$params,
host => $scfg->{server},
port => $scfg->{port} // 8007,
timeout => ($timeout // 7), # cope with a 401 (3s api delay) and high latency
cookie_name => 'PBSAuthCookie',
%$params,
host => $scfg->{server},
port => $scfg->{port} // 8007,
timeout => ($timeout // 7), # cope with a 401 (3s api delay) and high latency
cookie_name => 'PBSAuthCookie',
);

return $conn;
@ -738,37 +748,37 @@ sub list_volumes {
die "error listing snapshots - $@" if $@;

foreach my $item (@$data) {
my $btype = $item->{"backup-type"};
my $bid = $item->{"backup-id"};
my $epoch = $item->{"backup-time"};
my $size = $item->{size} // 1;
my $btype = $item->{"backup-type"};
my $bid = $item->{"backup-id"};
my $epoch = $item->{"backup-time"};
my $size = $item->{size} // 1;

next if !($btype eq 'vm' || $btype eq 'ct');
next if $bid !~ m/^\d+$/;
next if defined($vmid) && $bid ne $vmid;
next if !($btype eq 'vm' || $btype eq 'ct');
next if $bid !~ m/^\d+$/;
next if defined($vmid) && $bid ne $vmid;

my $volid = print_volid($storeid, $btype, $bid, $epoch);
my $volid = print_volid($storeid, $btype, $bid, $epoch);

my $info = {
volid => $volid,
format => "pbs-$btype",
size => $size,
content => 'backup',
vmid => int($bid),
ctime => $epoch,
subtype => $btype eq 'vm' ? 'qemu' : 'lxc', # convert to PVE backup type
};
my $info = {
volid => $volid,
format => "pbs-$btype",
size => $size,
content => 'backup',
vmid => int($bid),
ctime => $epoch,
subtype => $btype eq 'vm' ? 'qemu' : 'lxc', # convert to PVE backup type
};

$info->{verification} = $item->{verification} if defined($item->{verification});
$info->{notes} = $item->{comment} if defined($item->{comment});
$info->{protected} = 1 if $item->{protected};
if (defined($item->{fingerprint})) {
$info->{encrypted} = $item->{fingerprint};
} elsif (snapshot_files_encrypted($item->{files})) {
$info->{encrypted} = '1';
}
$info->{verification} = $item->{verification} if defined($item->{verification});
$info->{notes} = $item->{comment} if defined($item->{comment});
$info->{protected} = 1 if $item->{protected};
if (defined($item->{fingerprint})) {
$info->{encrypted} = $item->{fingerprint};
} elsif (snapshot_files_encrypted($item->{files})) {
$info->{encrypted} = '1';
}

push @$res, $info;
push @$res, $info;
}

return $res;
@ -783,15 +793,15 @@ sub status {
my $active = 0;

eval {
my $res = run_client_cmd($scfg, $storeid, "status");
my $res = run_client_cmd($scfg, $storeid, "status");

$active = 1;
$total = $res->{total};
$used = $res->{used};
$free = $res->{avail};
$active = 1;
$total = $res->{total};
$used = $res->{used};
$free = $res->{avail};
};
if (my $err = $@) {
warn $err;
warn $err;
}

return ($total, $free, $used, $active);
@ -826,9 +836,9 @@ sub activate_storage {
my $datastore = $scfg->{datastore};

for my $ds (@$datastores) {
if ($ds->{store} eq $datastore) {
return 1;
}
if ($ds->{store} eq $datastore) {
return 1;
}
}

die "$storeid: Cannot find datastore '$datastore', check permissions and existence!\n";
@ -860,9 +870,9 @@ sub deactivate_volume {
sub get_volume_notes {
my ($class, $scfg, $storeid, $volname, $timeout) = @_;

my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

my $data = run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "show", $name ]);
my $data = run_client_cmd($scfg, $storeid, "snapshot", ["notes", "show", $name]);

return $data->{notes};
}
@ -872,9 +882,9 @@ sub get_volume_notes {
sub update_volume_notes {
my ($class, $scfg, $storeid, $volname, $notes, $timeout) = @_;

my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "update", $name, $notes ], 1);
run_client_cmd($scfg, $storeid, "snapshot", ["notes", "update", $name, $notes], 1);

return undef;
}
@ -883,22 +893,22 @@ sub get_volume_attribute {
my ($class, $scfg, $storeid, $volname, $attribute) = @_;

if ($attribute eq 'notes') {
return $class->get_volume_notes($scfg, $storeid, $volname);
return $class->get_volume_notes($scfg, $storeid, $volname);
}

if ($attribute eq 'protected') {
my $param = api_param_from_volname($class, $scfg, $volname);
my $param = api_param_from_volname($class, $scfg, $volname);

my $password = pbs_get_password($scfg, $storeid);
my $conn = pbs_api_connect($scfg, $password);
my $datastore = $scfg->{datastore};
my $password = pbs_get_password($scfg, $storeid);
my $conn = pbs_api_connect($scfg, $password);
my $datastore = $scfg->{datastore};

my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/$attribute", $param); };
if (my $err = $@) {
return if $err->{code} == 404; # not supported
die $err;
}
return $res;
my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/$attribute", $param); };
if (my $err = $@) {
return if $err->{code} == 404; # not supported
die $err;
}
return $res;
}

return;
@ -908,24 +918,24 @@ sub update_volume_attribute {
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;

if ($attribute eq 'notes') {
return $class->update_volume_notes($scfg, $storeid, $volname, $value);
return $class->update_volume_notes($scfg, $storeid, $volname, $value);
}

if ($attribute eq 'protected') {
my $param = api_param_from_volname($class, $scfg, $volname);
$param->{$attribute} = $value;
my $param = api_param_from_volname($class, $scfg, $volname);
$param->{$attribute} = $value;

my $password = pbs_get_password($scfg, $storeid);
my $conn = pbs_api_connect($scfg, $password);
my $datastore = $scfg->{datastore};
my $password = pbs_get_password($scfg, $storeid);
my $conn = pbs_api_connect($scfg, $password);
my $datastore = $scfg->{datastore};

eval { $conn->put("/api2/json/admin/datastore/$datastore/$attribute", $param); };
if (my $err = $@) {
die "Server is not recent enough to support feature '$attribute'\n"
if $err->{code} == 404;
die $err;
}
return;
eval { $conn->put("/api2/json/admin/datastore/$datastore/$attribute", $param); };
if (my $err = $@) {
die "Server is not recent enough to support feature '$attribute'\n"
if $err->{code} == 404;
die $err;
}
return;
}

die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n";
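A hedged sketch of the eval/$@ pattern the two attribute helpers above rely on when talking to the PBS API; the sub name and arguments are hypothetical, but treating a 404 error object as "feature not supported" mirrors the code being reformatted.

    # $conn is assumed to be a PVE::APIClient::LWP handle, as set up by pbs_api_connect above.
    sub get_protected_flag {
        my ($conn, $datastore, $param) = @_;
        my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/protected", $param) };
        if (my $err = $@) {
            return undef if $err->{code} == 404; # older server, treat as "not supported"
            die $err; # anything else is a real failure
        }
        return $res;
    }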
@ -934,15 +944,15 @@ sub update_volume_attribute {
sub volume_size_info {
my ($class, $scfg, $storeid, $volname, $timeout) = @_;

my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

my $data = run_client_cmd($scfg, $storeid, "files", [ $name ]);
my $data = run_client_cmd($scfg, $storeid, "files", [$name]);

my $size = 0;
foreach my $info (@$data) {
if ($info->{size} && $info->{size} =~ /^(\d+)$/) { # untaints
$size += $1;
}
if ($info->{size} && $info->{size} =~ /^(\d+)$/) { # untaints
$size += $1;
}
}

my $used = $size;
File diff suppressed because it is too large
@ -10,7 +10,7 @@ use Net::IP;
use POSIX qw(ceil);

use PVE::CephConfig;
use PVE::Cluster qw(cfs_read_file);;
use PVE::Cluster qw(cfs_read_file);
use PVE::JSONSchema qw(get_standard_option);
use PVE::ProcFSTools;
use PVE::RADOS;
@ -32,7 +32,7 @@ my $librados_connect = sub {
my ($scfg, $storeid, $options) = @_;

$options->{timeout} = 60
if !defined($options->{timeout}) && PVE::RPCEnvironment->is_worker();
if !defined($options->{timeout}) && PVE::RPCEnvironment->is_worker();

my $librados_config = PVE::CephConfig::ceph_connect_option($scfg, $storeid, $options->%*);

@ -47,27 +47,27 @@ my sub get_rbd_path {
$path .= "/$scfg->{namespace}" if defined($scfg->{namespace});
$path .= "/$volume" if defined($volume);
return $path;
};
}

my sub get_rbd_dev_path {
my ($scfg, $storeid, $volume) = @_;

my $cluster_id = '';
if ($scfg->{fsid}) {
# NOTE: the config doesn't support this currently (but it could!), hack for qemu-server tests
$cluster_id = $scfg->{fsid};
# NOTE: the config doesn't support this currently (but it could!), hack for qemu-server tests
$cluster_id = $scfg->{fsid};
} elsif ($scfg->{monhost}) {
my $rados = $librados_connect->($scfg, $storeid);
$cluster_id = $rados->mon_command({ prefix => 'fsid', format => 'json' })->{fsid};
my $rados = $librados_connect->($scfg, $storeid);
$cluster_id = $rados->mon_command({ prefix => 'fsid', format => 'json' })->{fsid};
} else {
$cluster_id = cfs_read_file('ceph.conf')->{global}->{fsid};
$cluster_id = cfs_read_file('ceph.conf')->{global}->{fsid};
}

my $uuid_pattern = "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})";
if ($cluster_id =~ qr/^${uuid_pattern}$/is) {
$cluster_id = $1; # use untained value
$cluster_id = $1; # use untained value
} else {
die "cluster fsid has invalid format\n";
die "cluster fsid has invalid format\n";
}

my $rbd_path = get_rbd_path($scfg, $volume);
@ -75,11 +75,11 @@ my sub get_rbd_dev_path {
my $path = "/dev/rbd/${rbd_path}";

if (!-e $pve_path && -e $path) {
# possibly mapped before rbd-pve rule existed
my $real_dev = abs_path($path);
my ($rbd_id) = ($real_dev =~ m|/dev/rbd([0-9]+)$|);
my $dev_cluster_id = file_read_firstline("/sys/devices/rbd/${rbd_id}/cluster_fsid");
return $path if $cluster_id eq $dev_cluster_id;
# possibly mapped before rbd-pve rule existed
my $real_dev = abs_path($path);
my ($rbd_id) = ($real_dev =~ m|/dev/rbd([0-9]+)$|);
my $dev_cluster_id = file_read_firstline("/sys/devices/rbd/${rbd_id}/cluster_fsid");
return $path if $cluster_id eq $dev_cluster_id;
}
return $pve_path;
}
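As a small illustration of the untaint step in get_rbd_dev_path above, the following standalone sketch validates an fsid and only keeps the captured value; the sample fsid is made up.

    my $uuid_pattern = "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})";
    my $cluster_id = "1d3bf4a2-5b8e-4b52-9c2f-0a1b2c3d4e5f"; # hypothetical fsid
    if ($cluster_id =~ qr/^${uuid_pattern}$/is) {
        $cluster_id = $1; # only the validated capture is used further on
    } else {
        die "cluster fsid has invalid format\n";
    }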
@ -88,25 +88,26 @@ my $rbd_cmd = sub {
my ($scfg, $storeid, $op, @options) = @_;

my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd';
my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd';

my $cmd = ['/usr/bin/rbd'];
if ($op eq 'import') {
push $cmd->@*, '--dest-pool', $pool;
push $cmd->@*, '--dest-pool', $pool;
} else {
push $cmd->@*, '-p', $pool;
push $cmd->@*, '-p', $pool;
}

if (defined(my $namespace = $scfg->{namespace})) {
# some subcommands will fail if the --namespace parameter is present
my $no_namespace_parameter = {
unmap => 1,
};
push @$cmd, '--namespace', "$namespace" if !$no_namespace_parameter->{$op};
# some subcommands will fail if the --namespace parameter is present
my $no_namespace_parameter = {
unmap => 1,
};
push @$cmd, '--namespace', "$namespace" if !$no_namespace_parameter->{$op};
}
push @$cmd, '-c', $cmd_option->{ceph_conf} if ($cmd_option->{ceph_conf});
push @$cmd, '-m', $cmd_option->{mon_host} if ($cmd_option->{mon_host});
push @$cmd, '--auth_supported', $cmd_option->{auth_supported} if ($cmd_option->{auth_supported});
push @$cmd, '--auth_supported', $cmd_option->{auth_supported}
if ($cmd_option->{auth_supported});
push @$cmd, '-n', "client.$cmd_option->{userid}" if ($cmd_option->{userid});
push @$cmd, '--keyring', $cmd_option->{keyring} if ($cmd_option->{keyring});

@ -125,42 +126,45 @@ my $krbd_feature_update = sub {
my ($kmajor, $kminor) = PVE::ProcFSTools::kernel_version();

if ($kmajor > 5 || $kmajor == 5 && $kminor >= 3) {
# 'deep-flatten' can only be disabled, not enabled after image creation
push @enable, 'fast-diff', 'object-map';
# 'deep-flatten' can only be disabled, not enabled after image creation
push @enable, 'fast-diff', 'object-map';
} else {
push @disable, 'fast-diff', 'object-map', 'deep-flatten';
push @disable, 'fast-diff', 'object-map', 'deep-flatten';
}

if ($kmajor >= 5) {
push @enable, 'exclusive-lock';
push @enable, 'exclusive-lock';
} else {
push @disable, 'exclusive-lock';
push @disable, 'exclusive-lock';
}

my $active_features_list = (rbd_volume_info($scfg, $storeid, $name))[4];
my $active_features = { map { $_ => 1 } @$active_features_list };

my $to_disable = join(',', grep { $active_features->{$_} } @disable);
my $to_enable = join(',', grep { !$active_features->{$_} } @enable );
my $to_disable = join(',', grep { $active_features->{$_} } @disable);
my $to_enable = join(',', grep { !$active_features->{$_} } @enable);

if ($to_disable) {
print "disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n";
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable);
run_rbd_command(
$cmd,
errmsg => "could not disable krbd-incompatible image features '$to_disable' for rbd image: $name",
);
print
"disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n";
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable);
run_rbd_command(
$cmd,
errmsg =>
"could not disable krbd-incompatible image features '$to_disable' for rbd image: $name",
);
}
if ($to_enable) {
print "enable RBD image features this kernel RBD drivers supports: $to_enable\n";
eval {
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable);
run_rbd_command(
$cmd,
errmsg => "could not enable krbd-compatible image features '$to_enable' for rbd image: $name",
);
};
warn "$@" if $@;
print "enable RBD image features this kernel RBD drivers supports: $to_enable\n";
eval {
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable);
run_rbd_command(
$cmd,
errmsg =>
"could not enable krbd-compatible image features '$to_enable' for rbd image: $name",
);
};
warn "$@" if $@;
}
};

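A minimal sketch of the kernel-version gate used by the krbd feature update above; it assumes PVE::ProcFSTools::kernel_version() returns (major, minor, ...) as it does in the code being reformatted, and only prints what it would change.

    use PVE::ProcFSTools;

    my ($kmajor, $kminor) = PVE::ProcFSTools::kernel_version();

    my (@enable, @disable);
    if ($kmajor > 5 || ($kmajor == 5 && $kminor >= 3)) {
        push @enable, 'fast-diff', 'object-map'; # deep-flatten can only be disabled later
    } else {
        push @disable, 'fast-diff', 'object-map', 'deep-flatten';
    }
    print "would enable: @enable\nwould disable: @disable\n";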
@ -170,24 +174,26 @@ sub run_rbd_command {
my $lasterr;
my $errmsg = $args{errmsg} . ": " || "";
if (!exists($args{errfunc})) {
# ' error: 2014-02-06 11:51:59.839135 7f09f94d0760 -1 librbd: snap_unprotect: can't unprotect;
# at least 1 child(ren) in pool cephstor1
$args{errfunc} = sub {
my $line = shift;
if ($line =~ m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/) {
$lasterr = "$1\n";
} else {
$lasterr = $line;
}
print STDERR $lasterr;
*STDERR->flush();
};
# ' error: 2014-02-06 11:51:59.839135 7f09f94d0760 -1 librbd: snap_unprotect: can't unprotect;
# at least 1 child(ren) in pool cephstor1
$args{errfunc} = sub {
my $line = shift;
if ($line =~
m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/
) {
$lasterr = "$1\n";
} else {
$lasterr = $line;
}
print STDERR $lasterr;
*STDERR->flush();
};
}

eval { run_command($cmd, %args); };
if (my $err = $@) {
die $errmsg . $lasterr if length($lasterr);
die $err;
die $errmsg . $lasterr if length($lasterr);
die $err;
}

return undef;
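A hedged sketch of the outfunc/errfunc callback style that run_rbd_command builds on; run_command is the PVE::Tools helper already used in this file, while the concrete rbd invocation and variables are illustrative only.

    use PVE::Tools qw(run_command);

    my $raw = '';
    my $lasterr;
    run_command(
        ['rbd', 'ls', '-l', '--format', 'json'],
        outfunc => sub { $raw .= shift }, # collect stdout line by line
        errfunc => sub { $lasterr = shift; print STDERR "$lasterr\n" },
    );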
@ -200,33 +206,33 @@ sub rbd_ls {
my $parser = sub { $raw .= shift };

my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '-l', '--format', 'json');
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);

my $result;
if ($raw eq '') {
$result = [];
$result = [];
} elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
$result = JSON::decode_json($1);
$result = JSON::decode_json($1);
} else {
die "got unexpected data from rbd ls: '$raw'\n";
die "got unexpected data from rbd ls: '$raw'\n";
}

my $list = {};

foreach my $el (@$result) {
next if defined($el->{snapshot});
next if defined($el->{snapshot});

my $image = $el->{image};
my $image = $el->{image};

my ($owner) = $image =~ m/^(?:vm|base)-(\d+)-/;
next if !defined($owner);
my ($owner) = $image =~ m/^(?:vm|base)-(\d+)-/;
next if !defined($owner);

$list->{$image} = {
name => $image,
size => $el->{size},
parent => $get_parent_image_name->($el->{parent}),
vmid => $owner
};
$list->{$image} = {
name => $image,
size => $el->{size},
parent => $get_parent_image_name->($el->{parent}),
vmid => $owner,
};
}

return $list;
@ -238,28 +244,33 @@ sub rbd_ls_snap {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'ls', $name, '--format', 'json');

my $raw = '';
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; });
run_rbd_command(
$cmd,
errmsg => "rbd error",
errfunc => sub { },
outfunc => sub { $raw .= shift; },
);

my $list;
if ($raw =~ m/^(\[.*\])$/s) { # untaint
$list = eval { JSON::decode_json($1) };
die "invalid JSON output from 'rbd snap ls $name': $@\n" if $@;
$list = eval { JSON::decode_json($1) };
die "invalid JSON output from 'rbd snap ls $name': $@\n" if $@;
} else {
die "got unexpected data from 'rbd snap ls $name': '$raw'\n";
die "got unexpected data from 'rbd snap ls $name': '$raw'\n";
}

$list = [] if !defined($list);

my $res = {};
foreach my $el (@$list) {
my $snap = $el->{name};
my $protected = defined($el->{protected}) && $el->{protected} eq "true" ? 1 : undef;
$res->{$snap} = {
name => $snap,
id => $el->{id} // undef,
size => $el->{size} // 0,
protected => $protected,
};
my $snap = $el->{name};
my $protected = defined($el->{protected}) && $el->{protected} eq "true" ? 1 : undef;
$res->{$snap} = {
name => $snap,
id => $el->{id} // undef,
size => $el->{size} // 0,
protected => $protected,
};
}
return $res;
}
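A small standalone sketch of the untaint-then-decode pattern repeated in rbd_ls and rbd_ls_snap above; the JSON string is a stand-in for real `rbd ... --format json` output.

    use JSON;

    my $raw = '[{"image":"vm-100-disk-0","size":1073741824}]'; # stand-in output
    my $list;
    if ($raw =~ m/^(\[.*\])$/s) { # untaint: only accept a full JSON array
        $list = eval { JSON::decode_json($1) };
        die "invalid JSON output: $@\n" if $@;
    } else {
        die "got unexpected data: '$raw'\n";
    }
    print scalar(@$list), " image(s)\n";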
@ -271,7 +282,7 @@ sub rbd_volume_info {

my @options = ('info', $volname, '--format', 'json');
if ($snap) {
push @options, '--snap', $snap;
push @options, '--snap', $snap;
}

$cmd = $rbd_cmd->($scfg, $storeid, @options);
@ -279,19 +290,20 @@ sub rbd_volume_info {
my $raw = '';
my $parser = sub { $raw .= shift };

run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);

my $volume;
if ($raw eq '') {
$volume = {};
$volume = {};
} elsif ($raw =~ m/^(\{.*\})$/s) { # untaint
$volume = JSON::decode_json($1);
$volume = JSON::decode_json($1);
} else {
die "got unexpected data from rbd info: '$raw'\n";
die "got unexpected data from rbd info: '$raw'\n";
}

$volume->{parent} = $get_parent_image_name->($volume->{parent});
$volume->{protected} = defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef;
$volume->{protected} =
defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef;

return $volume->@{qw(size parent format protected features)};
}
@ -305,31 +317,31 @@ sub rbd_volume_du {
my $raw = '';
my $parser = sub { $raw .= shift };

run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);

my $volume;
if ($raw eq '') {
$volume = {};
$volume = {};
} elsif ($raw =~ m/^(\{.*\})$/s) { # untaint
$volume = JSON::decode_json($1);
$volume = JSON::decode_json($1);
} else {
die "got unexpected data from rbd du: '$raw'\n";
die "got unexpected data from rbd du: '$raw'\n";
}

if (!defined($volume->{images})) {
die "got no images from rbd du\n";
die "got no images from rbd du\n";
}

# `rbd du` returns array of images for name matching `volname`,
# including snapshots.
my $images = $volume->{images};
foreach my $image (@$images) {
next if defined($image->{snapshot});
next if !defined($image->{used_size}) || !defined($image->{name});
next if defined($image->{snapshot});
next if !defined($image->{used_size}) || !defined($image->{name});

# Return `used_size` of first volume with matching name which
# is not a snapshot.
return $image->{used_size} if $image->{name} eq $volname;
# Return `used_size` of first volume with matching name which
# is not a snapshot.
return $image->{used_size} if $image->{name} eq $volname;
}

die "got no matching image from rbd du\n";
@ -341,18 +353,22 @@ my sub rbd_volume_exists {
my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '--format', 'json');
my $raw = '';
run_rbd_command(
$cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; });
$cmd,
errmsg => "rbd error",
errfunc => sub { },
outfunc => sub { $raw .= shift; },
);

my $list;
if ($raw =~ m/^(\[.*\])$/s) { # untaint
$list = eval { JSON::decode_json($1); };
die "invalid JSON output from 'rbd ls': $@\n" if $@;
$list = eval { JSON::decode_json($1); };
die "invalid JSON output from 'rbd ls': $@\n" if $@;
} else {
die "got unexpected data from 'rbd ls': '$raw'\n";
die "got unexpected data from 'rbd ls': '$raw'\n";
}

for my $name ($list->@*) {
return 1 if $name eq $volname;
return 1 if $name eq $volname;
}

return 0;
@ -366,62 +382,63 @@ sub type {

sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, { images => 1 }],
'sensitive-properties' => { keyring => 1 },
content => [{ images => 1, rootdir => 1 }, { images => 1 }],
'sensitive-properties' => { keyring => 1 },
};
}

sub properties {
return {
monhost => {
description => "IP addresses of monitors (for external clusters).",
type => 'string', format => 'pve-storage-portal-dns-list',
},
pool => {
description => "Pool.",
type => 'string',
},
'data-pool' => {
description => "Data Pool (for erasure coding only)",
type => 'string',
},
namespace => {
description => "Namespace.",
type => 'string',
},
username => {
description => "RBD Id.",
type => 'string',
},
authsupported => {
description => "Authsupported.",
type => 'string',
},
krbd => {
description => "Always access rbd through krbd kernel module.",
type => 'boolean',
default => 0,
},
keyring => {
description => "Client keyring contents (for external clusters).",
type => 'string',
},
monhost => {
description => "IP addresses of monitors (for external clusters).",
type => 'string',
format => 'pve-storage-portal-dns-list',
},
pool => {
description => "Pool.",
type => 'string',
},
'data-pool' => {
description => "Data Pool (for erasure coding only)",
type => 'string',
},
namespace => {
description => "Namespace.",
type => 'string',
},
username => {
description => "RBD Id.",
type => 'string',
},
authsupported => {
description => "Authsupported.",
type => 'string',
},
krbd => {
description => "Always access rbd through krbd kernel module.",
type => 'boolean',
default => 0,
},
keyring => {
description => "Client keyring contents (for external clusters).",
type => 'string',
},
};
}

sub options {
return {
nodes => { optional => 1 },
disable => { optional => 1 },
monhost => { optional => 1},
pool => { optional => 1 },
'data-pool' => { optional => 1 },
namespace => { optional => 1 },
username => { optional => 1 },
content => { optional => 1 },
krbd => { optional => 1 },
keyring => { optional => 1 },
bwlimit => { optional => 1 },
nodes => { optional => 1 },
disable => { optional => 1 },
monhost => { optional => 1 },
pool => { optional => 1 },
'data-pool' => { optional => 1 },
namespace => { optional => 1 },
username => { optional => 1 },
content => { optional => 1 },
krbd => { optional => 1 },
keyring => { optional => 1 },
bwlimit => { optional => 1 },
};
}

@ -439,11 +456,11 @@ sub on_update_hook {
my ($class, $storeid, $scfg, %param) = @_;

if (exists($param{keyring})) {
if (defined($param{keyring})) {
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
} else {
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
}
if (defined($param{keyring})) {
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
} else {
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
}
}

return;
@ -459,7 +476,7 @@ sub parse_volname {
my ($class, $volname) = @_;

if ($volname =~ m/^((base-(\d+)-\S+)\/)?((base)?(vm)?-(\d+)-\S+)$/) {
return ('images', $4, $7, $2, $3, $5, 'raw');
return ('images', $4, $7, $2, $3, $5, 'raw');
}

die "unable to parse rbd volume name '$volname'\n";
@ -470,11 +487,11 @@ sub path {

my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;

if ($scfg->{krbd}) {
my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name);
return ($rbd_dev_path, $vmid, $vtype);
my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name);
return ($rbd_dev_path, $vmid, $vtype);
}

my $rbd_path = get_rbd_path($scfg, $name);
@ -482,10 +499,10 @@ sub path {

$path .= ":conf=$cmd_option->{ceph_conf}" if $cmd_option->{ceph_conf};
if (defined($scfg->{monhost})) {
my $monhost = PVE::CephConfig::hostlist($scfg->{monhost}, ';');
$monhost =~ s/:/\\:/g;
$path .= ":mon_host=$monhost";
$path .= ":auth_supported=$cmd_option->{auth_supported}";
my $monhost = PVE::CephConfig::hostlist($scfg->{monhost}, ';');
$monhost =~ s/:/\\:/g;
$path .= ":mon_host=$monhost";
$path .= ":auth_supported=$cmd_option->{auth_supported}";
}

$path .= ":id=$cmd_option->{userid}:keyring=$cmd_option->{keyring}" if ($cmd_option->{keyring});
@ -501,14 +518,14 @@ sub find_free_diskname {
my $disk_list = [];

my $parser = sub {
my $line = shift;
if ($line =~ m/^(.*)$/) { # untaint
push @$disk_list, $1;
}
my $line = shift;
if ($line =~ m/^(.*)$/) { # untaint
push @$disk_list, $1;
}
};

eval {
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub { }, outfunc => $parser);
};
my $err = $@;

@ -522,8 +539,7 @@ sub create_base {

my $snap = '__base__';

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);

die "create_base not possible with base image\n" if $isBase;

@ -533,7 +549,7 @@ sub create_base {
die "rbd image must be at format V2" if $format ne "2";

die "volname '$volname' contains wrong information about parent $parent $basename\n"
if $basename && (!$parent || $parent ne $basename."@".$snap);
if $basename && (!$parent || $parent ne $basename . "@" . $snap);

my $newname = $name;
$newname =~ s/^vm-/base-/;
@ -541,26 +557,24 @@ sub create_base {
my $newvolname = $basename ? "$basename/$newname" : "$newname";

my $cmd = $rbd_cmd->(
$scfg,
$storeid,
'rename',
get_rbd_path($scfg, $name),
get_rbd_path($scfg, $newname),
$scfg, $storeid, 'rename',
get_rbd_path($scfg, $name),
get_rbd_path($scfg, $newname),
);
run_rbd_command($cmd, errmsg => "rbd rename '$name' error");

eval { $class->unmap_volume($storeid, $scfg, $volname); };
warn $@ if $@;

my $running = undef; #fixme : is create_base always offline ?
my $running = undef; #fixme : is create_base always offline ?

$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);

my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $newname, $snap);

if (!$protected){
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error");
if (!$protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error");
}

return $newvolname;
@ -573,31 +587,30 @@ sub clone_image {
my $snap = '__base__';
$snap = $snapname if length $snapname;

my ($vtype, $basename, $basevmid, undef, undef, $isBase) =
$class->parse_volname($volname);
my ($vtype, $basename, $basevmid, undef, undef, $isBase) = $class->parse_volname($volname);

die "$volname is not a base image and snapname is not provided\n"
if !$isBase && !length($snapname);
die "$volname is not a base image and snapname is not provided\n"
if !$isBase && !length($snapname);

my $name = $class->find_free_diskname($storeid, $scfg, $vmid);

warn "clone $volname: $basename snapname $snap to $name\n";

if (length($snapname)) {
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $volname, $snapname);
my (undef, undef, undef, $protected) =
rbd_volume_info($scfg, $storeid, $volname, $snapname);

if (!$protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname);
run_rbd_command($cmd, errmsg => "rbd protect $volname snap $snapname error");
}
if (!$protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname);
run_rbd_command($cmd, errmsg => "rbd protect $volname snap $snapname error");
}
}

my $newvol = "$basename/$name";
$newvol = $name if length($snapname);

my @options = (
get_rbd_path($scfg, $basename),
'--snap', $snap,
get_rbd_path($scfg, $basename), '--snap', $snap,
);
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};

@ -610,15 +623,13 @@ sub clone_image {
sub alloc_image {
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;


die "illegal name '$name' - should be 'vm-$vmid-*'\n"
if $name && $name !~ m/^vm-$vmid-/;
if $name && $name !~ m/^vm-$vmid-/;

$name = $class->find_free_diskname($storeid, $scfg, $vmid) if !$name;

my @options = (
'--image-format' , 2,
'--size', int(($size + 1023) / 1024),
'--image-format', 2, '--size', int(($size + 1023) / 1024),
);
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};

@ -631,21 +642,19 @@ sub alloc_image {
sub free_image {
my ($class, $storeid, $scfg, $volname, $isBase) = @_;

my ($vtype, $name, $vmid, undef, undef, undef) =
$class->parse_volname($volname);

my ($vtype, $name, $vmid, undef, undef, undef) = $class->parse_volname($volname);

my $snaps = rbd_ls_snap($scfg, $storeid, $name);
foreach my $snap (keys %$snaps) {
if ($snaps->{$snap}->{protected}) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
}
if ($snaps->{$snap}->{protected}) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
}
}

$class->deactivate_volume($storeid, $scfg, $volname);

my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'purge', $name);
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'purge', $name);
run_rbd_command($cmd, errmsg => "rbd snap purge '$name' error");

$cmd = $rbd_cmd->($scfg, $storeid, 'rm', $name);
@ -662,25 +671,25 @@ sub list_images {

my $res = [];
for my $image (sort keys %$dat) {
my $info = $dat->{$image};
my ($volname, $parent, $owner) = $info->@{'name', 'parent', 'vmid'};
my $info = $dat->{$image};
my ($volname, $parent, $owner) = $info->@{ 'name', 'parent', 'vmid' };

if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) {
$info->{volid} = "$storeid:$1/$volname";
} else {
$info->{volid} = "$storeid:$volname";
}
if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) {
$info->{volid} = "$storeid:$1/$volname";
} else {
$info->{volid} = "$storeid:$volname";
}

if ($vollist) {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined ($vmid) && ($owner ne $vmid);
}
if ($vollist) {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined($vmid) && ($owner ne $vmid);
}

$info->{format} = 'raw';
$info->{format} = 'raw';

push @$res, $info;
push @$res, $info;
}

return $res;
@ -694,11 +703,11 @@ sub status {

my $pool = $scfg->{'data-pool'} // $scfg->{pool} // 'rbd';

my ($d) = grep { $_->{name} eq $pool } @{$df->{pools}};
my ($d) = grep { $_->{name} eq $pool } @{ $df->{pools} };

if (!defined($d)) {
warn "could not get usage stats for pool '$pool'\n";
return;
warn "could not get usage stats for pool '$pool'\n";
return;
}

# max_avail -> max available space for data w/o replication in the pool
@ -727,7 +736,7 @@ sub map_volume {
my ($vtype, $img_name, $vmid) = $class->parse_volname($volname);

my $name = $img_name;
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;

my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);

@ -746,13 +755,13 @@ sub unmap_volume {
my ($class, $storeid, $scfg, $volname, $snapname) = @_;

my ($vtype, $name, $vmid) = $class->parse_volname($volname);
$name .= '@'.$snapname if $snapname;
$name .= '@' . $snapname if $snapname;

my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);

if (-b $kerneldev) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'unmap', $kerneldev);
run_rbd_command($cmd, errmsg => "can't unmap rbd device $kerneldev");
my $cmd = $rbd_cmd->($scfg, $storeid, 'unmap', $kerneldev);
run_rbd_command($cmd, errmsg => "can't unmap rbd device $kerneldev");
}

return 1;
@ -790,7 +799,8 @@ sub volume_resize {

my ($vtype, $name, $vmid) = $class->parse_volname($volname);

my $cmd = $rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size/1024/1024)), $name);
my $cmd =
$rbd_cmd->($scfg, $storeid, 'resize', '--size', int(ceil($size / 1024 / 1024)), $name);
run_rbd_command($cmd, errmsg => "rbd resize '$volname' error");
return undef;
}
@ -822,9 +832,9 @@ sub volume_snapshot_delete {
my ($vtype, $name, $vmid) = $class->parse_volname($volname);

my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $name, $snap);
if ($protected){
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
if ($protected) {
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
}

my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'rm', '--snap', $snap, $name);
@ -841,22 +851,22 @@ sub volume_snapshot_needs_fsfreeze {
sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;

my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1, snap => 1},
template => { current => 1},
copy => { base => 1, current => 1, snap => 1},
sparseinit => { base => 1, current => 1},
rename => {current => 1},
my $features = {
snapshot => { current => 1, snap => 1 },
clone => { base => 1, snap => 1 },
template => { current => 1 },
copy => { base => 1, current => 1, snap => 1 },
sparseinit => { base => 1, current => 1 },
rename => { current => 1 },
};

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);

my $key = undef;
if ($snapname){
$key = 'snap';
if ($snapname) {
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
$key = $isBase ? 'base' : 'current';
}
return 1 if $features->{$feature}->{$key};

@ -867,20 +877,21 @@ sub volume_export_formats {
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;

return $class->volume_import_formats(
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}

sub volume_export {
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
) = @_;

die "volume export format $format not available for $class\n" if $format ne 'raw+size';
@ -891,9 +902,9 @@ sub volume_export {
PVE::Storage::Plugin::write_common_header($fh, $size);
my $cmd = $rbd_cmd->($scfg, $storeid, 'export', '--export-format', '1', $volname, '-');
run_rbd_command(
$cmd,
errmsg => 'could not export image',
output => '>&'.fileno($fh),
$cmd,
errmsg => 'could not export image',
output => '>&' . fileno($fh),
);

return;
@ -908,16 +919,16 @@ sub volume_import_formats {

sub volume_import {
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;

die "volume import format $format not available for $class\n" if $format ne 'raw+size';
@ -926,32 +937,32 @@ sub volume_import {

my (undef, $name, $vmid, undef, undef, undef, $file_format) = $class->parse_volname($volname);
die "cannot import format $format into a volume of format $file_format\n"
if $file_format ne 'raw';
if $file_format ne 'raw';

if (rbd_volume_exists($scfg, $storeid, $name)) {
die "volume $name already exists\n" if !$allow_rename;
warn "volume $name already exists - importing with a different name\n";
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $file_format);
die "volume $name already exists\n" if !$allow_rename;
warn "volume $name already exists - importing with a different name\n";
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $file_format);
}

my ($size) = PVE::Storage::Plugin::read_common_header($fh);
$size = PVE::Storage::Common::align_size_up($size, 1024) / 1024;

eval {
my $cmd = $rbd_cmd->($scfg, $storeid, 'import', '--export-format', '1', '-', $volname);
run_rbd_command(
$cmd,
errmsg => 'could not import image',
input => '<&'.fileno($fh),
);
my $cmd = $rbd_cmd->($scfg, $storeid, 'import', '--export-format', '1', '-', $volname);
run_rbd_command(
$cmd,
errmsg => 'could not import image',
input => '<&' . fileno($fh),
);
};
if (my $err = $@) {
# FIXME there is a slight race between finding the free disk name and removal here
# Does not only affect this plugin, see:
# https://lore.proxmox.com/pve-devel/20240403150712.262773-1-h.duerr@proxmox.com/
eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format); };
warn $@ if $@;
die $err;
# FIXME there is a slight race between finding the free disk name and removal here
# Does not only affect this plugin, see:
# https://lore.proxmox.com/pve-devel/20240403150712.262773-1-h.duerr@proxmox.com/
eval { $class->free_image($storeid, $scfg, $volname, 0, $file_format); };
warn $@ if $@;
die $err;
}

return "$storeid:$volname";
@ -961,25 +972,19 @@ sub rename_volume {
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;

my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
if !$target_volname;
if !$target_volname;

die "target volume '${target_volname}' already exists\n"
if rbd_volume_exists($scfg, $storeid, $target_volname);
if rbd_volume_exists($scfg, $storeid, $target_volname);

my $cmd = $rbd_cmd->($scfg, $storeid, 'rename', $source_image, $target_volname);

run_rbd_command(
$cmd,
errmsg => "could not rename image '${source_image}' to '${target_volname}'",
$cmd,
errmsg => "could not rename image '${source_image}' to '${target_volname}'",
);

eval { $class->unmap_volume($storeid, $scfg, $source_volname); };

@ -14,19 +14,18 @@ use PVE::Storage::LunCmd::Istgt;
use PVE::Storage::LunCmd::Iet;
use PVE::Storage::LunCmd::LIO;


my @ssh_opts = ('-o', 'BatchMode=yes');
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
my $id_rsa_path = '/etc/pve/priv/zfs';

my $lun_cmds = {
create_lu => 1,
delete_lu => 1,
import_lu => 1,
modify_lu => 1,
add_view => 1,
list_view => 1,
list_lu => 1,
create_lu => 1,
delete_lu => 1,
import_lu => 1,
modify_lu => 1,
add_view => 1,
list_view => 1,
list_lu => 1,
};

my $zfs_unknown_scsi_provider = sub {
@ -54,14 +53,15 @@ my $zfs_get_base = sub {
sub zfs_request {
my ($class, $scfg, $timeout, $method, @params) = @_;

$timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 10
if !$timeout;
$timeout = PVE::RPCEnvironment->is_worker() ? 60 * 60 : 10
if !$timeout;

my $msg = '';

if ($lun_cmds->{$method}) {
if ($scfg->{iscsiprovider} eq 'comstar') {
$msg = PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params);
$msg =
PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params);
} elsif ($scfg->{iscsiprovider} eq 'istgt') {
$msg = PVE::Storage::LunCmd::Istgt::run_lun_command($scfg, $timeout, $method, @params);
} elsif ($scfg->{iscsiprovider} eq 'iet') {
@ -73,21 +73,21 @@ sub zfs_request {
}
} else {

my $target = 'root@' . $scfg->{portal};
my $target = 'root@' . $scfg->{portal};

my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target];
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target];

if ($method eq 'zpool_list') {
push @$cmd, 'zpool', 'list';
} else {
push @$cmd, 'zfs', $method;
push @$cmd, 'zpool', 'list';
} else {
push @$cmd, 'zfs', $method;
}

push @$cmd, @params;
push @$cmd, @params;

my $output = sub {
my $line = shift;
$msg .= "$line\n";
my $output = sub {
my $line = shift;
$msg .= "$line\n";
};

run_command($cmd, outfunc => $output, timeout => $timeout);
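The zfs_request dispatch above boils down to a lookup-table check followed by provider-specific handlers; this minimal sketch shows the same pattern with hypothetical handler subs standing in for the real LunCmd modules.

    my $lun_cmds = { create_lu => 1, delete_lu => 1, list_lu => 1 };

    my $handlers = {
        comstar => sub { "comstar: @_" }, # stand-ins for the real LunCmd modules
        istgt   => sub { "istgt: @_" },
    };

    sub dispatch {
        my ($provider, $method, @params) = @_;
        die "unsupported method '$method'\n" if !$lun_cmds->{$method};
        my $handler = $handlers->{$provider} or die "unknown provider '$provider'\n";
        return $handler->($method, @params);
    }

    print dispatch('istgt', 'list_lu', 'vm-100-disk-0'), "\n";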
@ -116,7 +116,7 @@ sub zfs_add_lun_mapping_entry {
my ($class, $scfg, $zvol, $guid) = @_;

if (!defined($guid)) {
$guid = $class->zfs_get_lu_name($scfg, $zvol);
$guid = $class->zfs_get_lu_name($scfg, $zvol);
}

$class->zfs_request($scfg, undef, 'add_view', $guid);
@ -160,7 +160,7 @@ sub zfs_get_lun_number {
die "could not find lun_number for guid $guid" if !$guid;

if ($class->zfs_request($scfg, undef, 'list_view', $guid) =~ /^(\d+)$/) {
return $1;
return $1;
}

die "lun_number for guid $guid is not a number";
@ -174,55 +174,55 @@ sub type {

sub plugindata {
return {
content => [ {images => 1}, { images => 1 }],
'sensitive-properties' => {},
content => [{ images => 1 }, { images => 1 }],
'sensitive-properties' => {},
};
}

sub properties {
return {
iscsiprovider => {
description => "iscsi provider",
type => 'string',
},
# this will disable write caching on comstar and istgt.
# it is not implemented for iet. iet blockio always operates with
# writethrough caching when not in readonly mode
nowritecache => {
description => "disable write caching on the target",
type => 'boolean',
},
comstar_tg => {
description => "target group for comstar views",
type => 'string',
},
comstar_hg => {
description => "host group for comstar views",
type => 'string',
},
lio_tpg => {
description => "target portal group for Linux LIO targets",
type => 'string',
},
iscsiprovider => {
description => "iscsi provider",
type => 'string',
},
# this will disable write caching on comstar and istgt.
# it is not implemented for iet. iet blockio always operates with
# writethrough caching when not in readonly mode
nowritecache => {
description => "disable write caching on the target",
type => 'boolean',
},
comstar_tg => {
description => "target group for comstar views",
type => 'string',
},
comstar_hg => {
description => "host group for comstar views",
type => 'string',
},
lio_tpg => {
description => "target portal group for Linux LIO targets",
type => 'string',
},
};
}

sub options {
return {
nodes => { optional => 1 },
disable => { optional => 1 },
portal => { fixed => 1 },
target => { fixed => 1 },
pool => { fixed => 1 },
blocksize => { fixed => 1 },
iscsiprovider => { fixed => 1 },
nowritecache => { optional => 1 },
sparse => { optional => 1 },
comstar_hg => { optional => 1 },
comstar_tg => { optional => 1 },
lio_tpg => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
nodes => { optional => 1 },
disable => { optional => 1 },
portal => { fixed => 1 },
target => { fixed => 1 },
pool => { fixed => 1 },
blocksize => { fixed => 1 },
iscsiprovider => { fixed => 1 },
nowritecache => { optional => 1 },
sparse => { optional => 1 },
comstar_hg => { optional => 1 },
comstar_tg => { optional => 1 },
lio_tpg => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
};
}

@ -232,7 +232,7 @@ sub path {
my ($class, $scfg, $volname, $storeid, $snapname) = @_;

die "direct access to snapshots not implemented"
if defined($snapname);
if defined($snapname);

my ($vtype, $name, $vmid) = $class->parse_volname($volname);

@ -252,8 +252,7 @@ sub create_base {

my $snap = '__base__';

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);

die "create_base not possible with base image\n" if $isBase;

@ -268,7 +267,7 @@ sub create_base {
my $guid = $class->zfs_create_lu($scfg, $newname);
$class->zfs_add_lun_mapping_entry($scfg, $newname, $guid);

my $running = undef; #fixme : is create_base always offline ?
my $running = undef; #fixme : is create_base always offline ?

$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);

@ -291,18 +290,18 @@ sub clone_image {

sub alloc_image {
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;


die "unsupported format '$fmt'" if $fmt ne 'raw';

die "illegal name '$name' - should be 'vm-$vmid-*'\n"
if $name && $name !~ m/^vm-$vmid-/;
if $name && $name !~ m/^vm-$vmid-/;

my $volname = $name;

$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) if !$volname;


$class->zfs_create_zvol($scfg, $volname, $size);


my $guid = $class->zfs_create_lu($scfg, $volname);
$class->zfs_add_lun_mapping_entry($scfg, $volname, $guid);

@ -370,21 +369,20 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;

my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1},
template => { current => 1},
copy => { base => 1, current => 1},
snapshot => { current => 1, snap => 1 },
clone => { base => 1 },
template => { current => 1 },
copy => { base => 1, current => 1 },
};

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);

my $key = undef;

if ($snapname) {
$key = 'snap';
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
$key = $isBase ? 'base' : 'current';
}

return 1 if $features->{$feature}->{$key};

@ -20,39 +20,40 @@ sub type {

sub plugindata {
return {
content => [ {images => 1, rootdir => 1}, {images => 1 , rootdir => 1}],
format => [ { raw => 1, subvol => 1 } , 'raw' ],
'sensitive-properties' => {},
content => [{ images => 1, rootdir => 1 }, { images => 1, rootdir => 1 }],
format => [{ raw => 1, subvol => 1 }, 'raw'],
'sensitive-properties' => {},
};
}

sub properties {
return {
blocksize => {
description => "block size",
type => 'string',
},
sparse => {
description => "use sparse volumes",
type => 'boolean',
},
mountpoint => {
description => "mount point",
type => 'string', format => 'pve-storage-path',
},
blocksize => {
description => "block size",
type => 'string',
},
sparse => {
description => "use sparse volumes",
type => 'boolean',
},
mountpoint => {
description => "mount point",
type => 'string',
format => 'pve-storage-path',
},
};
}

sub options {
return {
pool => { fixed => 1 },
blocksize => { optional => 1 },
sparse => { optional => 1 },
nodes => { optional => 1 },
disable => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
mountpoint => { optional => 1 },
pool => { fixed => 1 },
blocksize => { optional => 1 },
sparse => { optional => 1 },
nodes => { optional => 1 },
disable => { optional => 1 },
content => { optional => 1 },
bwlimit => { optional => 1 },
mountpoint => { optional => 1 },
};
}
@ -67,35 +68,35 @@ sub zfs_parse_zvol_list {

my @lines = split /\n/, $text;
foreach my $line (@lines) {
my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line);
next if !($type eq 'volume' || $type eq 'filesystem');
my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line);
next if !($type eq 'volume' || $type eq 'filesystem');

my $zvol = {};
my @parts = split /\//, $dataset;
next if scalar(@parts) < 2; # we need pool/name
my $name = pop @parts;
my $parsed_pool = join('/', @parts);
next if $parsed_pool ne $pool;
my $zvol = {};
my @parts = split /\//, $dataset;
next if scalar(@parts) < 2; # we need pool/name
my $name = pop @parts;
my $parsed_pool = join('/', @parts);
next if $parsed_pool ne $pool;

next unless $name =~ m!^(vm|base|subvol|basevol)-(\d+)-(\S+)$!;
$zvol->{owner} = $2;
next unless $name =~ m!^(vm|base|subvol|basevol)-(\d+)-(\S+)$!;
$zvol->{owner} = $2;

$zvol->{name} = $name;
if ($type eq 'filesystem') {
if ($refquota eq 'none') {
$zvol->{size} = 0;
} else {
$zvol->{size} = $refquota + 0;
}
$zvol->{format} = 'subvol';
} else {
$zvol->{size} = $size + 0;
$zvol->{format} = 'raw';
}
if ($origin !~ /^-$/) {
$zvol->{origin} = $origin;
}
push @$list, $zvol;
$zvol->{name} = $name;
if ($type eq 'filesystem') {
if ($refquota eq 'none') {
$zvol->{size} = 0;
} else {
$zvol->{size} = $refquota + 0;
}
$zvol->{format} = 'subvol';
} else {
$zvol->{size} = $size + 0;
$zvol->{format} = 'raw';
}
if ($origin !~ /^-$/) {
$zvol->{origin} = $origin;
}
push @$list, $zvol;
}

return $list;
@ -105,9 +106,9 @@ sub parse_volname {
my ($class, $volname) = @_;

if ($volname =~ m/^(((base|basevol)-(\d+)-\S+)\/)?((base|basevol|vm|subvol)-(\d+)-\S+)$/) {
my $format = ($6 eq 'subvol' || $6 eq 'basevol') ? 'subvol' : 'raw';
my $isBase = ($6 eq 'base' || $6 eq 'basevol');
return ('images', $5, $7, $2, $4, $isBase, $format);
my $format = ($6 eq 'subvol' || $6 eq 'basevol') ? 'subvol' : 'raw';
my $isBase = ($6 eq 'base' || $6 eq 'basevol');
return ('images', $5, $7, $2, $4, $isBase, $format);
}

die "unable to parse zfs volume name '$volname'\n";

@ -123,17 +124,17 @@ sub on_add_hook {
# ignore failure, pool might currently not be imported
my $mountpoint;
eval {
my $res = $class->zfs_get_properties($scfg, 'mountpoint', $scfg->{pool}, 1);
$mountpoint = PVE::Storage::Plugin::verify_path($res, 1) if defined($res);
my $res = $class->zfs_get_properties($scfg, 'mountpoint', $scfg->{pool}, 1);
$mountpoint = PVE::Storage::Plugin::verify_path($res, 1) if defined($res);
};

if (defined($cfg_mountpoint)) {
if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) {
warn "warning for $storeid - mountpoint: $cfg_mountpoint " .
"does not match current mount point: $mountpoint\n";
}
if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) {
warn "warning for $storeid - mountpoint: $cfg_mountpoint "
. "does not match current mount point: $mountpoint\n";
}
} else {
$scfg->{mountpoint} = $mountpoint;
$scfg->{mountpoint} = $mountpoint;
}

return;

@ -148,14 +149,14 @@ sub path {
my $mountpoint = $scfg->{mountpoint} // "/$scfg->{pool}";

if ($vtype eq "images") {
if ($name =~ m/^subvol-/ || $name =~ m/^basevol-/) {
$path = "$mountpoint/$name";
} else {
$path = "/dev/zvol/$scfg->{pool}/$name";
}
$path .= "\@$snapname" if defined($snapname);
if ($name =~ m/^subvol-/ || $name =~ m/^basevol-/) {
$path = "$mountpoint/$name";
} else {
$path = "/dev/zvol/$scfg->{pool}/$name";
}
$path .= "\@$snapname" if defined($snapname);
} else {
die "$vtype is not allowed in ZFSPool!";
die "$vtype is not allowed in ZFSPool!";
}

return ($path, $vmid, $vtype);
@ -167,12 +168,12 @@ sub zfs_request {
my $cmd = [];

if ($method eq 'zpool_list') {
push @$cmd, 'zpool', 'list';
push @$cmd, 'zpool', 'list';
} elsif ($method eq 'zpool_import') {
push @$cmd, 'zpool', 'import';
$timeout = 15 if !$timeout || $timeout < 15;
push @$cmd, 'zpool', 'import';
$timeout = 15 if !$timeout || $timeout < 15;
} else {
push @$cmd, 'zfs', $method;
push @$cmd, 'zfs', $method;
}
push @$cmd, @params;

@ -180,10 +181,10 @@ sub zfs_request {
my $output = sub { $msg .= "$_[0]\n" };

if (PVE::RPCEnvironment->is_worker()) {
$timeout = 60*60 if !$timeout;
$timeout = 60*5 if $timeout < 60*5;
$timeout = 60 * 60 if !$timeout;
$timeout = 60 * 5 if $timeout < 60 * 5;
} else {
$timeout = 10 if !$timeout;
$timeout = 10 if !$timeout;
}

run_command($cmd, errmsg => "zfs error", outfunc => $output, timeout => $timeout);

@ -194,17 +195,17 @@ sub zfs_request {

sub zfs_wait_for_zvol_link {
my ($class, $scfg, $volname, $timeout) = @_;

my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*5 : 10;
my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60 * 5 : 10;
$timeout = $default_timeout if !defined($timeout);

my ($devname, undef, undef) = $class->path($scfg, $volname);

for (my $i = 1; $i <= $timeout; $i++) {
last if -b $devname;
die "timeout: no zvol device link for '$volname' found after $timeout sec.\n"
if $i == $timeout;
last if -b $devname;
die "timeout: no zvol device link for '$volname' found after $timeout sec.\n"
if $i == $timeout;

sleep(1);
sleep(1);
}
}
@ -215,28 +216,28 @@ sub alloc_image {

if ($fmt eq 'raw') {

die "illegal name '$volname' - should be 'vm-$vmid-*'\n"
if $volname && $volname !~ m/^vm-$vmid-/;
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
if !$volname;
die "illegal name '$volname' - should be 'vm-$vmid-*'\n"
if $volname && $volname !~ m/^vm-$vmid-/;
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
if !$volname;

$class->zfs_create_zvol($scfg, $volname, $size);
$class->zfs_wait_for_zvol_link($scfg, $volname);
$class->zfs_create_zvol($scfg, $volname, $size);
$class->zfs_wait_for_zvol_link($scfg, $volname);

} elsif ( $fmt eq 'subvol') {
} elsif ($fmt eq 'subvol') {

die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
if $volname && $volname !~ m/^subvol-$vmid-/;
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
if !$volname;
die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
if $volname && $volname !~ m/^subvol-$vmid-/;
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
if !$volname;

die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
if $volname !~ m/^subvol-$vmid-/;
die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
if $volname !~ m/^subvol-$vmid-/;

$class->zfs_create_subvol($scfg, $volname, $size);
$class->zfs_create_subvol($scfg, $volname, $size);

} else {
die "unsupported format '$fmt'";
die "unsupported format '$fmt'";
}

return $volname;

@ -260,25 +261,25 @@ sub list_images {
my $res = [];

for my $info (values $zfs_list->%*) {
my $volname = $info->{name};
my $parent = $info->{parent};
my $owner = $info->{vmid};
my $volname = $info->{name};
my $parent = $info->{parent};
my $owner = $info->{vmid};

if ($parent && $parent =~ m/^(\S+)\@__base__$/) {
my ($basename) = ($1);
$info->{volid} = "$storeid:$basename/$volname";
} else {
$info->{volid} = "$storeid:$volname";
}
if ($parent && $parent =~ m/^(\S+)\@__base__$/) {
my ($basename) = ($1);
$info->{volid} = "$storeid:$basename/$volname";
} else {
$info->{volid} = "$storeid:$volname";
}

if ($vollist) {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined ($vmid) && ($owner ne $vmid);
}
if ($vollist) {
my $found = grep { $_ eq $info->{volid} } @$vollist;
next if !$found;
} else {
next if defined($vmid) && ($owner ne $vmid);
}

push @$res, $info;
push @$res, $info;
}
return $res;
}
@ -286,8 +287,8 @@ sub list_images {
sub zfs_get_properties {
my ($class, $scfg, $properties, $dataset, $timeout) = @_;

my $result = $class->zfs_request($scfg, $timeout, 'get', '-o', 'value',
'-Hp', $properties, $dataset);
my $result =
$class->zfs_request($scfg, $timeout, 'get', '-o', 'value', '-Hp', $properties, $dataset);
my @values = split /\n/, $result;
return wantarray ? @values : $values[0];
}

@ -300,12 +301,12 @@ sub zfs_get_pool_stats {

my @lines = $class->zfs_get_properties($scfg, 'available,used', $scfg->{pool});

if($lines[0] =~ /^(\d+)$/) {
$available = $1;
if ($lines[0] =~ /^(\d+)$/) {
$available = $1;
}

if($lines[1] =~ /^(\d+)$/) {
$used = $1;
if ($lines[1] =~ /^(\d+)$/) {
$used = $1;
}

return ($available, $used);

@ -336,8 +337,8 @@ sub zfs_create_subvol {
my $dataset = "$scfg->{pool}/$volname";
my $quota = $size ? "${size}k" : "none";

my $cmd = ['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa',
'-o', "refquota=${quota}", $dataset];
my $cmd =
['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa', '-o', "refquota=${quota}", $dataset];

$class->zfs_request($scfg, undef, @$cmd);
}

@ -349,19 +350,19 @@ sub zfs_delete_zvol {

for (my $i = 0; $i < 6; $i++) {

eval { $class->zfs_request($scfg, undef, 'destroy', '-r', "$scfg->{pool}/$zvol"); };
if ($err = $@) {
if ($err =~ m/^zfs error:(.*): dataset is busy.*/) {
sleep(1);
} elsif ($err =~ m/^zfs error:.*: dataset does not exist.*$/) {
$err = undef;
last;
} else {
die $err;
}
} else {
last;
}
eval { $class->zfs_request($scfg, undef, 'destroy', '-r', "$scfg->{pool}/$zvol"); };
if ($err = $@) {
if ($err =~ m/^zfs error:(.*): dataset is busy.*/) {
sleep(1);
} elsif ($err =~ m/^zfs error:.*: dataset does not exist.*$/) {
$err = undef;
last;
} else {
die $err;
}
} else {
last;
}
}

die $err if $err;
@ -371,16 +372,16 @@ sub zfs_list_zvol {
my ($class, $scfg) = @_;

my $text = $class->zfs_request(
$scfg,
10,
'list',
'-o',
'name,volsize,origin,type,refquota',
'-t',
'volume,filesystem',
'-d1',
'-Hp',
$scfg->{pool},
$scfg,
10,
'list',
'-o',
'name,volsize,origin,type,refquota',
'-t',
'volume,filesystem',
'-d1',
'-Hp',
$scfg->{pool},
);
# It's still required to have zfs_parse_zvol_list filter by pool, because -d1 lists
# $scfg->{pool} too and while unlikely, it could be named to be mistaken for a volume.

@ -389,17 +390,17 @@ sub zfs_list_zvol {

my $list = {};
foreach my $zvol (@$zvols) {
my $name = $zvol->{name};
my $parent = $zvol->{origin};
if($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/){
$parent = $1;
}
my $name = $zvol->{name};
my $parent = $zvol->{origin};
if ($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/) {
$parent = $1;
}

$list->{$name} = {
name => $name,
size => $zvol->{size},
parent => $parent,
format => $zvol->{format},
$list->{$name} = {
name => $name,
size => $zvol->{size},
parent => $parent,
format => $zvol->{format},
vmid => $zvol->{owner},
};
}

@ -420,8 +421,8 @@ sub zfs_get_sorted_snapshot_list {

my $snap_names = [];
for my $snapshot (@snapshots) {
(my $snap_name = $snapshot) =~ s/^.*@//;
push $snap_names->@*, $snap_name;
(my $snap_name = $snapshot) =~ s/^.*@//;
push $snap_names->@*, $snap_name;
}
return $snap_names;
}
@ -435,9 +436,9 @@ sub status {
my $active = 0;

eval {
($free, $used) = $class->zfs_get_pool_stats($scfg);
$active = 1;
$total = $free + $used;
($free, $used) = $class->zfs_get_pool_stats($scfg);
$active = 1;
$total = $free + $used;
};
warn $@ if $@;

@ -447,16 +448,16 @@ sub status {
sub volume_size_info {
my ($class, $scfg, $storeid, $volname, $timeout) = @_;

my (undef, $vname, undef, $parent, undef, undef, $format) =
$class->parse_volname($volname);
my (undef, $vname, undef, $parent, undef, undef, $format) = $class->parse_volname($volname);

my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';
my ($size, $used) = $class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname");
my ($size, $used) =
$class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname");

$used = ($used =~ /^(\d+)$/) ? $1 : 0;

if ($size =~ /^(\d+)$/) {
return wantarray ? ($1, $format, $used, $parent) : $1;
return wantarray ? ($1, $format, $used, $parent) : $1;
}

die "Could not get zfs volume size\n";

@ -490,10 +491,10 @@ sub volume_snapshot_rollback {
# caches, they get mounted in activate volume again
# see zfs bug #10931 https://github.com/openzfs/zfs/issues/10931
if ($format eq 'subvol') {
eval { $class->zfs_request($scfg, undef, 'unmount', "$scfg->{pool}/$vname"); };
if (my $err = $@) {
die $err if $err !~ m/not currently mounted$/;
}
eval { $class->zfs_request($scfg, undef, 'unmount', "$scfg->{pool}/$vname"); };
if (my $err = $@) {
die $err if $err !~ m/not currently mounted$/;
}
}

return $msg;

@ -509,20 +510,20 @@ sub volume_rollback_is_possible {
my $found;
$blockers //= []; # not guaranteed to be set by caller
for my $snapshot ($snapshots->@*) {
if ($snapshot eq $snap) {
$found = 1;
} elsif ($found) {
push $blockers->@*, $snapshot;
}
if ($snapshot eq $snap) {
$found = 1;
} elsif ($found) {
push $blockers->@*, $snapshot;
}
}

my $volid = "${storeid}:${volname}";

die "can't rollback, snapshot '$snap' does not exist on '$volid'\n"
if !$found;
if !$found;

die "can't rollback, '$snap' is not most recent snapshot on '$volid'\n"
if scalar($blockers->@*) > 0;
if scalar($blockers->@*) > 0;

return 1;
}
@ -540,13 +541,13 @@ sub volume_snapshot_info {

my $info = {};
for my $line (@lines) {
my ($snapshot, $guid, $creation) = split(/\s+/, $line);
(my $snap_name = $snapshot) =~ s/^.*@//;
my ($snapshot, $guid, $creation) = split(/\s+/, $line);
(my $snap_name = $snapshot) =~ s/^.*@//;

$info->{$snap_name} = {
id => $guid,
timestamp => $creation,
};
$info->{$snap_name} = {
id => $guid,
timestamp => $creation,
};
}
return $info;
}

@ -556,12 +557,12 @@ my sub dataset_mounted_heuristic {

my $mounts = PVE::ProcFSTools::parse_proc_mounts();
for my $mp (@$mounts) {
my ($what, $dir, $fs) = $mp->@*;
next if $fs ne 'zfs';
# check for root-dataset or any child-dataset (root-dataset could have 'canmount=off')
# If any child is mounted heuristically assume that `zfs mount -a` was successful
next if $what !~ m!^$dataset(?:/|$)!;
return 1;
my ($what, $dir, $fs) = $mp->@*;
next if $fs ne 'zfs';
# check for root-dataset or any child-dataset (root-dataset could have 'canmount=off')
# If any child is mounted heuristically assume that `zfs mount -a` was successful
next if $what !~ m!^$dataset(?:/|$)!;
return 1;
}
return 0;
}

@ -576,21 +577,21 @@ sub activate_storage {
return 1 if dataset_mounted_heuristic($dataset); # early return

my $pool_imported = sub {
my @param = ('-o', 'name', '-H', $pool);
my $res = eval { $class->zfs_request($scfg, undef, 'zpool_list', @param) };
warn "$@\n" if $@;
my @param = ('-o', 'name', '-H', $pool);
my $res = eval { $class->zfs_request($scfg, undef, 'zpool_list', @param) };
warn "$@\n" if $@;

return defined($res) && $res =~ m/$pool/;
return defined($res) && $res =~ m/$pool/;
};

if (!$pool_imported->()) {
# import can only be done if not yet imported!
my @param = ('-d', '/dev/disk/by-id/', '-o', 'cachefile=none', $pool);
eval { $class->zfs_request($scfg, undef, 'zpool_import', @param) };
if (my $err = $@) {
# just could've raced with another import, so recheck if it is imported
die "could not activate storage '$storeid', $err\n" if !$pool_imported->();
}
# import can only be done if not yet imported!
my @param = ('-d', '/dev/disk/by-id/', '-o', 'cachefile=none', $pool);
eval { $class->zfs_request($scfg, undef, 'zpool_import', @param) };
if (my $err = $@) {
# just could've raced with another import, so recheck if it is imported
die "could not activate storage '$storeid', $err\n" if !$pool_imported->();
}
}
eval { $class->zfs_request($scfg, undef, 'mount', '-a') };
die "could not activate storage '$storeid', $@\n" if $@;

@ -610,12 +611,12 @@ sub activate_volume {
my (undef, $dataset, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

if ($format eq 'raw') {
$class->zfs_wait_for_zvol_link($scfg, $volname);
$class->zfs_wait_for_zvol_link($scfg, $volname);
} elsif ($format eq 'subvol') {
my $mounted = $class->zfs_get_properties($scfg, 'mounted', "$scfg->{pool}/$dataset");
if ($mounted !~ m/^yes$/) {
$class->zfs_request($scfg, undef, 'mount', "$scfg->{pool}/$dataset");
}
my $mounted = $class->zfs_get_properties($scfg, 'mounted', "$scfg->{pool}/$dataset");
if ($mounted !~ m/^yes$/) {
$class->zfs_request($scfg, undef, 'mount', "$scfg->{pool}/$dataset");
}
}

return 1;
@ -639,11 +640,27 @@ sub clone_image {
my $name = $class->find_free_diskname($storeid, $scfg, $vmid, $format);

if ($format eq 'subvol') {
my $size = $class->zfs_request($scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename");
chomp($size);
$class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name", '-o', "refquota=$size");
my $size = $class->zfs_request(
$scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename",
);
chomp($size);
$class->zfs_request(
$scfg,
undef,
'clone',
"$scfg->{pool}/$basename\@$snap",
"$scfg->{pool}/$name",
'-o',
"refquota=$size",
);
} else {
$class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name");
$class->zfs_request(
$scfg,
undef,
'clone',
"$scfg->{pool}/$basename\@$snap",
"$scfg->{pool}/$name",
);
}

return "$basename/$name";

@ -660,16 +677,16 @@ sub create_base {
die "create_base not possible with base image\n" if $isBase;

my $newname = $name;
if ( $format eq 'subvol' ) {
$newname =~ s/^subvol-/basevol-/;
if ($format eq 'subvol') {
$newname =~ s/^subvol-/basevol-/;
} else {
$newname =~ s/^vm-/base-/;
$newname =~ s/^vm-/base-/;
}
my $newvolname = $basename ? "$basename/$newname" : "$newname";

$class->zfs_request($scfg, undef, 'rename', "$scfg->{pool}/$name", "$scfg->{pool}/$newname");

my $running = undef; #fixme : is create_base always offline ?
my $running = undef; #fixme : is create_base always offline ?

$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);

@ -679,17 +696,16 @@ sub create_base {
sub volume_resize {
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;

my $new_size = int($size/1024);
my $new_size = int($size / 1024);

my (undef, $vname, undef, undef, undef, undef, $format) =
$class->parse_volname($volname);
my (undef, $vname, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';

# align size to 1M so we always have a valid multiple of the volume block size
if ($format eq 'raw') {
my $padding = (1024 - $new_size % 1024) % 1024;
$new_size = $new_size + $padding;
my $padding = (1024 - $new_size % 1024) % 1024;
$new_size = $new_size + $padding;
}

$class->zfs_request($scfg, undef, 'set', "$attr=${new_size}k", "$scfg->{pool}/$vname");

@ -709,24 +725,23 @@ sub volume_has_feature {
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;

my $features = {
snapshot => { current => 1, snap => 1},
clone => { base => 1},
template => { current => 1},
copy => { base => 1, current => 1},
sparseinit => { base => 1, current => 1},
replicate => { base => 1, current => 1},
rename => {current => 1},
snapshot => { current => 1, snap => 1 },
clone => { base => 1 },
template => { current => 1 },
copy => { base => 1, current => 1 },
sparseinit => { base => 1, current => 1 },
replicate => { base => 1, current => 1 },
rename => { current => 1 },
};

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class->parse_volname($volname);
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);

my $key = undef;

if ($snapname) {
$key = 'snap';
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
$key = $isBase ? 'base' : 'current';
}

return 1 if $features->{$feature}->{$key};

@ -735,19 +750,20 @@ sub volume_has_feature {
}

sub volume_export {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots)
= @_;

die "unsupported export stream format for $class: $format\n"
if $format ne 'zfs';
if $format ne 'zfs';

die "$class storage can only export snapshots\n"
if !defined($snapshot);
if !defined($snapshot);

my $dataset = ($class->parse_volname($volname))[1];

my $fd = fileno($fh);
die "internal error: invalid file handle for volume_export\n"
if !defined($fd);
if !defined($fd);
$fd = ">&$fd";

# For zfs we always create a replication stream (-R) which means the remote

@ -755,8 +771,8 @@ sub volume_export {
# for all our use cases.
my $cmd = ['zfs', 'send', '-Rpv'];
if (defined($base_snapshot)) {
my $arg = $with_snapshots ? '-I' : '-i';
push @$cmd, $arg, $base_snapshot;
my $arg = $with_snapshots ? '-I' : '-i';
push @$cmd, $arg, $base_snapshot;
}
push @$cmd, '--', "$scfg->{pool}/$dataset\@$snapshot";
@ -776,39 +792,53 @@ sub volume_export_formats {
}

sub volume_import {
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
my (
$class,
$scfg,
$storeid,
$fh,
$volname,
$format,
$snapshot,
$base_snapshot,
$with_snapshots,
$allow_rename,
) = @_;

die "unsupported import stream format for $class: $format\n"
if $format ne 'zfs';
if $format ne 'zfs';

my $fd = fileno($fh);
die "internal error: invalid file handle for volume_import\n"
if !defined($fd);
if !defined($fd);

my (undef, $dataset, $vmid, undef, undef, undef, $volume_format) =
$class->parse_volname($volname);
$class->parse_volname($volname);

my $zfspath = "$scfg->{pool}/$dataset";
my $suffix = defined($base_snapshot) ? "\@$base_snapshot" : '';
my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $zfspath.$suffix],
noerr => 1, quiet => 1);
my $exists = 0 == run_command(
['zfs', 'get', '-H', 'name', $zfspath . $suffix],
noerr => 1,
quiet => 1,
);
if (defined($base_snapshot)) {
die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists;
die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists;
} elsif ($exists) {
die "volume '$zfspath' already exists\n" if !$allow_rename;
warn "volume '$zfspath' already exists - importing with a different name\n";
$dataset = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format);
$zfspath = "$scfg->{pool}/$dataset";
die "volume '$zfspath' already exists\n" if !$allow_rename;
warn "volume '$zfspath' already exists - importing with a different name\n";
$dataset = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format);
$zfspath = "$scfg->{pool}/$dataset";
}

eval { run_command(['zfs', 'recv', '-F', '--', $zfspath], input => "<&$fd") };
if (my $err = $@) {
if (defined($base_snapshot)) {
eval { run_command(['zfs', 'rollback', '-r', '--', "$zfspath\@$base_snapshot"]) };
} else {
eval { run_command(['zfs', 'destroy', '-r', '--', $zfspath]) };
}
die $err;
if (defined($base_snapshot)) {
eval { run_command(['zfs', 'rollback', '-r', '--', "$zfspath\@$base_snapshot"]) };
} else {
eval { run_command(['zfs', 'destroy', '-r', '--', $zfspath]) };
}
die $err;
}

return "$storeid:$dataset";

@ -817,30 +847,29 @@ sub volume_import {
sub volume_import_formats {
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;

return $class->volume_export_formats($scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
return $class->volume_export_formats(
$scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots,
);
}

sub rename_volume {
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;

my (
undef,
$source_image,
$source_vmid,
$base_name,
$base_vmid,
undef,
$format
undef, $source_image, $source_vmid, $base_name, $base_vmid, undef, $format,
) = $class->parse_volname($source_volname);
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
if !$target_volname;
if !$target_volname;

my $pool = $scfg->{pool};
my $source_zfspath = "${pool}/${source_image}";
my $target_zfspath = "${pool}/${target_volname}";

my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $target_zfspath],
noerr => 1, quiet => 1);
my $exists = 0 == run_command(
['zfs', 'get', '-H', 'name', $target_zfspath],
noerr => 1,
quiet => 1,
);
die "target volume '${target_volname}' already exists\n" if $exists;

$class->zfs_request($scfg, 5, 'rename', ${source_zfspath}, ${target_zfspath});
File diff suppressed because it is too large
@ -22,119 +22,119 @@ my $NOTES_EXT = PVE::Storage::Plugin::NOTES_EXT;

my $tests = [
# backup archives
{
description => 'Backup archive, lxc, tgz, future millenium',
archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz".$NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60*60*24 * (365*1100 + 267),
'is_std_name' => 1,
},
description => 'Backup archive, lxc, tgz, future millenium',
archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60 * 60 * 24 * (365 * 1100 + 267),
'is_std_name' => 1,
},
},
{
description => 'Backup archive, lxc, tgz, very old',
archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz".$NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60*60*2 + 30,
'is_std_name' => 1,
},
description => 'Backup archive, lxc, tgz, very old',
archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 60 * 60 * 2 + 30,
'is_std_name' => 1,
},
},
{
description => 'Backup archive, lxc, tgz',
archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
description => 'Backup archive, lxc, tgz',
archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'lxc',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
},
{
description => 'Backup archive, openvz, tgz',
archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'type' => 'openvz',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
description => 'Backup archive, openvz, tgz',
archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'openvz',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
},
{
description => 'Backup archive, custom dump directory, qemu, tgz',
archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30".$LOG_EXT,
'notesfilename'=> "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
'type' => 'qemu',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
description => 'Backup archive, custom dump directory, qemu, tgz',
archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
expected => {
'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30" . $LOG_EXT,
'notesfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz" . $NOTES_EXT,
'type' => 'qemu',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'vmid' => $vmid,
'ctime' => 1585604370,
'is_std_name' => 1,
},
},
{
description => 'Backup archive, none, tgz',
archive => "backup/vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
expected => {
'filename' => "vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
'type' => 'qemu',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'is_std_name' => 0,
},
description => 'Backup archive, none, tgz',
archive => "backup/vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
expected => {
'filename' => "vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
'type' => 'qemu',
'format' => 'tar',
'decompressor' => ['tar', '-z'],
'compression' => 'gz',
'is_std_name' => 0,
},
},
];

# add new compression fromats to test
my $decompressor = {
tar => {
gz => ['tar', '-z'],
lzo => ['tar', '--lzop'],
zst => ['tar', '--zstd'],
bz2 => ['tar', '--bzip2'],
gz => ['tar', '-z'],
lzo => ['tar', '--lzop'],
zst => ['tar', '--zstd'],
bz2 => ['tar', '--bzip2'],
},
vma => {
gz => ['zcat'],
lzo => ['lzop', '-d', '-c'],
zst => ['zstd', '-q', '-d', '-c'],
bz2 => ['bzcat', '-q'],
gz => ['zcat'],
lzo => ['lzop', '-d', '-c'],
zst => ['zstd', '-q', '-d', '-c'],
bz2 => ['bzcat', '-q'],
},
};

my $bkp_suffix = {
qemu => [ 'vma', $decompressor->{vma}, ],
lxc => [ 'tar', $decompressor->{tar}, ],
openvz => [ 'tar', $decompressor->{tar}, ],
qemu => ['vma', $decompressor->{vma}],
lxc => ['tar', $decompressor->{tar}],
openvz => ['tar', $decompressor->{tar}],
};

# create more test cases for backup files matches

@ -143,48 +143,48 @@ for my $virt (sort keys %$bkp_suffix) {
my $archive_name = "vzdump-$virt-$vmid-2020_03_30-21_12_40";

for my $suffix (sort keys %$decomp) {
push @$tests, {
description => "Backup archive, $virt, $format.$suffix",
archive => "backup/$archive_name.$format.$suffix",
expected => {
'filename' => "$archive_name.$format.$suffix",
'logfilename' => $archive_name.$LOG_EXT,
'notesfilename'=> "$archive_name.$format.$suffix".$NOTES_EXT,
'type' => "$virt",
'format' => "$format",
'decompressor' => $decomp->{$suffix},
'compression' => "$suffix",
'vmid' => $vmid,
'ctime' => 1585602760,
'is_std_name' => 1,
},
};
push @$tests,
{
description => "Backup archive, $virt, $format.$suffix",
archive => "backup/$archive_name.$format.$suffix",
expected => {
'filename' => "$archive_name.$format.$suffix",
'logfilename' => $archive_name . $LOG_EXT,
'notesfilename' => "$archive_name.$format.$suffix" . $NOTES_EXT,
'type' => "$virt",
'format' => "$format",
'decompressor' => $decomp->{$suffix},
'compression' => "$suffix",
'vmid' => $vmid,
'ctime' => 1585602760,
'is_std_name' => 1,
},
};
}
}

# add compression formats to test failed matches
my $non_bkp_suffix = {
'openvz' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ],
'lxc' => [ 'zip', 'tgz.lzo', 'zip.gz', '', ],
'qemu' => [ 'vma.xz', 'vms.gz', 'vmx.zst', '', ],
'none' => [ 'tar.gz', ],
'openvz' => ['zip', 'tgz.lzo', 'zip.gz', ''],
'lxc' => ['zip', 'tgz.lzo', 'zip.gz', ''],
'qemu' => ['vma.xz', 'vms.gz', 'vmx.zst', ''],
'none' => ['tar.gz'],
};

# create tests for failed matches
for my $virt (sort keys %$non_bkp_suffix) {
my $suffix = $non_bkp_suffix->{$virt};
for my $s (@$suffix) {
my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s";
push @$tests, {
description => "Failed match: Backup archive, $virt, $s",
archive => $archive,
expected => "ERROR: couldn't determine archive info from '$archive'\n",
};
my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s";
push @$tests,
{
description => "Failed match: Backup archive, $virt, $s",
archive => $archive,
expected => "ERROR: couldn't determine archive info from '$archive'\n",
};
}
}

plan tests => scalar @$tests;

for my $tt (@$tests) {
@ -23,57 +23,57 @@ sub mocked_run_command {

my $outputlines = [];
if (my $ref = ref($cmd)) {
if ($cmd->[0] =~ m/udevadm/i) {
# simulate udevadm output
my $dev = $cmd->[3];
$dev =~ s|/sys/block/||;
@$outputlines = split(/\n/, read_test_file("${dev}_udevadm"));
if ($cmd->[0] =~ m/udevadm/i) {
# simulate udevadm output
my $dev = $cmd->[3];
$dev =~ s|/sys/block/||;
@$outputlines = split(/\n/, read_test_file("${dev}_udevadm"));

} elsif ($cmd->[0] =~ m/smartctl/i) {
# simulate smartctl output
my $dev;
my $type;
if (@$cmd > 3) {
$dev = $cmd->[5];
$type = 'smart';
} else {
$dev = $cmd->[2];
$type = 'health';
}
$dev =~ s|/dev/||;
@$outputlines = split(/\n/, read_test_file("${dev}_${type}"));
} elsif ($cmd->[0] =~ m/sgdisk/i) {
# simulate sgdisk
die "implement me: @$cmd\n";
} elsif ($cmd->[0] =~ m/zpool/i) {
# simulate zpool output
@$outputlines = split(/\n/, read_test_file('zpool'));
} elsif ($cmd->[0] =~ m/smartctl/i) {
# simulate smartctl output
my $dev;
my $type;
if (@$cmd > 3) {
$dev = $cmd->[5];
$type = 'smart';
} else {
$dev = $cmd->[2];
$type = 'health';
}
$dev =~ s|/dev/||;
@$outputlines = split(/\n/, read_test_file("${dev}_${type}"));
} elsif ($cmd->[0] =~ m/sgdisk/i) {
# simulate sgdisk
die "implement me: @$cmd\n";
} elsif ($cmd->[0] =~ m/zpool/i) {
# simulate zpool output
@$outputlines = split(/\n/, read_test_file('zpool'));

} elsif ($cmd->[0] =~ m/pvs/i) {
# simulate lvs output
@$outputlines = split(/\n/, read_test_file('pvs'));
} elsif ($cmd->[0] =~ m/lvs/i) {
@$outputlines = split(/\n/, read_test_file('lvs'));
} elsif ($cmd->[0] =~ m/lsblk/i) {
my $content = read_test_file('lsblk');
if ($content eq '') {
$content = '{}';
}
@$outputlines = split(/\n/, $content);
} else {
die "unexpected run_command call: '@$cmd', aborting\n";
}
} elsif ($cmd->[0] =~ m/pvs/i) {
# simulate lvs output
@$outputlines = split(/\n/, read_test_file('pvs'));
} elsif ($cmd->[0] =~ m/lvs/i) {
@$outputlines = split(/\n/, read_test_file('lvs'));
} elsif ($cmd->[0] =~ m/lsblk/i) {
my $content = read_test_file('lsblk');
if ($content eq '') {
$content = '{}';
}
@$outputlines = split(/\n/, $content);
} else {
die "unexpected run_command call: '@$cmd', aborting\n";
}
} else {
print "unexpected run_command call: '@$cmd', aborting\n";
die;
print "unexpected run_command call: '@$cmd', aborting\n";
die;
}

my $outfunc;
if ($param{outfunc}) {
$outfunc = $param{outfunc};
map { &$outfunc(($_)) } @$outputlines;
$outfunc = $param{outfunc};
map { &$outfunc(($_)) } @$outputlines;

return 0;
return 0;
}
}

@ -107,16 +107,16 @@ sub mocked_dir_glob_foreach {
my $lines = [];

# read lines in from file
if ($dir =~ m{^/sys/block$} ) {
@$lines = split(/\n/, read_test_file('disklist'));
if ($dir =~ m{^/sys/block$}) {
@$lines = split(/\n/, read_test_file('disklist'));
} elsif ($dir =~ m{^/sys/block/([^/]+)}) {
@$lines = split(/\n/, read_test_file('partlist'));
@$lines = split(/\n/, read_test_file('partlist'));
}

foreach my $line (@$lines) {
if ($line =~ m/$regex/) {
&$sub($line);
}
if ($line =~ m/$regex/) {
&$sub($line);
}
}
}

@ -125,8 +125,8 @@ sub mocked_parse_proc_mounts {

my $mounts = [];

foreach my $line(split(/\n/, $text)) {
push @$mounts, [split(/\s+/, $line)];
foreach my $line (split(/\n/, $text)) {
push @$mounts, [split(/\s+/, $line)];
}

return $mounts;

@ -135,83 +135,85 @@ sub mocked_parse_proc_mounts {
sub read_test_file {
my ($filename) = @_;

if (!-f "disk_tests/$testcasedir/$filename") {
print "file '$testcasedir/$filename' not found\n";
return '';
if (!-f "disk_tests/$testcasedir/$filename") {
print "file '$testcasedir/$filename' not found\n";
return '';
}
open (my $fh, '<', "disk_tests/$testcasedir/$filename")
or die "Cannot open disk_tests/$testcasedir/$filename: $!";
open(my $fh, '<', "disk_tests/$testcasedir/$filename")
or die "Cannot open disk_tests/$testcasedir/$filename: $!";

my $output = <$fh> // '';
chomp $output if $output;
while (my $line = <$fh>) {
chomp $line;
$output .= "\n$line";
chomp $line;
$output .= "\n$line";
}

return $output;
}

sub test_disk_list {
my ($testdir) = @_;
subtest "Test '$testdir'" => sub {
my $testcount = 0;
$testcasedir = $testdir;
my $testcount = 0;
$testcasedir = $testdir;

my $disks;
my $expected_disk_list;
eval {
$disks = PVE::Diskmanage::get_disks();
};
warn $@ if $@;
$expected_disk_list = decode_json(read_test_file('disklist_expected.json'));
my $disks;
my $expected_disk_list;
eval { $disks = PVE::Diskmanage::get_disks(); };
warn $@ if $@;
$expected_disk_list = decode_json(read_test_file('disklist_expected.json'));

print Dumper($disks) if $print;
$testcount++;
is_deeply($disks, $expected_disk_list, 'disk list should be the same');
print Dumper($disks) if $print;
$testcount++;
is_deeply($disks, $expected_disk_list, 'disk list should be the same');

foreach my $disk (sort keys %$disks) {
my $smart;
my $expected_smart;
eval {
$smart = PVE::Diskmanage::get_smart_data("/dev/$disk");
print Dumper($smart) if $print;
$expected_smart = decode_json(read_test_file("${disk}_smart_expected.json"));
};
foreach my $disk (sort keys %$disks) {
my $smart;
my $expected_smart;
eval {
$smart = PVE::Diskmanage::get_smart_data("/dev/$disk");
print Dumper($smart) if $print;
$expected_smart = decode_json(read_test_file("${disk}_smart_expected.json"));
};

if ($smart && $expected_smart) {
$testcount++;
is_deeply($smart, $expected_smart, "smart data for '$disk' should be the same");
} elsif ($smart && -f "disk_tests/$testcasedir/${disk}_smart_expected.json") {
$testcount++;
ok(0, "could not parse expected smart for '$disk'\n");
}
my $disk_tmp = {};
if ($smart && $expected_smart) {
$testcount++;
is_deeply($smart, $expected_smart, "smart data for '$disk' should be the same");
} elsif ($smart && -f "disk_tests/$testcasedir/${disk}_smart_expected.json") {
$testcount++;
ok(0, "could not parse expected smart for '$disk'\n");
}
my $disk_tmp = {};

# test single disk parameter
$disk_tmp = PVE::Diskmanage::get_disks($disk);
warn $@ if $@;
$testcount++;
print Dumper $disk_tmp if $print;
is_deeply($disk_tmp->{$disk}, $expected_disk_list->{$disk}, "disk $disk should be the same");
# test single disk parameter
$disk_tmp = PVE::Diskmanage::get_disks($disk);
warn $@ if $@;
$testcount++;
print Dumper $disk_tmp if $print;
is_deeply(
$disk_tmp->{$disk},
$expected_disk_list->{$disk},
"disk $disk should be the same",
);

# test wrong parameter
eval { PVE::Diskmanage::get_disks({ test => 1 }); };
my $err = $@;
$testcount++;
is_deeply(
$err,
"disks is not a string or array reference\n",
"error message should be the same",
);

# test wrong parameter
eval {
PVE::Diskmanage::get_disks( { test => 1 } );
};
my $err = $@;
$testcount++;
is_deeply($err, "disks is not a string or array reference\n", "error message should be the same");
}
# test multi disk parameter
$disks = PVE::Diskmanage::get_disks([keys %$disks]);
$testcount++;
is_deeply($disks, $expected_disk_list, 'disk list should be the same');

}
# test multi disk parameter
$disks = PVE::Diskmanage::get_disks( [ keys %$disks ] );
$testcount++;
is_deeply($disks, $expected_disk_list, 'disk list should be the same');

done_testing($testcount);
done_testing($testcount);
};
}

@ -235,24 +237,26 @@ $diskmanage_module->mock('is_iscsi' => \&mocked_is_iscsi);
print("\tMocked is_iscsi\n");
$diskmanage_module->mock('assert_blockdev' => sub { return 1; });
print("\tMocked assert_blockdev\n");
$diskmanage_module->mock('dir_is_empty' => sub {
# all partitions have a holder dir
my $val = shift;
if ($val =~ m|^/sys/block/.+/.+/|) {
return 0;
}
return 1;
});
$diskmanage_module->mock(
'dir_is_empty' => sub {
# all partitions have a holder dir
my $val = shift;
if ($val =~ m|^/sys/block/.+/.+/|) {
return 0;
}
return 1;
},
);
print("\tMocked dir_is_empty\n");
$diskmanage_module->mock('check_bin' => sub { return 1; });
print("\tMocked check_bin\n");
my $tools_module= Test::MockModule->new('PVE::ProcFSTools', no_auto => 1);
my $tools_module = Test::MockModule->new('PVE::ProcFSTools', no_auto => 1);
$tools_module->mock('parse_proc_mounts' => \&mocked_parse_proc_mounts);
print("\tMocked parse_proc_mounts\n");
print("Done Setting up Mocking\n\n");

print("Beginning Tests:\n\n");
opendir (my $dh, 'disk_tests')
opendir(my $dh, 'disk_tests')
or die "Cannot open disk_tests: $!";

while (readdir $dh) {
@ -16,54 +16,44 @@ my $path = '/some/path';
# expected => the array of return values; or the die message
my $tests = [
{
volname => '1234/vm-1234-disk-0.raw',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.raw",
'1234',
'images'
],
volname => '1234/vm-1234-disk-0.raw',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.raw", '1234', 'images',
],
},
{
volname => '1234/vm-1234-disk-0.raw',
snapname => 'my_snap',
expected => "can't snapshot this image format\n"
volname => '1234/vm-1234-disk-0.raw',
snapname => 'my_snap',
expected => "can't snapshot this image format\n",
},
{
volname => '1234/vm-1234-disk-0.qcow2',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2",
'1234',
'images'
],
volname => '1234/vm-1234-disk-0.qcow2',
snapname => undef,
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images',
],
},
{
volname => '1234/vm-1234-disk-0.qcow2',
snapname => 'my_snap',
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2",
'1234',
'images'
],
volname => '1234/vm-1234-disk-0.qcow2',
snapname => 'my_snap',
expected => [
"$path/images/1234/vm-1234-disk-0.qcow2", '1234', 'images',
],
},
{
volname => 'iso/my-awesome-proxmox.iso',
snapname => undef,
expected => [
"$path/template/iso/my-awesome-proxmox.iso",
undef,
'iso'
],
volname => 'iso/my-awesome-proxmox.iso',
snapname => undef,
expected => [
"$path/template/iso/my-awesome-proxmox.iso", undef, 'iso',
],
},
{
volname => "backup/vzdump-qemu-1234-2020_03_30-21_12_40.vma",
snapname => undef,
expected => [
"$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma",
1234,
'backup'
],
volname => "backup/vzdump-qemu-1234-2020_03_30-21_12_40.vma",
snapname => undef,
expected => [
"$path/dump/vzdump-qemu-1234-2020_03_30-21_12_40.vma", 1234, 'backup',
],
},
];

@ -76,13 +66,11 @@ foreach my $tt (@$tests) {
my $scfg = { path => $path };
my $got;

eval {
$got = [ PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname) ];
};
eval { $got = [PVE::Storage::Plugin->filesystem_path($scfg, $volname, $snapname)]; };
$got = $@ if $@;

is_deeply($got, $expected, "wantarray: filesystem_path for $volname")
|| diag(explain($got));
|| diag(explain($got));

}
@ -17,21 +17,26 @@ my $vtype_subdirs = PVE::Storage::Plugin::get_vtype_subdirs();
# [2] => expected return from get_subdir
my $tests = [
# failed matches
[ $scfg_with_path, 'none', "unknown vtype 'none'\n" ],
[ {}, 'iso', "storage definition has no path\n" ],
[$scfg_with_path, 'none', "unknown vtype 'none'\n"],
[{}, 'iso', "storage definition has no path\n"],
];

# creates additional positive tests
foreach my $type (keys %$vtype_subdirs) {
my $path = "$scfg_with_path->{path}/$vtype_subdirs->{$type}";
push @$tests, [ $scfg_with_path, $type, $path ];
push @$tests, [$scfg_with_path, $type, $path];
}

# creates additional tests for overrides
foreach my $type (keys %$vtype_subdirs) {
my $override = "${type}_override";
my $scfg_with_override = { path => '/some/path', 'content-dirs' => { $type => $override } };
push @$tests, [ $scfg_with_override, $type, "$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}" ];
push @$tests,
[
$scfg_with_override,
$type,
"$scfg_with_override->{path}/$scfg_with_override->{'content-dirs'}->{$type}",
];
}

plan tests => scalar @$tests;

@ -43,7 +48,7 @@ foreach my $tt (@$tests) {
eval { $got = PVE::Storage::Plugin->get_subdir($scfg, $type) };
$got = $@ if $@;

is ($got, $expected, "get_subdir for $type") || diag(explain($got));
is($got, $expected, "get_subdir for $type") || diag(explain($got));
}

done_testing();
@ -27,52 +27,52 @@ use constant DEFAULT_CTIME => 1234567890;
|
||||
my $mocked_vmlist = {
|
||||
'version' => 1,
|
||||
'ids' => {
|
||||
'16110' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 4,
|
||||
},
|
||||
'16112' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'lxc',
|
||||
'version' => 7,
|
||||
},
|
||||
'16114' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 2,
|
||||
},
|
||||
'16113' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 5,
|
||||
},
|
||||
'16115' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 1,
|
||||
},
|
||||
'9004' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 6,
|
||||
}
|
||||
}
|
||||
'16110' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 4,
|
||||
},
|
||||
'16112' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'lxc',
|
||||
'version' => 7,
|
||||
},
|
||||
'16114' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 2,
|
||||
},
|
||||
'16113' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 5,
|
||||
},
|
||||
'16115' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 1,
|
||||
},
|
||||
'9004' => {
|
||||
'node' => 'x42',
|
||||
'type' => 'qemu',
|
||||
'version' => 6,
|
||||
},
|
||||
},
|
||||
};
my $storage_dir = File::Temp->newdir();
|
||||
my $scfg = {
|
||||
'type' => 'dir',
|
||||
'type' => 'dir',
|
||||
'maxfiles' => 0,
|
||||
'path' => $storage_dir,
|
||||
'shared' => 0,
|
||||
'content' => {
|
||||
'iso' => 1,
|
||||
'rootdir' => 1,
|
||||
'vztmpl' => 1,
|
||||
'images' => 1,
|
||||
'snippets' => 1,
|
||||
'backup' => 1,
|
||||
'path' => $storage_dir,
|
||||
'shared' => 0,
|
||||
'content' => {
|
||||
'iso' => 1,
|
||||
'rootdir' => 1,
|
||||
'vztmpl' => 1,
|
||||
'images' => 1,
|
||||
'snippets' => 1,
|
||||
'backup' => 1,
|
||||
},
|
||||
};
@ -84,389 +84,387 @@ my $scfg = {
|
||||
# (content, ctime, format, parent, size, used, vmid, volid)
|
||||
my @tests = (
|
||||
{
|
||||
description => 'VMID: 16110, VM, qcow2, backup, snippets',
|
||||
vmid => '16110',
|
||||
files => [
|
||||
"$storage_dir/images/16110/vm-16110-disk-0.qcow2",
|
||||
"$storage_dir/images/16110/vm-16110-disk-1.raw",
|
||||
"$storage_dir/images/16110/vm-16110-disk-2.vmdk",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst",
|
||||
"$storage_dir/snippets/userconfig.yaml",
|
||||
"$storage_dir/snippets/hookscript.pl",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'raw',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-1.raw',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'vmdk',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-2.vmdk',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602700,
|
||||
'format' => 'vma.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602765,
|
||||
'format' => 'vma.lzo',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602835,
|
||||
'format' => 'vma',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602835,
|
||||
'format' => 'vma.zst',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/hookscript.pl',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/userconfig.yaml',
|
||||
},
|
||||
],
|
||||
description => 'VMID: 16110, VM, qcow2, backup, snippets',
|
||||
vmid => '16110',
|
||||
files => [
|
||||
"$storage_dir/images/16110/vm-16110-disk-0.qcow2",
|
||||
"$storage_dir/images/16110/vm-16110-disk-1.raw",
|
||||
"$storage_dir/images/16110/vm-16110-disk-2.vmdk",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst",
|
||||
"$storage_dir/snippets/userconfig.yaml",
|
||||
"$storage_dir/snippets/hookscript.pl",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'raw',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-1.raw',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'vmdk',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:16110/vm-16110-disk-2.vmdk',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602700,
|
||||
'format' => 'vma.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602765,
|
||||
'format' => 'vma.lzo',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602835,
|
||||
'format' => 'vma',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585602835,
|
||||
'format' => 'vma.zst',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'qemu',
|
||||
'vmid' => '16110',
|
||||
'volid' => 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/hookscript.pl',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/userconfig.yaml',
|
||||
},
|
||||
],
|
||||
},
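The backup entries above expect hard-coded 'ctime' values (1585602700, 1585602765, 1585602835) rather than DEFAULT_CTIME; they line up exactly with the timestamp embedded in the vzdump file name when that timestamp is read as UTC. A minimal sketch of that relation, assuming UTC:

use strict;
use warnings;
use Time::Local qw(timegm);

my $archive = 'vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz';
if ($archive =~ m/-(\d{4})_(\d{2})_(\d{2})-(\d{2})_(\d{2})_(\d{2})\./) {
    # month is 0-based for timegm
    my $ctime = timegm($6, $5, $4, $3, $2 - 1, $1);
    print "$ctime\n";    # 1585602700, the value expected above
}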
|
||||
{
|
||||
description => 'VMID: 16112, lxc, raw, backup',
|
||||
vmid => '16112',
|
||||
files => [
|
||||
"$storage_dir/images/16112/vm-16112-disk-0.raw",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_59_30.tgz",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'rootdir',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'raw',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:16112/vm-16112-disk-0.raw',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604370,
|
||||
'format' => 'tar.lzo',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604970,
|
||||
'format' => 'tar.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604970,
|
||||
'format' => 'tar.zst',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585605570,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_59_30.tgz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604370,
|
||||
'format' => 'tar.bz2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'openvz',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
|
||||
},
|
||||
],
|
||||
description => 'VMID: 16112, lxc, raw, backup',
|
||||
vmid => '16112',
|
||||
files => [
|
||||
"$storage_dir/images/16112/vm-16112-disk-0.raw",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst",
|
||||
"$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_59_30.tgz",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'rootdir',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'raw',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:16112/vm-16112-disk-0.raw',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604370,
|
||||
'format' => 'tar.lzo',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604970,
|
||||
'format' => 'tar.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604970,
|
||||
'format' => 'tar.zst',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_49_30.tar.zst',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585605570,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-lxc-16112-2020_03_30-21_59_30.tgz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1585604370,
|
||||
'format' => 'tar.bz2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'openvz',
|
||||
'vmid' => '16112',
|
||||
'volid' => 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'VMID: 16114, VM, qcow2, linked clone',
|
||||
vmid => '16114',
|
||||
files => [
|
||||
"$storage_dir/images/16114/vm-16114-disk-0.qcow2",
|
||||
"$storage_dir/images/16114/vm-16114-disk-1.qcow2",
|
||||
],
|
||||
parent => [
|
||||
"../9004/base-9004-disk-0.qcow2",
|
||||
"../9004/base-9004-disk-1.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../9004/base-9004-disk-0.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16114',
|
||||
'volid' => 'local:9004/base-9004-disk-0.qcow2/16114/vm-16114-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../9004/base-9004-disk-1.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16114',
|
||||
'volid' => 'local:9004/base-9004-disk-1.qcow2/16114/vm-16114-disk-1.qcow2',
|
||||
},
|
||||
],
|
||||
description => 'VMID: 16114, VM, qcow2, linked clone',
|
||||
vmid => '16114',
|
||||
files => [
|
||||
"$storage_dir/images/16114/vm-16114-disk-0.qcow2",
|
||||
"$storage_dir/images/16114/vm-16114-disk-1.qcow2",
|
||||
],
|
||||
parent => [
|
||||
"../9004/base-9004-disk-0.qcow2", "../9004/base-9004-disk-1.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../9004/base-9004-disk-0.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16114',
|
||||
'volid' => 'local:9004/base-9004-disk-0.qcow2/16114/vm-16114-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../9004/base-9004-disk-1.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '16114',
|
||||
'volid' => 'local:9004/base-9004-disk-1.qcow2/16114/vm-16114-disk-1.qcow2',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'VMID: 9004, VM, template, qcow2',
|
||||
vmid => '9004',
|
||||
files => [
|
||||
"$storage_dir/images/9004/base-9004-disk-0.qcow2",
|
||||
"$storage_dir/images/9004/base-9004-disk-1.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '9004',
|
||||
'volid' => 'local:9004/base-9004-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '9004',
|
||||
'volid' => 'local:9004/base-9004-disk-1.qcow2',
|
||||
},
|
||||
],
|
||||
description => 'VMID: 9004, VM, template, qcow2',
|
||||
vmid => '9004',
|
||||
files => [
|
||||
"$storage_dir/images/9004/base-9004-disk-0.qcow2",
|
||||
"$storage_dir/images/9004/base-9004-disk-1.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '9004',
|
||||
'volid' => 'local:9004/base-9004-disk-0.qcow2',
|
||||
},
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => undef,
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '9004',
|
||||
'volid' => 'local:9004/base-9004-disk-1.qcow2',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'VMID: none, templates, snippets, backup',
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/dump/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz",
|
||||
"$storage_dir/dump/vzdump-lxc-19254-2019_01_21-19_29_19.tar",
|
||||
"$storage_dir/template/iso/archlinux-2020.02.01-x86_64.iso",
|
||||
"$storage_dir/template/iso/debian-8.11.1-amd64-DVD-1.iso",
|
||||
"$storage_dir/template/iso/debian-9.12.0-amd64-netinst.iso",
|
||||
"$storage_dir/template/iso/proxmox-ve_6.1-1.iso",
|
||||
"$storage_dir/template/cache/archlinux-base_20190924-1_amd64.tar.gz",
|
||||
"$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz",
|
||||
"$storage_dir/template/cache/debian-11.0-standard_11.0-1_amd64.tar.bz2",
|
||||
"$storage_dir/template/cache/alpine-3.10-default_20190626_amd64.tar.xz",
|
||||
"$storage_dir/snippets/userconfig.yaml",
|
||||
"$storage_dir/snippets/hookscript.pl",
|
||||
"$storage_dir/private/1234/", # fileparse needs / at the end
|
||||
"$storage_dir/private/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'txz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/alpine-3.10-default_20190626_amd64.tar.xz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/archlinux-base_20190924-1_amd64.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tbz2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/debian-11.0-standard_11.0-1_amd64.tar.bz2',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/archlinux-2020.02.01-x86_64.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/debian-8.11.1-amd64-DVD-1.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/debian-9.12.0-amd64-netinst.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/proxmox-ve_6.1-1.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1580759863,
|
||||
'format' => 'tar.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '19253',
|
||||
'volid' => 'local:backup/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1548098959,
|
||||
'format' => 'tar',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '19254',
|
||||
'volid' => 'local:backup/vzdump-lxc-19254-2019_01_21-19_29_19.tar',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/hookscript.pl',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/userconfig.yaml',
|
||||
},
|
||||
],
|
||||
description => 'VMID: none, templates, snippets, backup',
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/dump/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz",
|
||||
"$storage_dir/dump/vzdump-lxc-19254-2019_01_21-19_29_19.tar",
|
||||
"$storage_dir/template/iso/archlinux-2020.02.01-x86_64.iso",
|
||||
"$storage_dir/template/iso/debian-8.11.1-amd64-DVD-1.iso",
|
||||
"$storage_dir/template/iso/debian-9.12.0-amd64-netinst.iso",
|
||||
"$storage_dir/template/iso/proxmox-ve_6.1-1.iso",
|
||||
"$storage_dir/template/cache/archlinux-base_20190924-1_amd64.tar.gz",
|
||||
"$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz",
|
||||
"$storage_dir/template/cache/debian-11.0-standard_11.0-1_amd64.tar.bz2",
|
||||
"$storage_dir/template/cache/alpine-3.10-default_20190626_amd64.tar.xz",
|
||||
"$storage_dir/snippets/userconfig.yaml",
|
||||
"$storage_dir/snippets/hookscript.pl",
|
||||
"$storage_dir/private/1234/", # fileparse needs / at the end
|
||||
"$storage_dir/private/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'txz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/alpine-3.10-default_20190626_amd64.tar.xz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/archlinux-base_20190924-1_amd64.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tgz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'vztmpl',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'tbz2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:vztmpl/debian-11.0-standard_11.0-1_amd64.tar.bz2',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/archlinux-2020.02.01-x86_64.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/debian-8.11.1-amd64-DVD-1.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/debian-9.12.0-amd64-netinst.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'iso',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'iso',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:iso/proxmox-ve_6.1-1.iso',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1580759863,
|
||||
'format' => 'tar.gz',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '19253',
|
||||
'volid' => 'local:backup/vzdump-lxc-19253-2020_02_03-19_57_43.tar.gz',
|
||||
},
|
||||
{
|
||||
'content' => 'backup',
|
||||
'ctime' => 1548098959,
|
||||
'format' => 'tar',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'subtype' => 'lxc',
|
||||
'vmid' => '19254',
|
||||
'volid' => 'local:backup/vzdump-lxc-19254-2019_01_21-19_29_19.tar',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/hookscript.pl',
|
||||
},
|
||||
{
|
||||
'content' => 'snippets',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'snippet',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'volid' => 'local:snippets/userconfig.yaml',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'VMID: none, parent, non-matching',
|
||||
# string instead of vmid in folder
|
||||
#"$storage_dir/images/ssss/base-4321-disk-0.qcow2/1234/vm-1234-disk-0.qcow2",
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/images/1234/vm-1234-disk-0.qcow2",
|
||||
],
|
||||
parent => [
|
||||
"../ssss/base-4321-disk-0.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../ssss/base-4321-disk-0.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '1234',
|
||||
'volid' => 'local:1234/vm-1234-disk-0.qcow2',
|
||||
}
|
||||
],
|
||||
description => 'VMID: none, parent, non-matching',
|
||||
# string instead of vmid in folder
|
||||
#"$storage_dir/images/ssss/base-4321-disk-0.qcow2/1234/vm-1234-disk-0.qcow2",
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/images/1234/vm-1234-disk-0.qcow2",
|
||||
],
|
||||
parent => [
|
||||
"../ssss/base-4321-disk-0.qcow2",
|
||||
],
|
||||
expected => [
|
||||
{
|
||||
'content' => 'images',
|
||||
'ctime' => DEFAULT_CTIME,
|
||||
'format' => 'qcow2',
|
||||
'parent' => '../ssss/base-4321-disk-0.qcow2',
|
||||
'size' => DEFAULT_SIZE,
|
||||
'used' => DEFAULT_USED,
|
||||
'vmid' => '1234',
|
||||
'volid' => 'local:1234/vm-1234-disk-0.qcow2',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'VMID: none, non-matching',
|
||||
# failed matches
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/images/ssss/base-4321-disk-0.raw",
|
||||
"$storage_dir/images/ssss/vm-1234-disk-0.qcow2",
|
||||
"$storage_dir/template/iso/yet-again-a-installation-disk.dvd",
|
||||
"$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz",
|
||||
"$storage_dir/private/subvol-19254-disk-0/19254",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz",
|
||||
],
|
||||
expected => [], # returns empty list
|
||||
description => 'VMID: none, non-matching',
|
||||
# failed matches
|
||||
vmid => undef,
|
||||
files => [
|
||||
"$storage_dir/images/ssss/base-4321-disk-0.raw",
|
||||
"$storage_dir/images/ssss/vm-1234-disk-0.qcow2",
|
||||
"$storage_dir/template/iso/yet-again-a-installation-disk.dvd",
|
||||
"$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz",
|
||||
"$storage_dir/private/subvol-19254-disk-0/19254",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz",
|
||||
"$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz",
|
||||
"$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz",
|
||||
],
|
||||
expected => [], # returns empty list
|
||||
},
|
||||
);
# provide static vmlist for tests
|
||||
my $mock_cluster = Test::MockModule->new('PVE::Cluster', no_auto => 1);
|
||||
$mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; });
|
||||
@ -474,26 +472,31 @@ $mock_cluster->redefine(get_vmlist => sub { return $mocked_vmlist; });
|
||||
# populate is File::stat's method to fill all information from CORE::stat into
|
||||
# a blessed array.
|
||||
my $mock_stat = Test::MockModule->new('File::stat', no_auto => 1);
|
||||
$mock_stat->redefine(populate => sub {
|
||||
my (@st) = @_;
|
||||
$st[7] = DEFAULT_SIZE;
|
||||
$st[10] = DEFAULT_CTIME;
|
||||
$mock_stat->redefine(
|
||||
populate => sub {
|
||||
my (@st) = @_;
|
||||
$st[7] = DEFAULT_SIZE;
|
||||
$st[10] = DEFAULT_CTIME;
|
||||
|
||||
my $result = $mock_stat->original('populate')->(@st);
|
||||
my $result = $mock_stat->original('populate')->(@st);
|
||||
|
||||
return $result;
|
||||
});
|
||||
return $result;
|
||||
},
|
||||
);
# override info provided by qemu-img in file_size_info
|
||||
my $mock_fsi = Test::MockModule->new('PVE::Storage::Plugin', no_auto => 1);
|
||||
$mock_fsi->redefine(file_size_info => sub {
|
||||
my ($size, $format, $used, $parent, $ctime) = $mock_fsi->original('file_size_info')->(@_);
|
||||
$mock_fsi->redefine(
|
||||
file_size_info => sub {
|
||||
my ($size, $format, $used, $parent, $ctime) =
|
||||
$mock_fsi->original('file_size_info')->(@_);
|
||||
|
||||
$size = DEFAULT_SIZE;
|
||||
$used = DEFAULT_USED;
|
||||
$size = DEFAULT_SIZE;
|
||||
$used = DEFAULT_USED;
|
||||
|
||||
return wantarray ? ($size, $format, $used, $parent, $ctime) : $size;
|
||||
});
|
||||
return wantarray ? ($size, $format, $used, $parent, $ctime) : $size;
|
||||
},
|
||||
);
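Both mocks above follow the same Test::MockModule pattern: redefine() installs a wrapper, and original() lets the wrapper delegate to the real implementation so that only selected values are overridden. A self-contained sketch of that pattern with a throwaway package; nothing here is Proxmox-specific:

use strict;
use warnings;
use Test::MockModule;

package My::Demo;    # stand-in package, only for this sketch
sub answer { return 42; }

package main;

my $mock = Test::MockModule->new('My::Demo', no_auto => 1);
$mock->redefine(
    answer => sub {
        # call the real implementation, then tweak its result
        my $real = $mock->original('answer')->(@_);
        return $real + 1;
    },
);

print My::Demo::answer(), "\n";    # prints 43 while the mock is in scope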
my $plan = scalar @tests;
|
||||
plan tests => $plan + 1;
|
||||
@ -507,54 +510,56 @@ plan tests => $plan + 1;
|
||||
|
||||
PVE::Storage::Plugin->list_volumes('sid', $scfg_with_type, undef, ['images']);
|
||||
|
||||
is_deeply ($tested_vmlist, $original_vmlist,
|
||||
'PVE::Cluster::vmlist remains unmodified')
|
||||
|| diag ("Expected vmlist to remain\n", explain($original_vmlist),
|
||||
"but it turned to\n", explain($tested_vmlist));
|
||||
is_deeply($tested_vmlist, $original_vmlist, 'PVE::Cluster::vmlist remains unmodified')
|
||||
|| diag(
|
||||
"Expected vmlist to remain\n",
|
||||
explain($original_vmlist),
|
||||
"but it turned to\n",
|
||||
explain($tested_vmlist),
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
my $sid = 'local';
|
||||
my $types = [ 'rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets' ];
|
||||
my @suffixes = ( 'qcow2', 'raw', 'vmdk', 'vhdx' );
|
||||
my $types = ['rootdir', 'images', 'vztmpl', 'iso', 'backup', 'snippets'];
|
||||
my @suffixes = ('qcow2', 'raw', 'vmdk', 'vhdx');
|
||||
|
||||
# run through test cases
|
||||
foreach my $tt (@tests) {
|
||||
my $vmid = $tt->{vmid};
|
||||
my $files = $tt->{files};
|
||||
my $expected = $tt->{expected};
|
||||
my $description = $tt->{description};
|
||||
my $parent = $tt->{parent};
|
||||
my $vmid = $tt->{vmid};
|
||||
my $files = $tt->{files};
|
||||
my $expected = $tt->{expected};
|
||||
my $description = $tt->{description};
|
||||
my $parent = $tt->{parent};
|
||||
|
||||
# prepare environment
|
||||
my $num = 0; #parent disks
|
||||
for my $file (@$files) {
|
||||
my ($name, $dir, $suffix) = fileparse($file, @suffixes);
|
||||
# prepare environment
|
||||
my $num = 0; #parent disks
|
||||
for my $file (@$files) {
|
||||
my ($name, $dir, $suffix) = fileparse($file, @suffixes);
|
||||
|
||||
make_path($dir, { verbose => 1, mode => 0755 });
|
||||
make_path($dir, { verbose => 1, mode => 0755 });
|
||||
|
||||
if ($name) {
|
||||
# using qemu-img to also be able to represent the backing device
|
||||
my @cmd = ( '/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE );
|
||||
push @cmd, ( '-f', $suffix ) if $suffix;
|
||||
push @cmd, ( '-u', '-b', @$parent[$num] ) if $parent;
|
||||
push @cmd, ( '-F', $suffix ) if $parent && $suffix;
|
||||
$num++;
|
||||
if ($name) {
|
||||
# using qemu-img to also be able to represent the backing device
|
||||
my @cmd = ('/usr/bin/qemu-img', 'create', "$file", DEFAULT_SIZE);
|
||||
push @cmd, ('-f', $suffix) if $suffix;
|
||||
push @cmd, ('-u', '-b', @$parent[$num]) if $parent;
|
||||
push @cmd, ('-F', $suffix) if $parent && $suffix;
|
||||
$num++;
|
||||
|
||||
run_command([@cmd]);
|
||||
}
|
||||
}
|
||||
run_command([@cmd]);
|
||||
}
|
||||
}
|
||||
|
||||
my $got;
|
||||
eval { $got = PVE::Storage::Plugin->list_volumes($sid, $scfg, $vmid, $types) };
|
||||
$got = $@ if $@;
|
||||
my $got;
|
||||
eval { $got = PVE::Storage::Plugin->list_volumes($sid, $scfg, $vmid, $types) };
|
||||
$got = $@ if $@;
|
||||
|
||||
is_deeply($got, $expected, $description) || diag(explain($got));
|
||||
is_deeply($got, $expected, $description) || diag(explain($got));
|
||||
|
||||
# clean up after each test case, otherwise
|
||||
# we get wrong results from leftover files
|
||||
remove_tree($storage_dir, { verbose => 1 });
|
||||
# clean up after each test case, otherwise
|
||||
# we get wrong results from leftover files
|
||||
remove_tree($storage_dir, { verbose => 1 });
|
||||
}
|
||||
}
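For the linked-clone cases, the preparation loop above hands qemu-img a backing file so that the parent relation shows up in the image metadata. A sketch of the argument list this produces for one disk of the 16114 test case; the size value is illustrative only, the test uses its DEFAULT_SIZE constant:

use strict;
use warnings;

my $file   = 'images/16114/vm-16114-disk-0.qcow2';
my $parent = '../9004/base-9004-disk-0.qcow2';
my @cmd = (
    '/usr/bin/qemu-img', 'create', $file, '1G',
    '-f', 'qcow2',          # format of the new image
    '-u', '-b', $parent,    # don't open/validate the backing file, just record it
    '-F', 'qcow2',          # format of the backing file
);
print join(' ', @cmd), "\n";    # the argument list the test passes to run_command()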
|
||||
|
||||
|
||||
@ -19,251 +19,285 @@ my $tests = [
|
||||
# VM images
|
||||
#
|
||||
{
|
||||
description => 'VM disk image, linked, qcow2, vm- as base-',
|
||||
volname => "$vmid/vm-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
|
||||
expected => [ 'images', "vm-$vmid-disk-0.qcow2", "$vmid", "vm-$vmid-disk-0.qcow2", "$vmid", undef, 'qcow2', ],
|
||||
description => 'VM disk image, linked, qcow2, vm- as base-',
|
||||
volname => "$vmid/vm-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
|
||||
expected => [
|
||||
'images',
|
||||
"vm-$vmid-disk-0.qcow2",
|
||||
"$vmid",
|
||||
"vm-$vmid-disk-0.qcow2",
|
||||
"$vmid",
|
||||
undef,
|
||||
'qcow2',
|
||||
],
|
||||
},
|
||||
#
|
||||
# iso
|
||||
#
|
||||
{
|
||||
description => 'ISO image, iso',
|
||||
volname => 'iso/some-installation-disk.iso',
|
||||
expected => ['iso', 'some-installation-disk.iso', undef, undef, undef, undef, 'raw'],
|
||||
description => 'ISO image, iso',
|
||||
volname => 'iso/some-installation-disk.iso',
|
||||
expected => ['iso', 'some-installation-disk.iso', undef, undef, undef, undef, 'raw'],
|
||||
},
|
||||
{
|
||||
description => 'ISO image, img',
|
||||
volname => 'iso/some-other-installation-disk.img',
|
||||
expected => ['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'],
|
||||
description => 'ISO image, img',
|
||||
volname => 'iso/some-other-installation-disk.img',
|
||||
expected =>
|
||||
['iso', 'some-other-installation-disk.img', undef, undef, undef, undef, 'raw'],
|
||||
},
|
||||
#
|
||||
# container templates
|
||||
#
|
||||
{
|
||||
description => 'Container template tar.gz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.gz', undef, undef, undef, undef, 'raw'],
|
||||
description => 'Container template tar.gz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Container template tar.xz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
|
||||
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.xz', undef, undef, undef, undef, 'raw'],
|
||||
description => 'Container template tar.xz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'debian-10.0-standard_10.0-1_amd64.tar.xz',
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Container template tar.bz2',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
|
||||
expected => ['vztmpl', 'debian-10.0-standard_10.0-1_amd64.tar.bz2', undef, undef, undef, undef, 'raw'],
|
||||
description => 'Container template tar.bz2',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'debian-10.0-standard_10.0-1_amd64.tar.bz2',
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw',
|
||||
],
|
||||
},
|
||||
#
|
||||
# container rootdir
|
||||
#
|
||||
{
|
||||
description => 'Container rootdir, sub directory',
|
||||
volname => "rootdir/$vmid",
|
||||
expected => ['rootdir', "$vmid", "$vmid"],
|
||||
description => 'Container rootdir, sub directory',
|
||||
volname => "rootdir/$vmid",
|
||||
expected => ['rootdir', "$vmid", "$vmid"],
|
||||
},
|
||||
{
|
||||
description => 'Container rootdir, subvol',
|
||||
volname => "$vmid/subvol-$vmid-disk-0.subvol",
|
||||
expected => [ 'images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol' ],
|
||||
description => 'Container rootdir, subvol',
|
||||
volname => "$vmid/subvol-$vmid-disk-0.subvol",
|
||||
expected =>
|
||||
['images', "subvol-$vmid-disk-0.subvol", "$vmid", undef, undef, undef, 'subvol'],
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, no virtualization type',
|
||||
volname => "backup/vzdump-none-$vmid-2020_03_30-21_39_30.tar",
|
||||
expected => ['backup', "vzdump-none-$vmid-2020_03_30-21_39_30.tar", undef, undef, undef, undef, 'raw'],
|
||||
description => 'Backup archive, no virtualization type',
|
||||
volname => "backup/vzdump-none-$vmid-2020_03_30-21_39_30.tar",
|
||||
expected => [
|
||||
'backup',
|
||||
"vzdump-none-$vmid-2020_03_30-21_39_30.tar",
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw',
|
||||
],
|
||||
},
|
||||
#
|
||||
# Snippets
|
||||
#
|
||||
{
|
||||
description => 'Snippets, yaml',
|
||||
volname => 'snippets/userconfig.yaml',
|
||||
expected => ['snippets', 'userconfig.yaml', undef, undef, undef, undef, 'raw'],
|
||||
description => 'Snippets, yaml',
|
||||
volname => 'snippets/userconfig.yaml',
|
||||
expected => ['snippets', 'userconfig.yaml', undef, undef, undef, undef, 'raw'],
|
||||
},
|
||||
{
|
||||
description => 'Snippets, perl',
|
||||
volname => 'snippets/hookscript.pl',
|
||||
expected => ['snippets', 'hookscript.pl', undef, undef, undef, undef, 'raw'],
|
||||
description => 'Snippets, perl',
|
||||
volname => 'snippets/hookscript.pl',
|
||||
expected => ['snippets', 'hookscript.pl', undef, undef, undef, undef, 'raw'],
|
||||
},
|
||||
#
|
||||
# Import
|
||||
#
|
||||
{
|
||||
description => "Import, ova",
|
||||
volname => 'import/import.ova',
|
||||
expected => ['import', 'import.ova', undef, undef, undef ,undef, 'ova'],
|
||||
description => "Import, ova",
|
||||
volname => 'import/import.ova',
|
||||
expected => ['import', 'import.ova', undef, undef, undef, undef, 'ova'],
|
||||
},
|
||||
{
|
||||
description => "Import, ovf",
|
||||
volname => 'import/import.ovf',
|
||||
expected => ['import', 'import.ovf', undef, undef, undef ,undef, 'ovf'],
|
||||
description => "Import, ovf",
|
||||
volname => 'import/import.ovf',
|
||||
expected => ['import', 'import.ovf', undef, undef, undef, undef, 'ovf'],
|
||||
},
|
||||
{
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.qcow2',
|
||||
expected => ['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'],
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.qcow2',
|
||||
expected =>
|
||||
['import', 'import.ova/disk.qcow2', undef, undef, undef, undef, 'ova+qcow2'],
|
||||
},
|
||||
{
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.vmdk',
|
||||
expected => ['import', 'import.ova/disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.vmdk',
|
||||
expected => ['import', 'import.ova/disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
|
||||
},
|
||||
{
|
||||
description => "Import, innner file of ova with whitespace in name",
|
||||
volname => 'import/import.ova/OS disk.vmdk',
|
||||
expected => ['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
|
||||
description => "Import, innner file of ova with whitespace in name",
|
||||
volname => 'import/import.ova/OS disk.vmdk',
|
||||
expected =>
|
||||
['import', 'import.ova/OS disk.vmdk', undef, undef, undef, undef, 'ova+vmdk'],
|
||||
},
|
||||
{
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.raw',
|
||||
expected => ['import', 'import.ova/disk.raw', undef, undef, undef, undef, 'ova+raw'],
|
||||
description => "Import, innner file of ova",
|
||||
volname => 'import/import.ova/disk.raw',
|
||||
expected => ['import', 'import.ova/disk.raw', undef, undef, undef, undef, 'ova+raw'],
|
||||
},
|
||||
#
|
||||
# failed matches
|
||||
#
|
||||
{
|
||||
description => "Failed match: VM disk image, base, raw",
|
||||
volname => "ssss/base-$vmid-disk-0.raw",
|
||||
expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.raw'\n",
|
||||
description => "Failed match: VM disk image, base, raw",
|
||||
volname => "ssss/base-$vmid-disk-0.raw",
|
||||
expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.raw'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: ISO image, dvd',
|
||||
volname => 'iso/yet-again-a-installation-disk.dvd',
|
||||
expected => "unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n",
|
||||
description => 'Failed match: ISO image, dvd',
|
||||
volname => 'iso/yet-again-a-installation-disk.dvd',
|
||||
expected =>
|
||||
"unable to parse directory volume name 'iso/yet-again-a-installation-disk.dvd'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: Container template, zip.gz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz',
|
||||
expected => "unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n",
|
||||
description => 'Failed match: Container template, zip.gz',
|
||||
volname => 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz',
|
||||
expected =>
|
||||
"unable to parse directory volume name 'vztmpl/debian-10.0-standard_10.0-1_amd64.zip.gz'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: Container rootdir, subvol',
|
||||
volname => "rootdir/subvol-$vmid-disk-0",
|
||||
expected => "unable to parse directory volume name 'rootdir/subvol-$vmid-disk-0'\n",
|
||||
description => 'Failed match: Container rootdir, subvol',
|
||||
volname => "rootdir/subvol-$vmid-disk-0",
|
||||
expected => "unable to parse directory volume name 'rootdir/subvol-$vmid-disk-0'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: VM disk image, linked, vhdx',
|
||||
volname => "$vmid/base-$vmid-disk-0.vhdx/$vmid/vm-$vmid-disk-0.vhdx",
|
||||
expected => "unable to parse volume filename 'base-$vmid-disk-0.vhdx'\n",
|
||||
description => 'Failed match: VM disk image, linked, vhdx',
|
||||
volname => "$vmid/base-$vmid-disk-0.vhdx/$vmid/vm-$vmid-disk-0.vhdx",
|
||||
expected => "unable to parse volume filename 'base-$vmid-disk-0.vhdx'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: VM disk image, linked, qcow2, first vmid',
|
||||
volname => "ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
|
||||
expected => "unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n",
|
||||
description => 'Failed match: VM disk image, linked, qcow2, first vmid',
|
||||
volname => "ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2",
|
||||
expected =>
|
||||
"unable to parse directory volume name 'ssss/base-$vmid-disk-0.qcow2/$vmid/vm-$vmid-disk-0.qcow2'\n",
|
||||
},
|
||||
{
|
||||
description => 'Failed match: VM disk image, linked, qcow2, second vmid',
|
||||
volname => "$vmid/base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2",
|
||||
expected => "unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n",
|
||||
description => 'Failed match: VM disk image, linked, qcow2, second vmid',
|
||||
volname => "$vmid/base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2",
|
||||
expected =>
|
||||
"unable to parse volume filename 'base-$vmid-disk-0.qcow2/ssss/vm-$vmid-disk-0.qcow2'\n",
|
||||
},
|
||||
{
|
||||
description => "Failed match: import dir but no ova/ovf/disk image",
|
||||
volname => "import/test.foo",
|
||||
expected => "unable to parse directory volume name 'import/test.foo'\n",
|
||||
description => "Failed match: import dir but no ova/ovf/disk image",
|
||||
volname => "import/test.foo",
|
||||
expected => "unable to parse directory volume name 'import/test.foo'\n",
|
||||
},
|
||||
];
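All 'expected' arrays in this file follow the return order of parse_volname: (vtype, name, vmid, basename, basevmid, isBase, format). The field names used here are descriptive labels; the order is what the test data above relies on. A minimal sketch of a single call:

use strict;
use warnings;
use PVE::Storage::Plugin;

my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
    PVE::Storage::Plugin->parse_volname('1234/vm-1234-disk-0.qcow2');
print "$vtype $name $vmid $format\n";    # images vm-1234-disk-0.qcow2 1234 qcow2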
|
||||
|
||||
# create more test cases for VM disk images matches
|
||||
my $disk_suffix = [ 'raw', 'qcow2', 'vmdk' ];
|
||||
my $disk_suffix = ['raw', 'qcow2', 'vmdk'];
|
||||
foreach my $s (@$disk_suffix) {
|
||||
my @arr = (
|
||||
{
|
||||
description => "VM disk image, $s",
|
||||
volname => "$vmid/vm-$vmid-disk-1.$s",
|
||||
expected => [
|
||||
'images',
|
||||
"vm-$vmid-disk-1.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
"$s",
|
||||
],
|
||||
},
|
||||
{
|
||||
description => "VM disk image, linked, $s",
|
||||
volname => "$vmid/base-$vmid-disk-0.$s/$vmid/vm-$vmid-disk-0.$s",
|
||||
expected => [
|
||||
'images',
|
||||
"vm-$vmid-disk-0.$s",
|
||||
"$vmid",
|
||||
"base-$vmid-disk-0.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
"$s",
|
||||
],
|
||||
},
|
||||
{
|
||||
description => "VM disk image, base, $s",
|
||||
volname => "$vmid/base-$vmid-disk-0.$s",
|
||||
expected => [
|
||||
'images',
|
||||
"base-$vmid-disk-0.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
undef,
|
||||
'base-',
|
||||
"$s"
|
||||
],
|
||||
},
|
||||
{
|
||||
description => "VM disk image, $s",
|
||||
volname => "$vmid/vm-$vmid-disk-1.$s",
|
||||
expected => [
|
||||
'images', "vm-$vmid-disk-1.$s", "$vmid", undef, undef, undef, "$s",
|
||||
],
|
||||
},
|
||||
{
|
||||
description => "VM disk image, linked, $s",
|
||||
volname => "$vmid/base-$vmid-disk-0.$s/$vmid/vm-$vmid-disk-0.$s",
|
||||
expected => [
|
||||
'images',
|
||||
"vm-$vmid-disk-0.$s",
|
||||
"$vmid",
|
||||
"base-$vmid-disk-0.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
"$s",
|
||||
],
|
||||
},
|
||||
{
|
||||
description => "VM disk image, base, $s",
|
||||
volname => "$vmid/base-$vmid-disk-0.$s",
|
||||
expected => [
|
||||
'images', "base-$vmid-disk-0.$s", "$vmid", undef, undef, 'base-', "$s",
|
||||
],
|
||||
},
|
||||
);
|
||||
|
||||
push @$tests, @arr;
|
||||
}
|
||||
|
||||
|
||||
# create more test cases for backup files matches
|
||||
my $bkp_suffix = {
|
||||
qemu => [ 'vma', 'vma.gz', 'vma.lzo', 'vma.zst' ],
|
||||
lxc => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2' ],
|
||||
openvz => [ 'tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst' ],
|
||||
qemu => ['vma', 'vma.gz', 'vma.lzo', 'vma.zst'],
|
||||
lxc => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst', 'tar.bz2'],
|
||||
openvz => ['tar', 'tgz', 'tar.gz', 'tar.lzo', 'tar.zst'],
|
||||
};
|
||||
|
||||
foreach my $virt (keys %$bkp_suffix) {
|
||||
my $suffix = $bkp_suffix->{$virt};
|
||||
foreach my $s (@$suffix) {
|
||||
my @arr = (
|
||||
{
|
||||
description => "Backup archive, $virt, $s",
|
||||
volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
expected => [
|
||||
'backup',
|
||||
"vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw'
|
||||
],
|
||||
},
|
||||
);
|
||||
my @arr = (
|
||||
{
|
||||
description => "Backup archive, $virt, $s",
|
||||
volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
expected => [
|
||||
'backup',
|
||||
"vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
"$vmid",
|
||||
undef,
|
||||
undef,
|
||||
undef,
|
||||
'raw',
|
||||
],
|
||||
},
|
||||
);
|
||||
|
||||
push @$tests, @arr;
|
||||
push @$tests, @arr;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# create more test cases for failed backup files matches
|
||||
my $non_bkp_suffix = {
|
||||
qemu => [ 'vms.gz', 'vma.xz' ],
|
||||
lxc => [ 'zip.gz', 'tgz.lzo' ],
|
||||
qemu => ['vms.gz', 'vma.xz'],
|
||||
lxc => ['zip.gz', 'tgz.lzo'],
|
||||
};
|
||||
foreach my $virt (keys %$non_bkp_suffix) {
|
||||
my $suffix = $non_bkp_suffix->{$virt};
|
||||
foreach my $s (@$suffix) {
|
||||
my @arr = (
|
||||
{
|
||||
description => "Failed match: Backup archive, $virt, $s",
|
||||
volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
expected => "unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n",
|
||||
},
|
||||
);
|
||||
my @arr = (
|
||||
{
|
||||
description => "Failed match: Backup archive, $virt, $s",
|
||||
volname => "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s",
|
||||
expected =>
|
||||
"unable to parse directory volume name 'backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s'\n",
|
||||
},
|
||||
);
|
||||
|
||||
push @$tests, @arr;
|
||||
push @$tests, @arr;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# run through test case array
|
||||
#
|
||||
@ -278,12 +312,12 @@ foreach my $t (@$tests) {
|
||||
my $expected = $t->{expected};
|
||||
|
||||
my $got;
|
||||
eval { $got = [ PVE::Storage::Plugin->parse_volname($volname) ] };
|
||||
eval { $got = [PVE::Storage::Plugin->parse_volname($volname)] };
|
||||
$got = $@ if $@;
|
||||
|
||||
is_deeply($got, $expected, $description);
|
||||
|
||||
$seen_vtype->{@$expected[0]} = 1 if ref $expected eq 'ARRAY';
|
||||
$seen_vtype->{ @$expected[0] } = 1 if ref $expected eq 'ARRAY';
|
||||
}
|
||||
|
||||
# to check if all $vtype_subdirs are defined in path_to_volume_id
|
||||
|
||||
@ -17,24 +17,24 @@ use File::Temp;
|
||||
my $storage_dir = File::Temp->newdir();
|
||||
my $scfg = {
|
||||
'digest' => 'd29306346b8b25b90a4a96165f1e8f52d1af1eda',
|
||||
'ids' => {
|
||||
'local' => {
|
||||
'shared' => 0,
|
||||
'path' => "$storage_dir",
|
||||
'type' => 'dir',
|
||||
'maxfiles' => 0,
|
||||
'content' => {
|
||||
'snippets' => 1,
|
||||
'rootdir' => 1,
|
||||
'images' => 1,
|
||||
'iso' => 1,
|
||||
'backup' => 1,
|
||||
'vztmpl' => 1,
|
||||
},
|
||||
},
|
||||
'ids' => {
|
||||
'local' => {
|
||||
'shared' => 0,
|
||||
'path' => "$storage_dir",
|
||||
'type' => 'dir',
|
||||
'maxfiles' => 0,
|
||||
'content' => {
|
||||
'snippets' => 1,
|
||||
'rootdir' => 1,
|
||||
'images' => 1,
|
||||
'iso' => 1,
|
||||
'backup' => 1,
|
||||
'vztmpl' => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
'order' => {
|
||||
'local' => 1,
|
||||
'local' => 1,
|
||||
},
|
||||
};
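The test cases below call path_to_volume_id with the storage configuration above and a path inside $storage_dir; on a match it yields the content type plus the corresponding volume ID, while for a failed match the tests expect a single empty string. A short sketch of one such call, reusing the fixture variables from above (meant as an in-file illustration, not a standalone script):

# e.g. for a snippet file below the configured directory storage:
my ($vtype, $volid) =
    PVE::Storage::path_to_volume_id($scfg, "$storage_dir/snippets/hookscript.pl");
# expected: ('snippets', 'local:snippets/hookscript.pl')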
|
||||
|
||||
@ -44,219 +44,199 @@ my $scfg = {
|
||||
# expected => the result that path_to_volume_id should return
|
||||
my @tests = (
|
||||
{
|
||||
description => 'Image, qcow2',
|
||||
volname => "$storage_dir/images/16110/vm-16110-disk-0.qcow2",
|
||||
expected => [
|
||||
'images',
|
||||
'local:16110/vm-16110-disk-0.qcow2',
|
||||
],
|
||||
description => 'Image, qcow2',
|
||||
volname => "$storage_dir/images/16110/vm-16110-disk-0.qcow2",
|
||||
expected => [
|
||||
'images', 'local:16110/vm-16110-disk-0.qcow2',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Image, raw',
|
||||
volname => "$storage_dir/images/16112/vm-16112-disk-0.raw",
|
||||
expected => [
|
||||
'images',
|
||||
'local:16112/vm-16112-disk-0.raw',
|
||||
],
|
||||
description => 'Image, raw',
|
||||
volname => "$storage_dir/images/16112/vm-16112-disk-0.raw",
|
||||
expected => [
|
||||
'images', 'local:16112/vm-16112-disk-0.raw',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Image template, qcow2',
|
||||
volname => "$storage_dir/images/9004/base-9004-disk-0.qcow2",
|
||||
expected => [
|
||||
'images',
|
||||
'local:9004/base-9004-disk-0.qcow2',
|
||||
],
|
||||
description => 'Image template, qcow2',
|
||||
volname => "$storage_dir/images/9004/base-9004-disk-0.qcow2",
|
||||
expected => [
|
||||
'images', 'local:9004/base-9004-disk-0.qcow2',
|
||||
],
|
||||
},
|
||||
|
||||
{
|
||||
description => 'Backup, vma.gz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
|
||||
],
|
||||
description => 'Backup, vma.gz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_11_40.vma.gz',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, vma.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
|
||||
],
|
||||
description => 'Backup, vma.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_12_45.vma.lzo',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, vma',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
|
||||
],
|
||||
description => 'Backup, vma',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, tar.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
|
||||
],
|
||||
description => 'Backup, tar.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.lzo',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, vma.zst',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst'
|
||||
],
|
||||
description => 'Backup, vma.zst',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-qemu-16110-2020_03_30-21_13_55.vma.zst',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, tar.zst',
|
||||
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst'
|
||||
],
|
||||
description => 'Backup, tar.zst',
|
||||
volname => "$storage_dir/dump/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-lxc-16112-2020_03_30-21_39_30.tar.zst',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Backup, tar.bz2',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2",
|
||||
expected => [
|
||||
'backup',
|
||||
'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
|
||||
],
|
||||
description => 'Backup, tar.bz2',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2",
|
||||
expected => [
|
||||
'backup', 'local:backup/vzdump-openvz-16112-2020_03_30-21_39_30.tar.bz2',
|
||||
],
|
||||
},
|
||||
|
||||
{
|
||||
description => 'ISO file',
|
||||
volname => "$storage_dir/template/iso/yet-again-a-installation-disk.iso",
|
||||
expected => [
|
||||
'iso',
|
||||
'local:iso/yet-again-a-installation-disk.iso',
|
||||
],
|
||||
description => 'ISO file',
|
||||
volname => "$storage_dir/template/iso/yet-again-a-installation-disk.iso",
|
||||
expected => [
|
||||
'iso', 'local:iso/yet-again-a-installation-disk.iso',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'CT template, tar.gz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz",
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
],
|
||||
description => 'CT template, tar.gz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.gz",
|
||||
expected => [
|
||||
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.gz',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'CT template, wrong ending, tar bz2',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.bz2",
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
|
||||
],
|
||||
description => 'CT template, wrong ending, tar bz2',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.bz2",
|
||||
expected => [
|
||||
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.bz2',
|
||||
],
|
||||
},
|
||||
|
||||
{
|
||||
description => 'Rootdir',
|
||||
volname => "$storage_dir/private/1234/", # fileparse needs / at the end
|
||||
expected => [
|
||||
'rootdir',
|
||||
'local:rootdir/1234',
|
||||
],
|
||||
description => 'Rootdir',
|
||||
volname => "$storage_dir/private/1234/", # fileparse needs / at the end
|
||||
expected => [
|
||||
'rootdir', 'local:rootdir/1234',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Rootdir, folder subvol',
|
||||
volname => "$storage_dir/images/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end
|
||||
expected => [
|
||||
'images',
|
||||
'local:1234/subvol-1234-disk-0.subvol'
|
||||
],
|
||||
description => 'Rootdir, folder subvol',
|
||||
volname => "$storage_dir/images/1234/subvol-1234-disk-0.subvol/", # fileparse needs / at the end
|
||||
expected => [
|
||||
'images', 'local:1234/subvol-1234-disk-0.subvol',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Snippets, yaml',
|
||||
volname => "$storage_dir/snippets/userconfig.yaml",
|
||||
expected => [
|
||||
'snippets',
|
||||
'local:snippets/userconfig.yaml',
|
||||
],
|
||||
description => 'Snippets, yaml',
|
||||
volname => "$storage_dir/snippets/userconfig.yaml",
|
||||
expected => [
|
||||
'snippets', 'local:snippets/userconfig.yaml',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Snippets, hookscript',
|
||||
volname => "$storage_dir/snippets/hookscript.pl",
|
||||
expected => [
|
||||
'snippets',
|
||||
'local:snippets/hookscript.pl',
|
||||
],
|
||||
description => 'Snippets, hookscript',
|
||||
volname => "$storage_dir/snippets/hookscript.pl",
|
||||
expected => [
|
||||
'snippets', 'local:snippets/hookscript.pl',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'CT template, tar.xz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.xz",
|
||||
expected => [
|
||||
'vztmpl',
|
||||
'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
|
||||
],
|
||||
description => 'CT template, tar.xz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.tar.xz",
|
||||
expected => [
|
||||
'vztmpl', 'local:vztmpl/debian-10.0-standard_10.0-1_amd64.tar.xz',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Import, ova',
|
||||
volname => "$storage_dir/import/import.ova",
|
||||
expected => [
|
||||
'import',
|
||||
'local:import/import.ova',
|
||||
],
|
||||
description => 'Import, ova',
|
||||
volname => "$storage_dir/import/import.ova",
|
||||
expected => [
|
||||
'import', 'local:import/import.ova',
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'Import, ovf',
|
||||
volname => "$storage_dir/import/import.ovf",
|
||||
expected => [
|
||||
'import',
|
||||
'local:import/import.ovf',
|
||||
],
|
||||
description => 'Import, ovf',
|
||||
volname => "$storage_dir/import/import.ovf",
|
||||
expected => [
|
||||
'import', 'local:import/import.ovf',
|
||||
],
|
||||
},
|
||||
|
||||
# no matches, path or files with failures
|
||||
{
|
||||
description => 'Base template, string as vmid in folder name',
|
||||
volname => "$storage_dir/images/ssss/base-4321-disk-0.raw",
|
||||
expected => [''],
|
||||
description => 'Base template, string as vmid in folder name',
|
||||
volname => "$storage_dir/images/ssss/base-4321-disk-0.raw",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'ISO file, wrong ending',
|
||||
volname => "$storage_dir/template/iso/yet-again-a-installation-disk.dvd",
|
||||
expected => [''],
|
||||
description => 'ISO file, wrong ending',
|
||||
volname => "$storage_dir/template/iso/yet-again-a-installation-disk.dvd",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'CT template, wrong ending, zip.gz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz",
|
||||
expected => [''],
|
||||
description => 'CT template, wrong ending, zip.gz',
|
||||
volname => "$storage_dir/template/cache/debian-10.0-standard_10.0-1_amd64.zip.gz",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Rootdir as subvol, wrong path',
|
||||
volname => "$storage_dir/private/subvol-19254-disk-0/",
|
||||
expected => [''],
|
||||
description => 'Rootdir as subvol, wrong path',
|
||||
volname => "$storage_dir/private/subvol-19254-disk-0/",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Backup, wrong format, openvz, zip.gz',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz",
|
||||
expected => [''],
|
||||
description => 'Backup, wrong format, openvz, zip.gz',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.zip.gz",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Backup, wrong format, openvz, tgz.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo",
|
||||
expected => [''],
|
||||
description => 'Backup, wrong format, openvz, tgz.lzo',
|
||||
volname => "$storage_dir/dump/vzdump-openvz-16112-2020_03_30-21_39_30.tgz.lzo",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Backup, wrong ending, qemu, vma.xz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz",
|
||||
expected => [''],
|
||||
description => 'Backup, wrong ending, qemu, vma.xz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vma.xz",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Backup, wrong format, qemu, vms.gz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz",
|
||||
expected => [''],
|
||||
description => 'Backup, wrong format, qemu, vms.gz',
|
||||
volname => "$storage_dir/dump/vzdump-qemu-16110-2020_03_30-21_12_40.vms.gz",
|
||||
expected => [''],
|
||||
},
|
||||
{
|
||||
description => 'Image, string as vmid in folder name',
|
||||
volname => "$storage_dir/images/ssss/vm-1234-disk-0.qcow2",
|
||||
expected => [''],
|
||||
description => 'Image, string as vmid in folder name',
|
||||
volname => "$storage_dir/images/ssss/vm-1234-disk-0.qcow2",
|
||||
expected => [''],
|
||||
},
|
||||
{
description => 'Import, non ova/ovf/disk image in import dir',
volname => "$storage_dir/import/test.foo",
expected => [''],
description => 'Import, non ova/ovf/disk image in import dir',
volname => "$storage_dir/import/test.foo",
expected => [''],
},
);
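The reflow visible throughout the test list above (short expected lists joined onto one line, trailing commas added, four-space indentation) is what perltidy produces when driven with a matching profile. A minimal sketch of calling it through the Perl::Tidy module follows; the option values are an assumption for illustration only, not the actual Proxmox wrapper configuration:

use Perl::Tidy;

my $source = do { local $/; <STDIN> };    # slurp the file to be tidied from stdin
my $tidied;

# -l: maximum line length, -i/-ci: block and continuation indentation,
# -pt=2: tight parentheses, similar to the reformatted calls seen in this diff
Perl::Tidy::perltidy(
    source => \$source,
    destination => \$tidied,
    argv => '-l=100 -i=4 -ci=4 -pt=2',
);

print $tidied;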
@ -275,19 +255,19 @@ foreach my $tt (@tests) {
make_path($dir, { verbose => 1, mode => 0755 });

if ($name) {
open(my $fh, ">>", "$file") || die "Error open file: $!";
close($fh);
open(my $fh, ">>", "$file") || die "Error open file: $!";
close($fh);
}

# run tests
my $got;
eval { $got = [ PVE::Storage::path_to_volume_id($scfg, $file) ] };
eval { $got = [PVE::Storage::path_to_volume_id($scfg, $file)] };
$got = $@ if $@;

is_deeply($got, $expected, $description) || diag(explain($got));

$seen_vtype->{@$expected[0]} = 1
if ( @$expected[0] ne '' && scalar @$expected > 1);
$seen_vtype->{ @$expected[0] } = 1
if (@$expected[0] ne '' && scalar @$expected > 1);
}

# to check if all $vtype_subdirs are defined in path_to_volume_id
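The comment above refers to a completeness check over the $seen_vtype hash filled in the loop. A rough sketch of such a check is shown below; the accessor used to obtain the vtype/subdir map is an assumption here, the real test may fetch it differently:

# hypothetical completeness check for $seen_vtype
my $vtype_subdirs = PVE::Storage::Plugin::get_vtype_subdirs();    # assumed accessor
for my $vtype (sort keys %$vtype_subdirs) {
    ok(defined($seen_vtype->{$vtype}), "path_to_volume_id handles vtype '$vtype'");
}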
@ -18,183 +18,193 @@ my $mocked_backups_lists = {};
|
||||
my $basetime = 1577881101; # 2020_01_01-12_18_21 UTC
|
||||
|
||||
foreach my $vmid (@vmids) {
|
||||
push @{$mocked_backups_lists->{default}}, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 585*24*60*60 - 60*60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24*60*60 - 60*60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
|
||||
'ctime' => $basetime - 24*60*60 - 60*60 + 30,
|
||||
'vmid' => $vmid,
|
||||
'protected' => 1,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
|
||||
'ctime' => $basetime - 24*60*60 - 60*60 + 60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 60*60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst",
|
||||
'ctime' => 1234,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
);
|
||||
push @{ $mocked_backups_lists->{default} },
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30,
|
||||
'vmid' => $vmid,
|
||||
'protected' => 1,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
|
||||
'ctime' => $basetime - 60 * 60,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst",
|
||||
'ctime' => 1234,
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
);
|
||||
}
|
||||
push @{$mocked_backups_lists->{year1970}}, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst",
|
||||
'ctime' => 83,
|
||||
'vmid' => 321,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
|
||||
'ctime' => 60*60*24 * (365*100 + 25) + 60,
|
||||
'vmid' => 321,
|
||||
},
|
||||
);
|
||||
push @{$mocked_backups_lists->{novmid}}, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz",
|
||||
'ctime' => 1234,
|
||||
},
|
||||
);
|
||||
push @{$mocked_backups_lists->{threeway}}, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 7*24*60*60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24*60*60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
);
|
||||
push @{$mocked_backups_lists->{weekboundary}}, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+2)*24*60*60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+3)*24*60*60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+6)*24*60*60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
);
|
||||
push @{ $mocked_backups_lists->{year1970} },
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst",
|
||||
'ctime' => 83,
|
||||
'vmid' => 321,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
|
||||
'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60,
|
||||
'vmid' => 321,
|
||||
},
|
||||
);
|
||||
push @{ $mocked_backups_lists->{novmid} },
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz",
|
||||
'ctime' => 1234,
|
||||
},
|
||||
);
|
||||
push @{ $mocked_backups_lists->{threeway} },
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 7 * 24 * 60 * 60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24 * 60 * 60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
);
|
||||
push @{ $mocked_backups_lists->{weekboundary} },
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60,
|
||||
'vmid' => 7654,
|
||||
},
|
||||
);
|
||||
my $current_list;
my $mock_plugin = Test::MockModule->new('PVE::Storage::Plugin');
$mock_plugin->redefine(list_volumes => sub {
my ($class, $storeid, $scfg, $vmid, $content_types) = @_;
$mock_plugin->redefine(
list_volumes => sub {
my ($class, $storeid, $scfg, $vmid, $content_types) = @_;

my $list = $mocked_backups_lists->{$current_list};
my $list = $mocked_backups_lists->{$current_list};

return $list if !defined($vmid);
return $list if !defined($vmid);

return [ grep { $_->{vmid} eq $vmid } @{$list} ];
});
return [grep { $_->{vmid} eq $vmid } @{$list}];
},
);
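Test::MockModule's redefine(), used above, refuses to replace a subroutine that does not already exist in the mocked package, so the test fails loudly if list_volumes were ever renamed upstream; mock() would silently create it instead. A minimal illustration of that difference, not taken from this test suite:

use Test::MockModule;

my $m = Test::MockModule->new('PVE::Storage::Plugin');
$m->redefine(list_volumes => sub { return []; });    # fine, list_volumes exists
# $m->redefine(no_such_sub => sub { 1 });            # would croak: nothing to redefine
# $m->mock(no_such_sub => sub { 1 });                # mock() would quietly install it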
sub generate_expected {
|
||||
my ($vmids, $type, $marks) = @_;
|
||||
|
||||
my @expected;
|
||||
foreach my $vmid (@{$vmids}) {
|
||||
push @expected, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 585*24*60*60 - 60*60,
|
||||
'mark' => $marks->[0],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24*60*60 - 60*60,
|
||||
'mark' => $marks->[1],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24*60*60 - 60*60 + 30,
|
||||
'mark' => 'protected',
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24*60*60 - 60*60 + 60,
|
||||
'mark' => $marks->[2],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 60*60,
|
||||
'mark' => $marks->[3],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime,
|
||||
'mark' => $marks->[4],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type) || $type eq 'qemu';
|
||||
push @expected, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'type' => 'lxc',
|
||||
'ctime' => $basetime,
|
||||
'mark' => $marks->[5],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type) || $type eq 'lxc';
|
||||
push @expected, (
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst",
|
||||
'type' => 'unknown',
|
||||
'ctime' => 1234,
|
||||
'mark' => 'renamed',
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type);
|
||||
push @expected,
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2018_05_26-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 585 * 24 * 60 * 60 - 60 * 60,
|
||||
'mark' => $marks->[0],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60,
|
||||
'mark' => $marks->[1],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_18_51.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 30,
|
||||
'mark' => 'protected',
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2019_12_31-11_19_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 24 * 60 * 60 - 60 * 60 + 60,
|
||||
'mark' => $marks->[2],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-11_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime - 60 * 60,
|
||||
'mark' => $marks->[3],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'type' => 'qemu',
|
||||
'ctime' => $basetime,
|
||||
'mark' => $marks->[4],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type) || $type eq 'qemu';
|
||||
push @expected,
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-$vmid-2020_01_01-12_18_21.tar.zst",
|
||||
'type' => 'lxc',
|
||||
'ctime' => $basetime,
|
||||
'mark' => $marks->[5],
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type) || $type eq 'lxc';
|
||||
push @expected,
|
||||
(
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-$vmid-renamed.tar.zst",
|
||||
'type' => 'unknown',
|
||||
'ctime' => 1234,
|
||||
'mark' => 'renamed',
|
||||
'vmid' => $vmid,
|
||||
},
|
||||
) if !defined($type);
|
||||
}
|
||||
return [ sort { $a->{volid} cmp $b->{volid} } @expected ];
|
||||
return [sort { $a->{volid} cmp $b->{volid} } @expected];
|
||||
}
|
||||
|
||||
# an array of test cases, each test is comprised of the following keys:
|
||||
@ -208,268 +218,312 @@ sub generate_expected {
|
||||
# most of them are created further below
|
||||
my $tests = [
|
||||
{
|
||||
description => 'last=3, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 3,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'last=3, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 3,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'keep', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'weekly=2, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-weekly' => 2,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
|
||||
description => 'weekly=2, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-weekly' => 2,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'daily=weekly=monthly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'daily=weekly=monthly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'hourly=4, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-hourly' => 4,
|
||||
'keep-daily' => 0,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'hourly=4, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-hourly' => 4,
|
||||
'keep-daily' => 0,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['keep', 'remove', 'keep', 'keep', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'yearly=2, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-weekly' => 0,
|
||||
'keep-monthly' => 0,
|
||||
'keep-yearly' => 2,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'yearly=2, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-weekly' => 0,
|
||||
'keep-monthly' => 0,
|
||||
'keep-yearly' => 2,
|
||||
},
|
||||
expected => generate_expected(
|
||||
\@vmids,
|
||||
undef,
|
||||
['remove', 'remove', 'keep', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'last=2,hourly=2 one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-last' => 2,
|
||||
'keep-hourly' => 2,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'last=2,hourly=2 one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-last' => 2,
|
||||
'keep-hourly' => 2,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['keep', 'remove', 'keep', 'keep', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'last=1,monthly=2, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
'keep-monthly' => 2,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'last=1,monthly=2, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
'keep-monthly' => 2,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'monthly=3, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-monthly' => 3,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'monthly=3, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-monthly' => 3,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['keep', 'remove', 'keep', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'last=daily=weekly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'last=daily=weekly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'last=daily=weekly=1, others zero, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-last' => 1,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'last=daily=weekly=1, others zero, multiple IDs',
|
||||
keep => {
|
||||
'keep-hourly' => 0,
|
||||
'keep-last' => 1,
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'daily=2, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-daily' => 2,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['remove', 'remove', 'keep', 'remove', 'keep', 'keep']),
|
||||
description => 'daily=2, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-daily' => 2,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['remove', 'remove', 'keep', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'weekly=monthly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
|
||||
description => 'weekly=monthly=1, multiple IDs',
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
expected => generate_expected(
|
||||
\@vmids,
|
||||
undef,
|
||||
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'weekly=yearly=1, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], undef, ['keep', 'remove', 'remove', 'remove', 'keep', 'keep']),
|
||||
description => 'weekly=yearly=1, one ID',
|
||||
vmid => $vmids[0],
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
undef,
|
||||
['keep', 'remove', 'remove', 'remove', 'keep', 'keep'],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'weekly=yearly=1, one ID, type qemu',
|
||||
vmid => $vmids[0],
|
||||
type => 'qemu',
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], 'qemu', ['keep', 'remove', 'remove', 'remove', 'keep', '']),
|
||||
description => 'weekly=yearly=1, one ID, type qemu',
|
||||
vmid => $vmids[0],
|
||||
type => 'qemu',
|
||||
keep => {
|
||||
'keep-weekly' => 1,
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
expected => generate_expected(
|
||||
[$vmids[0]],
|
||||
'qemu',
|
||||
['keep', 'remove', 'remove', 'remove', 'keep', ''],
|
||||
),
|
||||
},
|
||||
{
|
||||
description => 'week=yearly=1, one ID, type lxc',
|
||||
vmid => $vmids[0],
|
||||
type => 'lxc',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], 'lxc', ['', '', '', '', '', 'keep']),
|
||||
description => 'week=yearly=1, one ID, type lxc',
|
||||
vmid => $vmids[0],
|
||||
type => 'lxc',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
},
|
||||
expected => generate_expected([$vmids[0]], 'lxc', ['', '', '', '', '', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'yearly=1, year before 2000',
|
||||
keep => {
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
list => 'year1970',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst",
|
||||
'ctime' => 83,
|
||||
'mark' => 'remove',
|
||||
'type' => 'lxc',
|
||||
'vmid' => 321,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
|
||||
'ctime' => 60*60*24 * (365*100 + 25) + 60,
|
||||
'mark' => 'keep',
|
||||
'type' => 'lxc',
|
||||
'vmid' => 321,
|
||||
},
|
||||
],
|
||||
description => 'yearly=1, year before 2000',
|
||||
keep => {
|
||||
'keep-yearly' => 1,
|
||||
},
|
||||
list => 'year1970',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-1970_01_01-00_01_23.tar.zst",
|
||||
'ctime' => 83,
|
||||
'mark' => 'remove',
|
||||
'type' => 'lxc',
|
||||
'vmid' => 321,
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-321-2070_01_01-00_01_00.tar.zst",
|
||||
'ctime' => 60 * 60 * 24 * (365 * 100 + 25) + 60,
|
||||
'mark' => 'keep',
|
||||
'type' => 'lxc',
|
||||
'vmid' => 321,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'last=1, ne ID, year before 2000',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
},
|
||||
list => 'novmid',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz",
|
||||
'ctime' => 1234,
|
||||
'mark' => 'renamed',
|
||||
'type' => 'lxc',
|
||||
},
|
||||
],
|
||||
description => 'last=1, ne ID, year before 2000',
|
||||
keep => {
|
||||
'keep-last' => 1,
|
||||
},
|
||||
list => 'novmid',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-lxc-novmid.tar.gz",
|
||||
'ctime' => 1234,
|
||||
'mark' => 'renamed',
|
||||
'type' => 'lxc',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'all missing, multiple IDs',
|
||||
keep => {},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'all missing, multiple IDs',
|
||||
keep => {},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'all zero, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 0,
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-weekly' => 0,
|
||||
'keep-monthyl' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'all zero, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 0,
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-weekly' => 0,
|
||||
'keep-monthyl' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'some zero, some missing, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 0,
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-monthyl' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected => generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
description => 'some zero, some missing, multiple IDs',
|
||||
keep => {
|
||||
'keep-last' => 0,
|
||||
'keep-hourly' => 0,
|
||||
'keep-daily' => 0,
|
||||
'keep-monthyl' => 0,
|
||||
'keep-yearly' => 0,
|
||||
},
|
||||
expected =>
|
||||
generate_expected(\@vmids, undef, ['keep', 'keep', 'keep', 'keep', 'keep', 'keep']),
|
||||
},
|
||||
{
|
||||
description => 'daily=weekly=monthly=1',
|
||||
keep => {
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
list => 'threeway',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 7*24*60*60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24*60*60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'remove', # month is already covered by the backup kept by keep-weekly!
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
],
|
||||
description => 'daily=weekly=monthly=1',
|
||||
keep => {
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
'keep-monthly' => 1,
|
||||
},
|
||||
list => 'threeway',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_25-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 7 * 24 * 60 * 60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2019_12_31-12_18_21.tar.zst",
|
||||
'ctime' => $basetime - 24 * 60 * 60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'remove', # month is already covered by the backup kept by keep-weekly!
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_01_01-12_18_21.tar.zst",
|
||||
'ctime' => $basetime,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
description => 'daily=weekly=1,weekboundary',
|
||||
keep => {
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
},
|
||||
list => 'weekboundary',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+2)*24*60*60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'remove',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+3)*24*60*60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366-31+6)*24*60*60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
],
|
||||
description => 'daily=weekly=1,weekboundary',
|
||||
keep => {
|
||||
'keep-daily' => 1,
|
||||
'keep-weekly' => 1,
|
||||
},
|
||||
list => 'weekboundary',
|
||||
expected => [
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_03-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 2) * 24 * 60 * 60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'remove',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_04-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 3) * 24 * 60 * 60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
{
|
||||
'volid' => "$storeid:backup/vzdump-qemu-7654-2020_12_07-12_18_21.tar.zst",
|
||||
'ctime' => $basetime + (366 - 31 + 6) * 24 * 60 * 60,
|
||||
'type' => 'qemu',
|
||||
'vmid' => 7654,
|
||||
'mark' => 'keep',
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
@ -478,9 +532,11 @@ plan tests => scalar @$tests;
for my $tt (@$tests) {

my $got = eval {
$current_list = $tt->{list} // 'default';
my $res = PVE::Storage::Plugin->prune_backups($tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1);
return [ sort { $a->{volid} cmp $b->{volid} } @{$res} ];
$current_list = $tt->{list} // 'default';
my $res = PVE::Storage::Plugin->prune_backups(
$tt->{scfg}, $storeid, $tt->{keep}, $tt->{vmid}, $tt->{type}, 1,
);
return [sort { $a->{volid} cmp $b->{volid} } @{$res}];
};
$got = $@ if $@;
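The trailing 1 passed to prune_backups() above is its dry-run flag, so the call only computes keep/remove/protected marks without deleting anything. A rough sketch of how a caller could act on such a result afterwards; the print is only a stand-in for the removal a non dry-run prune would perform:

for my $entry (@{$got}) {
    next if ($entry->{mark} // '') ne 'remove';
    print "would prune $entry->{volid}\n";    # a real prune would free the archive here
}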
@ -26,7 +26,7 @@ use JSON;
use PVE::Tools qw(run_command);

my $pool = "testpool";
my $use_existing= undef;
my $use_existing = undef;
my $namespace = "testspace";
my $showhelp = '';
my $vmid = 999999;
@ -46,7 +46,7 @@ Known options are:
-h, --help Print this help message
";

GetOptions (
GetOptions(
"pool=s" => \$pool,
"use-existing" => \$use_existing,
"namespace=s" => \$namespace,
@ -54,7 +54,7 @@ GetOptions (
"h|help" => \$showhelp,
"cleanup" => \$cleanup,
"d|debug" => \$DEBUG,
) or die ($helpstring);
) or die($helpstring);
|
||||
if ($showhelp) {
|
||||
warn $helpstring;
|
||||
@ -69,6 +69,7 @@ my $vmid_linked_clone = int($vmid) - 2;
|
||||
sub jp {
|
||||
print to_json($_[0], { utf8 => 8, pretty => 1, canonical => 1 }) . "\n";
|
||||
}
|
||||
|
||||
sub dbgvar {
|
||||
jp(@_) if $DEBUG;
|
||||
}
|
||||
@ -77,26 +78,24 @@ sub run_cmd {
|
||||
my ($cmd, $json, $ignore_errors) = @_;
|
||||
|
||||
my $raw = '';
|
||||
my $parser = sub {$raw .= shift;};
|
||||
my $parser = sub { $raw .= shift; };
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $parser);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $parser); };
|
||||
if (my $err = $@) {
|
||||
die $err if !$ignore_errors;
|
||||
die $err if !$ignore_errors;
|
||||
}
|
||||
|
||||
if ($json) {
|
||||
my $result;
|
||||
if ($raw eq '') {
|
||||
$result = [];
|
||||
} elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
|
||||
$result = JSON::decode_json($1);
|
||||
} else {
|
||||
die "got unexpected data from command: '$cmd' -> '$raw'\n";
|
||||
}
|
||||
return $result;
|
||||
}
|
||||
my $result;
|
||||
if ($raw eq '') {
|
||||
$result = [];
|
||||
} elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
|
||||
$result = JSON::decode_json($1);
|
||||
} else {
|
||||
die "got unexpected data from command: '$cmd' -> '$raw'\n";
|
||||
}
|
||||
return $result;
|
||||
}
|
||||
return $raw;
|
||||
}
|
||||
|
||||
@ -105,17 +104,15 @@ sub run_test_cmd {
|
||||
|
||||
my $raw = '';
|
||||
my $out = sub {
|
||||
my $line = shift;
|
||||
$raw .= "${line}\n";
|
||||
my $line = shift;
|
||||
$raw .= "${line}\n";
|
||||
};
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $out);
|
||||
};
|
||||
eval { run_command($cmd, outfunc => $out); };
|
||||
if (my $err = $@) {
|
||||
print $raw;
|
||||
print $err;
|
||||
return 0;
|
||||
print $raw;
|
||||
print $err;
|
||||
return 0;
|
||||
}
|
||||
print $raw;
|
||||
return 1;
|
||||
@ -126,23 +123,23 @@ sub prepare {
|
||||
|
||||
my $pools = run_cmd("ceph osd pool ls --format json", 1);
|
||||
|
||||
my %poolnames = map {$_ => 1} @$pools;
|
||||
my %poolnames = map { $_ => 1 } @$pools;
|
||||
die "Pool '$pool' does not exist!\n"
|
||||
if !exists($poolnames{$pool}) && $use_existing;
|
||||
if !exists($poolnames{$pool}) && $use_existing;
|
||||
|
||||
run_cmd(['pveceph', 'pool', 'create', ${pool}, '--add_storages', 1])
|
||||
if !$use_existing;
|
||||
if !$use_existing;
|
||||
|
||||
my $namespaces = run_cmd(['rbd', '-p', ${pool}, 'namespace', 'ls', '--format', 'json'], 1);
|
||||
dbgvar($namespace);
|
||||
my $ns_found = 0;
|
||||
for my $i (@$namespaces) {
|
||||
$ns_found = 1 if $i->{name} eq $namespace;
|
||||
$ns_found = 1 if $i->{name} eq $namespace;
|
||||
}
|
||||
|
||||
if (!$ns_found) {
|
||||
print "Create namespace '${namespace}' in pool '${pool}'\n";
|
||||
run_cmd(['rbd', 'namespace', 'create', "${pool}/${namespace}"]);
|
||||
print "Create namespace '${namespace}' in pool '${pool}'\n";
|
||||
run_cmd(['rbd', 'namespace', 'create', "${pool}/${namespace}"]);
|
||||
}
|
||||
|
||||
my $storages = run_cmd(['pvesh', 'get', 'storage', '--output-format', 'json'], 1);
|
||||
@ -152,41 +149,67 @@ sub prepare {
|
||||
|
||||
print "Create storage definition\n";
|
||||
for my $stor (@$storages) {
|
||||
$pool_found = 1 if $stor->{storage} eq $pool;
|
||||
$rbd_found = 1 if $stor->{storage} eq $storage_name;
|
||||
$pool_found = 1 if $stor->{storage} eq $pool;
|
||||
$rbd_found = 1 if $stor->{storage} eq $storage_name;
|
||||
|
||||
if ($rbd_found) {
|
||||
run_cmd(['pvesm', 'set', ${storage_name}, '--krbd', '0']);
|
||||
die "Enable the storage '$stor->{storage}'!" if $stor->{disable};
|
||||
}
|
||||
if ($rbd_found) {
|
||||
run_cmd(['pvesm', 'set', ${storage_name}, '--krbd', '0']);
|
||||
die "Enable the storage '$stor->{storage}'!" if $stor->{disable};
|
||||
}
|
||||
}
|
||||
if (!$pool_found) {
|
||||
die "No storage for pool '${pool}' found! Must have same name as pool!\n"
|
||||
if $use_existing;
|
||||
die "No storage for pool '${pool}' found! Must have same name as pool!\n"
|
||||
if $use_existing;
|
||||
|
||||
run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']);
|
||||
run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']);
|
||||
}
|
||||
# create PVE storages (librbd / krbd)
|
||||
run_cmd(['pvesm', 'add', 'rbd', ${storage_name}, '--krbd', '0', '--pool', ${pool}, '--namespace', ${namespace}, '--content', 'images,rootdir'])
|
||||
if !$rbd_found;
|
||||
|
||||
run_cmd(
|
||||
[
|
||||
'pvesm',
|
||||
'add',
|
||||
'rbd',
|
||||
${storage_name},
|
||||
'--krbd',
|
||||
'0',
|
||||
'--pool',
|
||||
${pool},
|
||||
'--namespace',
|
||||
${namespace},
|
||||
'--content',
|
||||
'images,rootdir',
|
||||
],
|
||||
) if !$rbd_found;
|
||||
|
||||
# create test VM
|
||||
print "Create test VM ${vmid}\n";
|
||||
my $vms = run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'], 1);
|
||||
my $vms =
|
||||
run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'],
|
||||
1);
|
||||
for my $vm (@$vms) {
|
||||
# TODO: introduce a force flag to make this behaviour configurable
|
||||
# TODO: introduce a force flag to make this behaviour configurable
|
||||
|
||||
if ($vm->{vmid} eq $vmid) {
|
||||
print "Test VM '${vmid}' already exists. It will be removed and recreated!\n";
|
||||
run_cmd(['qm', 'stop', ${vmid}], 0, 1);
|
||||
run_cmd(['qm', 'destroy', ${vmid}]);
|
||||
}
|
||||
if ($vm->{vmid} eq $vmid) {
|
||||
print "Test VM '${vmid}' already exists. It will be removed and recreated!\n";
|
||||
run_cmd(['qm', 'stop', ${vmid}], 0, 1);
|
||||
run_cmd(['qm', 'destroy', ${vmid}]);
|
||||
}
|
||||
}
|
||||
run_cmd(['qm', 'create', ${vmid}, '--bios', 'ovmf', '--efidisk0', "${storage_name}:1", '--scsi0', "${storage_name}:2"]);
|
||||
run_cmd(
|
||||
[
|
||||
'qm',
|
||||
'create',
|
||||
${vmid},
|
||||
'--bios',
|
||||
'ovmf',
|
||||
'--efidisk0',
|
||||
"${storage_name}:1",
|
||||
'--scsi0',
|
||||
"${storage_name}:2",
|
||||
],
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
sub cleanup {
|
||||
print "Cleaning up test environment!\n";
|
||||
print "Removing VMs\n";
|
||||
@ -195,7 +218,21 @@ sub cleanup {
|
||||
run_cmd(['qm', 'stop', ${vmid_clone}], 0, 1);
|
||||
run_cmd(['qm', 'destroy', ${vmid_linked_clone}], 0, 1);
|
||||
run_cmd(['qm', 'destroy', ${vmid_clone}], 0, 1);
|
||||
run_cmd(['for', 'i', 'in', "/dev/rbd/${pool}/${namespace}/*;", 'do', '/usr/bin/rbd', 'unmap', '\$i;', 'done'], 0, 1);
|
||||
run_cmd(
|
||||
[
|
||||
'for',
|
||||
'i',
|
||||
'in',
|
||||
"/dev/rbd/${pool}/${namespace}/*;",
|
||||
'do',
|
||||
'/usr/bin/rbd',
|
||||
'unmap',
|
||||
'\$i;',
|
||||
'done',
|
||||
],
|
||||
0,
|
||||
1,
|
||||
);
|
||||
run_cmd(['qm', 'unlock', ${vmid}], 0, 1);
|
||||
run_cmd(['qm', 'destroy', ${vmid}], 0, 1);
|
||||
|
||||
@ -206,10 +243,10 @@ sub cleanup {
|
||||
run_cmd(['rbd', 'namespace', 'remove', "${pool}/${namespace}"], 0, 1);
|
||||
|
||||
if (!$use_existing) {
|
||||
print "Removing Storage definition for ${pool}\n";
|
||||
run_cmd(['pvesm', 'remove', ${pool}], 0, 1);
|
||||
print "Removing test pool\n";
|
||||
run_cmd(['pveceph', 'pool', 'destroy', $pool]);
|
||||
print "Removing Storage definition for ${pool}\n";
|
||||
run_cmd(['pvesm', 'remove', ${pool}], 0, 1);
|
||||
print "Removing test pool\n";
|
||||
run_cmd(['pveceph', 'pool', 'destroy', $pool]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -229,102 +266,96 @@ my $tests = [
|
||||
# ],
|
||||
# },
|
||||
{
|
||||
name => 'first VM start',
|
||||
steps => [
|
||||
['qm', 'start', $vmid],
|
||||
],
|
||||
name => 'first VM start',
|
||||
steps => [
|
||||
['qm', 'start', $vmid],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'snapshot/rollback',
|
||||
steps => [
|
||||
['qm', 'snapshot', $vmid, 'test'],
|
||||
['qm', 'rollback', $vmid, 'test'],
|
||||
],
|
||||
cleanup => [
|
||||
['qm', 'unlock', $vmid],
|
||||
],
|
||||
name => 'snapshot/rollback',
|
||||
steps => [
|
||||
['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'],
|
||||
],
|
||||
cleanup => [
|
||||
['qm', 'unlock', $vmid],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'remove snapshot',
|
||||
steps => [
|
||||
['qm', 'delsnapshot', $vmid, 'test'],
|
||||
],
|
||||
name => 'remove snapshot',
|
||||
steps => [
|
||||
['qm', 'delsnapshot', $vmid, 'test'],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'moving disk between namespaces',
|
||||
steps => [
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
|
||||
],
|
||||
name => 'moving disk between namespaces',
|
||||
steps => [
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'switch to krbd',
|
||||
preparations => [
|
||||
['qm', 'stop', $vmid],
|
||||
['pvesm', 'set', $storage_name, '--krbd', 1]
|
||||
],
|
||||
name => 'switch to krbd',
|
||||
preparations => [
|
||||
['qm', 'stop', $vmid], ['pvesm', 'set', $storage_name, '--krbd', 1],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'start VM with krbd',
|
||||
steps => [
|
||||
['qm', 'start', $vmid],
|
||||
],
|
||||
name => 'start VM with krbd',
|
||||
steps => [
|
||||
['qm', 'start', $vmid],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'snapshot/rollback with krbd',
|
||||
steps => [
|
||||
['qm', 'snapshot', $vmid, 'test'],
|
||||
['qm', 'rollback', $vmid, 'test'],
|
||||
],
|
||||
cleanup => [
|
||||
['qm', 'unlock', $vmid],
|
||||
],
|
||||
name => 'snapshot/rollback with krbd',
|
||||
steps => [
|
||||
['qm', 'snapshot', $vmid, 'test'], ['qm', 'rollback', $vmid, 'test'],
|
||||
],
|
||||
cleanup => [
|
||||
['qm', 'unlock', $vmid],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'remove snapshot with krbd',
|
||||
steps => [
|
||||
['qm', 'delsnapshot', $vmid, 'test'],
|
||||
],
|
||||
name => 'remove snapshot with krbd',
|
||||
steps => [
|
||||
['qm', 'delsnapshot', $vmid, 'test'],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'moving disk between namespaces with krbd',
|
||||
steps => [
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
|
||||
],
|
||||
name => 'moving disk between namespaces with krbd',
|
||||
steps => [
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
|
||||
['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'clone VM with krbd',
|
||||
steps => [
|
||||
['qm', 'clone', $vmid, $vmid_clone],
|
||||
],
|
||||
name => 'clone VM with krbd',
|
||||
steps => [
|
||||
['qm', 'clone', $vmid, $vmid_clone],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'switch to non krbd',
|
||||
preparations => [
|
||||
['qm', 'stop', $vmid],
|
||||
['qm', 'stop', $vmid_clone],
|
||||
['pvesm', 'set', $storage_name, '--krbd', 0]
|
||||
],
|
||||
name => 'switch to non krbd',
|
||||
preparations => [
|
||||
['qm', 'stop', $vmid],
|
||||
['qm', 'stop', $vmid_clone],
|
||||
['pvesm', 'set', $storage_name, '--krbd', 0],
|
||||
],
|
||||
},
|
||||
{
|
||||
name => 'templates and linked clone',
|
||||
steps => [
|
||||
['qm', 'template', $vmid],
|
||||
['qm', 'clone', $vmid, $vmid_linked_clone],
|
||||
['qm', 'start', $vmid_linked_clone],
|
||||
['qm', 'stop', $vmid_linked_clone],
|
||||
],
|
||||
name => 'templates and linked clone',
|
||||
steps => [
|
||||
['qm', 'template', $vmid],
|
||||
['qm', 'clone', $vmid, $vmid_linked_clone],
|
||||
['qm', 'start', $vmid_linked_clone],
|
||||
['qm', 'stop', $vmid_linked_clone],
|
||||
],
|
||||
},
|
||||
{
name => 'start linked clone with krbd',
preparations => [
['pvesm', 'set', $storage_name, '--krbd', 1]
],
steps => [
['qm', 'start', $vmid_linked_clone],
['qm', 'stop', $vmid_linked_clone],
],
name => 'start linked clone with krbd',
preparations => [['pvesm', 'set', $storage_name, '--krbd', 1]],
steps => [
['qm', 'start', $vmid_linked_clone], ['qm', 'stop', $vmid_linked_clone],
],
},
];
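Each entry in the $tests list above is a plain hash with a name plus optional preparations, steps and cleanup command lists, all run through run_cmd()/run_test_cmd() further down. A hypothetical extra entry would follow the same shape; the resize step is only an example and not part of this test plan:

{
    name => 'resize cloned disk with krbd',
    preparations => [['pvesm', 'set', $storage_name, '--krbd', 1]],
    steps => [
        ['qm', 'resize', $vmid_clone, 'scsi0', '+1G'],
    ],
},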
@ -332,8 +363,8 @@ sub run_prep_cleanup {
|
||||
my ($cmds) = @_;
|
||||
|
||||
for (@$cmds) {
|
||||
print join(' ', @$_). "\n";
|
||||
run_cmd($_);
|
||||
print join(' ', @$_) . "\n";
|
||||
run_cmd($_);
|
||||
}
|
||||
}
|
||||
|
||||
@ -341,7 +372,7 @@ sub run_steps {
|
||||
my ($steps) = @_;
|
||||
|
||||
for (@$steps) {
|
||||
ok(run_test_cmd($_), join(' ', @$_));
|
||||
ok(run_test_cmd($_), join(' ', @$_));
|
||||
}
|
||||
}
|
||||
|
||||
@ -350,23 +381,23 @@ sub run_tests {
|
||||
|
||||
my $num_tests = 0;
|
||||
for (@$tests) {
|
||||
$num_tests += scalar(@{$_->{steps}}) if defined $_->{steps};
|
||||
$num_tests += scalar(@{ $_->{steps} }) if defined $_->{steps};
|
||||
}
|
||||
|
||||
print("Tests: $num_tests\n");
|
||||
plan tests => $num_tests;
|
||||
|
||||
for my $test (@$tests) {
|
||||
print "Section: $test->{name}\n";
|
||||
run_prep_cleanup($test->{preparations}) if defined $test->{preparations};
|
||||
run_steps($test->{steps}) if defined $test->{steps};
|
||||
run_prep_cleanup($test->{cleanup}) if defined $test->{cleanup};
|
||||
print "Section: $test->{name}\n";
|
||||
run_prep_cleanup($test->{preparations}) if defined $test->{preparations};
|
||||
run_steps($test->{steps}) if defined $test->{steps};
|
||||
run_prep_cleanup($test->{cleanup}) if defined $test->{cleanup};
|
||||
}
|
||||
|
||||
done_testing();
|
||||
|
||||
if (Test::More->builder->is_passing()) {
|
||||
cleanup();
|
||||
cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -51,23 +51,23 @@ EOF
|
||||
|
||||
my $permissions = {
|
||||
'user1@test' => {},
|
||||
'user2@test' => { '/' => ['Sys.Modify'], },
|
||||
'user3@test' => { '/storage' => ['Datastore.Allocate'], },
|
||||
'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'], },
|
||||
'user2@test' => { '/' => ['Sys.Modify'] },
|
||||
'user3@test' => { '/storage' => ['Datastore.Allocate'] },
|
||||
'user4@test' => { '/storage/d20m40r30' => ['Datastore.Allocate'] },
|
||||
};
|
||||
|
||||
my $pve_cluster_module;
|
||||
$pve_cluster_module = Test::MockModule->new('PVE::Cluster');
|
||||
$pve_cluster_module->mock(
|
||||
cfs_update => sub {},
|
||||
cfs_update => sub { },
|
||||
get_config => sub {
|
||||
my ($file) = @_;
|
||||
if ($file eq 'datacenter.cfg') {
|
||||
return $datacenter_cfg;
|
||||
} elsif ($file eq 'storage.cfg') {
|
||||
return $storage_cfg;
|
||||
}
|
||||
die "TODO: mock get_config($file)\n";
|
||||
my ($file) = @_;
|
||||
if ($file eq 'datacenter.cfg') {
|
||||
return $datacenter_cfg;
|
||||
} elsif ($file eq 'storage.cfg') {
|
||||
return $storage_cfg;
|
||||
}
|
||||
die "TODO: mock get_config($file)\n";
|
||||
},
|
||||
);
|
||||
|
||||
@ -75,136 +75,360 @@ my $rpcenv_module;
|
||||
$rpcenv_module = Test::MockModule->new('PVE::RPCEnvironment');
|
||||
$rpcenv_module->mock(
|
||||
check => sub {
|
||||
my ($env, $user, $path, $perms, $noerr) = @_;
|
||||
return 1 if $user eq 'root@pam';
|
||||
my $userperms = $permissions->{$user}
|
||||
or die "no permissions defined for user $user\n";
|
||||
if (defined(my $pathperms = $userperms->{$path})) {
|
||||
foreach my $pp (@$pathperms) {
|
||||
foreach my $reqp (@$perms) {
|
||||
return 1 if $pp eq $reqp;
|
||||
}
|
||||
}
|
||||
}
|
||||
die "permission denied\n" if !$noerr;
|
||||
return 0;
|
||||
my ($env, $user, $path, $perms, $noerr) = @_;
|
||||
return 1 if $user eq 'root@pam';
|
||||
my $userperms = $permissions->{$user}
|
||||
or die "no permissions defined for user $user\n";
|
||||
if (defined(my $pathperms = $userperms->{$path})) {
|
||||
foreach my $pp (@$pathperms) {
|
||||
foreach my $reqp (@$perms) {
|
||||
return 1 if $pp eq $reqp;
|
||||
}
|
||||
}
|
||||
}
|
||||
die "permission denied\n" if !$noerr;
|
||||
return 0;
|
||||
},
);

my $rpcenv = PVE::RPCEnvironment->init('pub');
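The test tuples that follow pair an argument list (operation, storage list, override) with the expected effective bandwidth limit for the currently selected user. They are presumably consumed by a loop roughly like the sketch below, calling PVE::Storage::get_bandwidth_limit(); this is only an illustration of the pattern, not the driver code of this file:

for my $t (@tests) {
    if ($t->[0] eq 'user') {
        $rpcenv->set_user($t->[1]);    # switch the acting user for the following cases
        next;
    }
    my ($args, $expected, $description) = @$t;
    is(PVE::Storage::get_bandwidth_limit(@$args), $expected, $description);
}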
my @tests = (
|
||||
[ user => 'root@pam' ],
|
||||
[ ['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default' ],
|
||||
[ ['move', ['nolimit'], undef], 80, 'root / specific default limit, requesting default (move)' ],
|
||||
[ ['restore', ['nolimit'], undef], 60, 'root / specific default limit, requesting default (restore)' ],
|
||||
[ ['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit' ],
|
||||
[ ['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)' ],
|
||||
[ ['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)' ],
|
||||
[ ['unknown', ['nolimit'], 0], 0, 'root / generic default limit' ],
|
||||
[ ['move', ['nolimit'], 0], 0, 'root / specific default limit (move)' ],
|
||||
[ ['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)' ],
|
||||
[ ['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit' ],
|
||||
[ ['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)' ],
|
||||
[ ['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)' ],
|
||||
[ ['migrate', undef, 100], 100, 'root / undef storage (migrate)' ],
|
||||
[ ['migrate', [], 100], 100, 'root / no storage (migrate)' ],
|
||||
[ ['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)' ],
|
||||
[ ['migrate', [undef, undef], 200], 200, 'root / list of undef storages with override (migrate)' ],
|
||||
[user => 'root@pam'],
|
||||
[['unknown', ['nolimit'], undef], 100, 'root / generic default limit, requesting default'],
|
||||
[
|
||||
['move', ['nolimit'], undef],
|
||||
80,
|
||||
'root / specific default limit, requesting default (move)',
|
||||
],
|
||||
[
|
||||
['restore', ['nolimit'], undef],
|
||||
60,
|
||||
'root / specific default limit, requesting default (restore)',
|
||||
],
|
||||
[['unknown', ['d50m40r30'], undef], 50, 'root / storage default limit'],
|
||||
[['move', ['d50m40r30'], undef], 40, 'root / specific storage limit (move)'],
|
||||
[['restore', ['d50m40r30'], undef], 30, 'root / specific storage limit (restore)'],
|
||||
[['unknown', ['nolimit'], 0], 0, 'root / generic default limit'],
|
||||
[['move', ['nolimit'], 0], 0, 'root / specific default limit (move)'],
|
||||
[['restore', ['nolimit'], 0], 0, 'root / specific default limit (restore)'],
|
||||
[['unknown', ['d50m40r30'], 0], 0, 'root / storage default limit'],
|
||||
[['move', ['d50m40r30'], 0], 0, 'root / specific storage limit (move)'],
|
||||
[['restore', ['d50m40r30'], 0], 0, 'root / specific storage limit (restore)'],
|
||||
[['migrate', undef, 100], 100, 'root / undef storage (migrate)'],
|
||||
[['migrate', [], 100], 100, 'root / no storage (migrate)'],
|
||||
[['migrate', [undef], undef], 100, 'root / [undef] storage no override (migrate)'],
|
||||
[
|
||||
['migrate', [undef, undef], 200],
|
||||
200,
|
||||
'root / list of undef storages with override (migrate)',
|
||||
],
|
||||
|
||||
[ user => 'user1@test' ],
|
||||
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit' ],
|
||||
[ ['move', ['nolimit'], undef], 80, 'specific default limit (move)' ],
|
||||
[ ['restore', ['nolimit'], undef], 60, 'specific default limit (restore)' ],
|
||||
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit' ],
|
||||
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)' ],
|
||||
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)' ],
|
||||
[ ['unknown', ['d200m400r300'], undef], 200, 'storage default limit above datacenter limits' ],
|
||||
[ ['move', ['d200m400r300'], undef], 400, 'specific storage limit above datacenter limits (move)' ],
|
||||
[ ['restore', ['d200m400r300'], undef], 300, 'specific storage limit above datacenter limits (restore)' ],
|
||||
[ ['unknown', ['d50'], undef], 50, 'storage default limit' ],
|
||||
[ ['move', ['d50'], undef], 50, 'storage default limit (move)' ],
|
||||
[ ['restore', ['d50'], undef], 50, 'storage default limit (restore)' ],
|
||||
[user => 'user1@test'],
|
||||
[['unknown', ['nolimit'], undef], 100, 'generic default limit'],
|
||||
[['move', ['nolimit'], undef], 80, 'specific default limit (move)'],
|
||||
[['restore', ['nolimit'], undef], 60, 'specific default limit (restore)'],
|
||||
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit'],
|
||||
[['move', ['d50m40r30'], undef], 40, 'specific storage limit (move)'],
|
||||
[['restore', ['d50m40r30'], undef], 30, 'specific storage limit (restore)'],
|
||||
[
|
||||
['unknown', ['d200m400r300'], undef],
|
||||
200,
|
||||
'storage default limit above datacenter limits',
|
||||
],
|
||||
[
|
||||
['move', ['d200m400r300'], undef],
|
||||
400,
|
||||
'specific storage limit above datacenter limits (move)',
|
||||
],
|
||||
[
|
||||
['restore', ['d200m400r300'], undef],
|
||||
300,
|
||||
'specific storage limit above datacenter limits (restore)',
|
||||
],
|
||||
[['unknown', ['d50'], undef], 50, 'storage default limit'],
|
||||
[['move', ['d50'], undef], 50, 'storage default limit (move)'],
|
||||
[['restore', ['d50'], undef], 50, 'storage default limit (restore)'],
|
||||
|
[ user => 'user2@test' ],
[ ['unknown', ['nolimit'], 0], 0, 'generic default limit with Sys.Modify, passing unlimited' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)' ],
[ ['restore', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (restore)' ],
[ ['move', ['nolimit'], 0], 0, 'specific default limit with Sys.Modify, passing unlimited (move)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)' ],
[user => 'user2@test'],
[
['unknown', ['nolimit'], 0],
0,
'generic default limit with Sys.Modify, passing unlimited',
],
[['unknown', ['nolimit'], undef], 100, 'generic default limit with Sys.Modify'],
[['move', ['nolimit'], undef], 80, 'specific default limit with Sys.Modify (move)'],
[['restore', ['nolimit'], undef], 60, 'specific default limit with Sys.Modify (restore)'],
[
['restore', ['nolimit'], 0],
0,
'specific default limit with Sys.Modify, passing unlimited (restore)',
],
[
['move', ['nolimit'], 0],
0,
'specific default limit with Sys.Modify, passing unlimited (move)',
],
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit with Sys.Modify'],
[['restore', ['d50m40r30'], undef], 30, 'specific storage limit with Sys.Modify (restore)'],
[['move', ['d50m40r30'], undef], 40, 'specific storage limit with Sys.Modify (move)'],

[ user => 'user3@test' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /' ],
[ ['unknown', ['nolimit'], 80], 80, 'generic default limit with privileges on /, passing an override value' ],
[ ['unknown', ['nolimit'], 0], 0, 'generic default limit with privileges on /, passing unlimited' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)' ],
[ ['move', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on / (restore)' ],
[ ['restore', ['nolimit'], 0], 0, 'specific default limit with privileges on /, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on /, passing unlimited' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /' ],
[ ['unknown', ['d50m40r30'], 0], 0, 'storage default limit with privileges on, passing unlimited /' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)' ],
[ ['move', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on, passing unlimited / (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on / (restore)' ],
[ ['restore', ['d50m40r30'], 0], 0, 'specific storage limit with privileges on /, passing unlimited (restore)' ],
[user => 'user3@test'],
[['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on /'],
[
['unknown', ['nolimit'], 80],
80,
'generic default limit with privileges on /, passing an override value',
],
[
['unknown', ['nolimit'], 0],
0,
'generic default limit with privileges on /, passing unlimited',
],
[['move', ['nolimit'], undef], 80, 'specific default limit with privileges on / (move)'],
[
['move', ['nolimit'], 0],
0,
'specific default limit with privileges on /, passing unlimited (move)',
],
[
['restore', ['nolimit'], undef],
60,
'specific default limit with privileges on / (restore)',
],
[
['restore', ['nolimit'], 0],
0,
'specific default limit with privileges on /, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30'], 0],
0,
'storage default limit with privileges on /, passing unlimited',
],
[['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on /'],
[
['unknown', ['d50m40r30'], 0],
0,
'storage default limit with privileges on, passing unlimited /',
],
[['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on / (move)'],
[
['move', ['d50m40r30'], 0],
0,
'specific storage limit with privileges on, passing unlimited / (move)',
],
[
['restore', ['d50m40r30'], undef],
30,
'specific storage limit with privileges on / (restore)',
],
[
['restore', ['d50m40r30'], 0],
0,
'specific storage limit with privileges on /, passing unlimited (restore)',
],

[ user => 'user4@test' ],
[ ['unknown', ['nolimit'], 10], 10, 'generic default limit with privileges on a different storage, passing lower override' ],
[ ['unknown', ['nolimit'], undef], 100, 'generic default limit with privileges on a different storage' ],
[ ['unknown', ['nolimit'], 0], 100, 'generic default limit with privileges on a different storage, passing unlimited' ],
[ ['move', ['nolimit'], undef], 80, 'specific default limit with privileges on a different storage (move)' ],
[ ['restore', ['nolimit'], undef], 60, 'specific default limit with privileges on a different storage (restore)' ],
[ ['unknown', ['d50m40r30'], undef], 50, 'storage default limit with privileges on a different storage' ],
[ ['move', ['d50m40r30'], undef], 40, 'specific storage limit with privileges on a different storage (move)' ],
[ ['restore', ['d50m40r30'], undef], 30, 'specific storage limit with privileges on a different storage (restore)' ],
[ ['unknown', ['d20m40r30'], undef], 20, 'storage default limit with privileges on that storage' ],
[ ['unknown', ['d20m40r30'], 0], 0, 'storage default limit with privileges on that storage, passing unlimited' ],
[ ['move', ['d20m40r30'], undef], 40, 'specific storage limit with privileges on that storage (move)' ],
[ ['move', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (move)' ],
[ ['move', ['d20m40r30'], 10], 10, 'specific storage limit with privileges on that storage, passing low override (move)' ],
[ ['move', ['d20m40r30'], 300], 300, 'specific storage limit with privileges on that storage, passing high override (move)' ],
[ ['restore', ['d20m40r30'], undef], 30, 'specific storage limit with privileges on that storage (restore)' ],
[ ['restore', ['d20m40r30'], 0], 0, 'specific storage limit with privileges on that storage, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30', 'd20m40r30'], 0], 50, 'multiple storages default limit with privileges on one of them, passing unlimited' ],
[ ['move', ['d50m40r30', 'd20m40r30'], 0], 40, 'multiple storages specific limit with privileges on one of them, passing unlimited (move)' ],
[ ['restore', ['d50m40r30', 'd20m40r30'], 0], 30, 'multiple storages specific limit with privileges on one of them, passing unlimited (restore)' ],
[ ['unknown', ['d50m40r30', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them' ],
[ ['unknown', ['d10', 'd20m40r30'], undef], 10, 'multiple storages default limit with privileges on one of them (storage limited)' ],
[ ['move', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (move)' ],
[ ['restore', ['d10', 'd20m40r30'], undef], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore)' ],
[ ['restore', ['d10', 'd20m40r30'], 5], 5, 'multiple storages specific limit (storage limited) (restore), passing lower override' ],
[ ['restore', ['d200', 'd200m400r300'], 65], 65, 'multiple storages specific limit (storage limited) (restore), passing lower override' ],
[ ['restore', ['d200', 'd200m400r300'], 400], 200, 'multiple storages specific limit (storage limited) (restore), passing higher override' ],
[ ['restore', ['d200', 'd200m400r300'], 0], 200, 'multiple storages specific limit (storage limited) (restore), passing unlimited' ],
[ ['restore', ['d200', 'd200m400r300'], 1], 1, 'multiple storages specific limit (storage limited) (restore), passing 1' ],
[ ['restore', ['d10', 'd20m40r30'], 500], 10, 'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override' ],
[ ['unknown', ['nolimit', 'd20m40r30'], 0], 100, 'multiple storages default limit with privileges on one of them, passing unlimited (default limited)' ],
[ ['move', ['nolimit', 'd20m40r30'], 0], 80, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)' ],
[ ['restore', ['nolimit', 'd20m40r30'], 0], 60, 'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)' ],
[ ['unknown', ['nolimit', 'd20m40r30'], undef], 20, 'multiple storages default limit with privileges on one of them (default limited)' ],
[ ['move', ['nolimit', 'd20m40r30'], undef], 40, 'multiple storages specific limit with privileges on one of them (default limited) (move)' ],
[ ['restore', ['nolimit', 'd20m40r30'], undef], 30, 'multiple storages specific limit with privileges on one of them (default limited) (restore)' ],
[ ['restore', ['d20m40r30', 'm50'], 200], 60, 'multiple storages specific limit with privileges on one of them (global default limited) (restore)' ],
[ ['move', ['nolimit', undef ], 40] , 40, 'multiple storages one undefined, passing 40 (move)' ],
[ ['move', undef, 100] , 80, 'undef storage, passing 100 (move)' ],
[ ['move', [undef], 100] , 80, '[undef] storage, passing 100 (move)' ],
[ ['move', [undef], undef] , 80, '[undef] storage, no override (move)' ],
[user => 'user4@test'],
[
['unknown', ['nolimit'], 10],
10,
'generic default limit with privileges on a different storage, passing lower override',
],
[
['unknown', ['nolimit'], undef],
100,
'generic default limit with privileges on a different storage',
],
[
['unknown', ['nolimit'], 0],
100,
'generic default limit with privileges on a different storage, passing unlimited',
],
[
['move', ['nolimit'], undef],
80,
'specific default limit with privileges on a different storage (move)',
],
[
['restore', ['nolimit'], undef],
60,
'specific default limit with privileges on a different storage (restore)',
],
[
['unknown', ['d50m40r30'], undef],
50,
'storage default limit with privileges on a different storage',
],
[
['move', ['d50m40r30'], undef],
40,
'specific storage limit with privileges on a different storage (move)',
],
[
['restore', ['d50m40r30'], undef],
30,
'specific storage limit with privileges on a different storage (restore)',
],
[
['unknown', ['d20m40r30'], undef],
20,
'storage default limit with privileges on that storage',
],
[
['unknown', ['d20m40r30'], 0],
0,
'storage default limit with privileges on that storage, passing unlimited',
],
[
['move', ['d20m40r30'], undef],
40,
'specific storage limit with privileges on that storage (move)',
],
[
['move', ['d20m40r30'], 0],
0,
'specific storage limit with privileges on that storage, passing unlimited (move)',
],
[
['move', ['d20m40r30'], 10],
10,
'specific storage limit with privileges on that storage, passing low override (move)',
],
[
['move', ['d20m40r30'], 300],
300,
'specific storage limit with privileges on that storage, passing high override (move)',
],
[
['restore', ['d20m40r30'], undef],
30,
'specific storage limit with privileges on that storage (restore)',
],
[
['restore', ['d20m40r30'], 0],
0,
'specific storage limit with privileges on that storage, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30', 'd20m40r30'], 0],
50,
'multiple storages default limit with privileges on one of them, passing unlimited',
],
[
['move', ['d50m40r30', 'd20m40r30'], 0],
40,
'multiple storages specific limit with privileges on one of them, passing unlimited (move)',
],
[
['restore', ['d50m40r30', 'd20m40r30'], 0],
30,
'multiple storages specific limit with privileges on one of them, passing unlimited (restore)',
],
[
['unknown', ['d50m40r30', 'd20m40r30'], undef],
20,
'multiple storages default limit with privileges on one of them',
],
[
['unknown', ['d10', 'd20m40r30'], undef],
10,
'multiple storages default limit with privileges on one of them (storage limited)',
],
[
['move', ['d10', 'd20m40r30'], undef],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (move)',
],
[
['restore', ['d10', 'd20m40r30'], undef],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (restore)',
],
[
['restore', ['d10', 'd20m40r30'], 5],
5,
'multiple storages specific limit (storage limited) (restore), passing lower override',
],
[
['restore', ['d200', 'd200m400r300'], 65],
65,
'multiple storages specific limit (storage limited) (restore), passing lower override',
],
[
['restore', ['d200', 'd200m400r300'], 400],
200,
'multiple storages specific limit (storage limited) (restore), passing higher override',
],
[
['restore', ['d200', 'd200m400r300'], 0],
200,
'multiple storages specific limit (storage limited) (restore), passing unlimited',
],
[
['restore', ['d200', 'd200m400r300'], 1],
1,
'multiple storages specific limit (storage limited) (restore), passing 1',
],
[
['restore', ['d10', 'd20m40r30'], 500],
10,
'multiple storages specific limit with privileges on one of them (storage limited) (restore), passing higher override',
],
[
['unknown', ['nolimit', 'd20m40r30'], 0],
100,
'multiple storages default limit with privileges on one of them, passing unlimited (default limited)',
],
[
['move', ['nolimit', 'd20m40r30'], 0],
80,
'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (move)',
],
[
['restore', ['nolimit', 'd20m40r30'], 0],
60,
'multiple storages specific limit with privileges on one of them, passing unlimited (default limited) (restore)',
],
[
['unknown', ['nolimit', 'd20m40r30'], undef],
20,
'multiple storages default limit with privileges on one of them (default limited)',
],
[
['move', ['nolimit', 'd20m40r30'], undef],
40,
'multiple storages specific limit with privileges on one of them (default limited) (move)',
],
[
['restore', ['nolimit', 'd20m40r30'], undef],
30,
'multiple storages specific limit with privileges on one of them (default limited) (restore)',
],
[
['restore', ['d20m40r30', 'm50'], 200],
60,
'multiple storages specific limit with privileges on one of them (global default limited) (restore)',
],
[
['move', ['nolimit', undef], 40],
40,
'multiple storages one undefined, passing 40 (move)',
],
[['move', undef, 100], 80, 'undef storage, passing 100 (move)'],
[['move', [undef], 100], 80, '[undef] storage, passing 100 (move)'],
[['move', [undef], undef], 80, '[undef] storage, no override (move)'],
);

foreach my $t (@tests) {
my ($args, $expected, $description) = @$t;
if (!ref($args)) {
if ($args eq 'user') {
$rpcenv->set_user($expected);
} else {
die "not a test specification\n";
}
next;
if ($args eq 'user') {
$rpcenv->set_user($expected);
} else {
die "not a test specification\n";
}
next;
}
is(PVE::Storage::get_bandwidth_limit(@$args), $expected, $description);
}

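The table above feeds the loop that follows: each entry pairs an argument list for PVE::Storage::get_bandwidth_limit() with the expected effective limit, and the [user => ...] rows switch the RPC environment user in between. A minimal sketch of that call shape, reconstructed only from the rows above; the storage IDs such as 'd50m40r30' are test fixtures that appear to encode per-storage default/move/restore limits, and the quoted numbers are the ones asserted for the unprivileged 'user1@test':

# Sketch, not part of the diff: how one @tests row maps onto the helper.
use PVE::Storage;

# (operation, list of storage IDs, requested override) -> effective limit
my $limit = PVE::Storage::get_bandwidth_limit('restore', ['d50m40r30'], undef);
# expected to be 30 here, the fixture storage's restore limit, matching the
# [['restore', ['d50m40r30'], undef], 30, ...] row above

$limit = PVE::Storage::get_bandwidth_limit('move', ['nolimit'], undef);
# expected to fall back to the datacenter-wide 'move' default of 80 in this test setup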
@ -5,8 +5,8 @@ use warnings;

use TAP::Harness;

my $harness = TAP::Harness->new( { verbosity => -2 });
my $res = $harness->runtests( "disklist_test.pm" );
my $harness = TAP::Harness->new({ verbosity => -2 });
my $res = $harness->runtests("disklist_test.pm");

exit -1 if !$res || $res->{failed} || $res->{parse_errors};


@ -10,11 +10,12 @@ use Test::More;

use Data::Dumper;

my $test_manifests = join ('/', $Bin, 'ovf_manifests');
my $test_manifests = join('/', $Bin, 'ovf_manifests');

print "parsing ovfs\n";

my $win2008 = eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") };
my $win2008 =
eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win_2008_R2_two-disks.ovf") };
if (my $err = $@) {
fail('parse win2008');
warn("error: $err\n");
@ -28,7 +29,8 @@ if (my $err = $@) {
} else {
ok('parse win10');
}
my $win10noNs = eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") };
my $win10noNs =
eval { PVE::GuestImport::OVF::parse_ovf("$test_manifests/Win10-Liz_no_default_ns.ovf") };
if (my $err = $@) {
fail("parse win10 no default rasd NS");
warn("error: $err\n");
@ -38,26 +40,59 @@ if (my $err = $@) {

print "testing disks\n";

is($win2008->{disks}->[0]->{disk_address}, 'scsi0', 'multidisk vm has the correct first disk controller');
is($win2008->{disks}->[0]->{backing_file}, "$test_manifests/disk1.vmdk", 'multidisk vm has the correct first disk backing device');
is(
$win2008->{disks}->[0]->{disk_address},
'scsi0',
'multidisk vm has the correct first disk controller',
);
is(
$win2008->{disks}->[0]->{backing_file},
"$test_manifests/disk1.vmdk",
'multidisk vm has the correct first disk backing device',
);
is($win2008->{disks}->[0]->{virtual_size}, 2048, 'multidisk vm has the correct first disk size');

is($win2008->{disks}->[1]->{disk_address}, 'scsi1', 'multidisk vm has the correct second disk controller');
is($win2008->{disks}->[1]->{backing_file}, "$test_manifests/disk2.vmdk", 'multidisk vm has the correct second disk backing device');
is(
$win2008->{disks}->[1]->{disk_address},
'scsi1',
'multidisk vm has the correct second disk controller',
);
is(
$win2008->{disks}->[1]->{backing_file},
"$test_manifests/disk2.vmdk",
'multidisk vm has the correct second disk backing device',
);
is($win2008->{disks}->[1]->{virtual_size}, 2048, 'multidisk vm has the correct second disk size');

is($win10->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm has the correct disk controller');
is($win10->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm has the correct disk backing device');
is(
$win10->{disks}->[0]->{backing_file},
"$test_manifests/Win10-Liz-disk1.vmdk",
'single disk vm has the correct disk backing device',
);
is($win10->{disks}->[0]->{virtual_size}, 2048, 'single disk vm has the correct size');

is($win10noNs->{disks}->[0]->{disk_address}, 'scsi0', 'single disk vm (no default rasd NS) has the correct disk controller');
is($win10noNs->{disks}->[0]->{backing_file}, "$test_manifests/Win10-Liz-disk1.vmdk", 'single disk vm (no default rasd NS) has the correct disk backing device');
is($win10noNs->{disks}->[0]->{virtual_size}, 2048, 'single disk vm (no default rasd NS) has the correct size');
is(
$win10noNs->{disks}->[0]->{disk_address},
'scsi0',
'single disk vm (no default rasd NS) has the correct disk controller',
);
is(
$win10noNs->{disks}->[0]->{backing_file},
"$test_manifests/Win10-Liz-disk1.vmdk",
'single disk vm (no default rasd NS) has the correct disk backing device',
);
is(
$win10noNs->{disks}->[0]->{virtual_size},
2048,
'single disk vm (no default rasd NS) has the correct size',
);

print "testing nics\n";
is($win2008->{net}->{net0}->{model}, 'e1000', 'win2008 has correct nic model');
is($win10->{net}->{net0}->{model}, 'e1000e', 'win10 has correct nic model');
is($win10noNs->{net}->{net0}->{model}, 'e1000e', 'win10 (no default rasd NS) has correct nic model');
is($win10noNs->{net}->{net0}->{model}, 'e1000e',
'win10 (no default rasd NS) has correct nic model');

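A reading aid for the assertions above: taken together they imply that PVE::GuestImport::OVF::parse_ovf() returns a hash reference with at least a 'disks' array and a 'net' hash. The sketch below reconstructs only that shape from the is() checks in this test; any field not exercised here is not implied, and the illustrative values are the ones asserted for the Win10-Liz manifest:

# Shape implied by the assertions above (illustrative, not an API definition):
my $parsed = {
    disks => [
        {
            disk_address => 'scsi0',                                 # controller slot
            backing_file => "$test_manifests/Win10-Liz-disk1.vmdk",  # source image path
            virtual_size => 2048,                                    # parsed disk size
        },
    ],
    net => {
        net0 => { model => 'e1000e' },                               # parsed NIC model
    },
};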
print "\ntesting vm.conf extraction\n";


@ -8,7 +8,7 @@ $ENV{TZ} = 'UTC';

use TAP::Harness;

my $harness = TAP::Harness->new( { verbosity => -1 });
my $harness = TAP::Harness->new({ verbosity => -1 });
my $res = $harness->runtests(
"archive_info_test.pm",
"parse_volname_test.pm",
