separate packaging and source build system
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

src/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
DESTDIR=
PREFIX=/usr

export PERLDIR=$(PREFIX)/share/perl5

all:

.PHONY: install
install: PVE bin udev-rbd
	$(MAKE) -C bin install
	$(MAKE) -C PVE install
	$(MAKE) -C udev-rbd install

.PHONY: test
test:
	perl -I. -T -e "use PVE::CLI::pvesm; PVE::CLI::pvesm->verify_api();"
	$(MAKE) -C test

.PHONY: clean
clean:
	$(MAKE) -C bin clean
	$(MAKE) -C PVE clean
	$(MAKE) -C udev-rbd clean
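# Illustrative usage, assuming a packaging-style destination directory:
#   make install DESTDIR=$(CURDIR)/debian/tmp PREFIX=/usr
# installs the Perl modules below $(DESTDIR)/usr/share/perl5 via PERLDIR.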
src/PVE/API2/Disks.pm (new file, 318 lines)
@@ -0,0 +1,318 @@
|
||||
package PVE::API2::Disks;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use File::Basename;
|
||||
use HTTP::Status qw(:constants);
|
||||
|
||||
use PVE::Diskmanage;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::SafeSyslog;
|
||||
use PVE::Tools qw(run_command);
|
||||
|
||||
use PVE::API2::Disks::Directory;
|
||||
use PVE::API2::Disks::LVM;
|
||||
use PVE::API2::Disks::LVMThin;
|
||||
use PVE::API2::Disks::ZFS;
|
||||
|
||||
use PVE::RESTHandler;
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Disks::LVM",
|
||||
path => 'lvm',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Disks::LVMThin",
|
||||
path => 'lvmthin',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Disks::Directory",
|
||||
path => 'directory',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Disks::ZFS",
|
||||
path => 'zfs',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
permissions => { user => 'all' },
|
||||
description => "Node index.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{name}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $result = [
|
||||
{ name => 'list' },
|
||||
{ name => 'initgpt' },
|
||||
{ name => 'smart' },
|
||||
{ name => 'lvm' },
|
||||
{ name => 'lvmthin' },
|
||||
{ name => 'directory' },
|
||||
{ name => 'wipedisk' },
|
||||
{ name => 'zfs' },
|
||||
];
|
||||
|
||||
return $result;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'list',
|
||||
path => 'list',
|
||||
method => 'GET',
|
||||
description => "List local disks.",
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
permissions => {
|
||||
check => ['or',
|
||||
['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
['perm', '/nodes/{node}', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
'include-partitions' => {
|
||||
description => "Also include partitions.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
skipsmart => {
|
||||
description => "Skip smart checks.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
type => {
|
||||
description => "Only list specific types of disks.",
|
||||
type => 'string',
|
||||
enum => ['unused', 'journal_disks'],
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
devpath => {
|
||||
type => 'string',
|
||||
description => 'The device path',
|
||||
},
|
||||
used => { type => 'string', optional => 1 },
|
||||
gpt => { type => 'boolean' },
|
||||
mounted => { type => 'boolean' },
|
||||
size => { type => 'integer'},
|
||||
osdid => { type => 'integer'},
|
||||
vendor => { type => 'string', optional => 1 },
|
||||
model => { type => 'string', optional => 1 },
|
||||
serial => { type => 'string', optional => 1 },
|
||||
wwn => { type => 'string', optional => 1},
|
||||
health => { type => 'string', optional => 1},
|
||||
parent => {
|
||||
type => 'string',
|
||||
description => 'For partitions only. The device path of ' .
|
||||
'the disk the partition resides on.',
|
||||
optional => 1
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $skipsmart = $param->{skipsmart} // 0;
|
||||
my $include_partitions = $param->{'include-partitions'} // 0;
|
||||
|
||||
my $disks = PVE::Diskmanage::get_disks(
|
||||
undef,
|
||||
$skipsmart,
|
||||
$include_partitions
|
||||
);
|
||||
|
||||
my $type = $param->{type} // '';
|
||||
my $result = [];
|
||||
|
||||
foreach my $disk (sort keys %$disks) {
|
||||
my $entry = $disks->{$disk};
|
||||
if ($type eq 'journal_disks') {
|
||||
next if $entry->{osdid} >= 0;
|
||||
if (my $usage = $entry->{used}) {
|
||||
next if !($usage eq 'partitions' && $entry->{gpt}
|
||||
|| $usage eq 'LVM');
|
||||
}
|
||||
} elsif ($type eq 'unused') {
|
||||
next if $entry->{used};
|
||||
} elsif ($type ne '') {
|
||||
die "internal error"; # should not happen
|
||||
}
|
||||
push @$result, $entry;
|
||||
}
|
||||
return $result;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'smart',
|
||||
path => 'smart',
|
||||
method => 'GET',
|
||||
description => "Get SMART Health of a disk.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
disk => {
|
||||
type => 'string',
|
||||
pattern => '^/dev/[a-zA-Z0-9\/]+$',
|
||||
description => "Block device name",
|
||||
},
|
||||
healthonly => {
|
||||
type => 'boolean',
|
||||
description => "If true returns only the health status",
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
health => { type => 'string' },
|
||||
type => { type => 'string', optional => 1 },
|
||||
attributes => { type => 'array', optional => 1},
|
||||
text => { type => 'string', optional => 1 },
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});
|
||||
|
||||
my $result = PVE::Diskmanage::get_smart_data($disk, $param->{healthonly});
|
||||
|
||||
$result->{health} = 'UNKNOWN' if !defined $result->{health};
|
||||
$result = { health => $result->{health} } if $param->{healthonly};
|
||||
|
||||
return $result;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'initgpt',
|
||||
path => 'initgpt',
|
||||
method => 'POST',
|
||||
description => "Initialize Disk with GPT",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
disk => {
|
||||
type => 'string',
|
||||
description => "Block device name",
|
||||
pattern => '^/dev/[a-zA-Z0-9\/]+$',
|
||||
},
|
||||
uuid => {
|
||||
type => 'string',
|
||||
description => 'UUID for the GPT table',
|
||||
pattern => '[a-fA-F0-9\-]+',
|
||||
maxLength => 36,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
die "$disk is a partition\n" if PVE::Diskmanage::is_partition($disk);
|
||||
die "disk $disk already in use\n" if PVE::Diskmanage::disk_is_used($disk);
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::init_disk($disk, $param->{uuid});
|
||||
};
|
||||
|
||||
my $diskid = $disk;
|
||||
$diskid =~ s|^.*/||; # remove all up to the last slash
|
||||
return $rpcenv->fork_worker('diskinit', $diskid, $authuser, $worker);
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'wipe_disk',
|
||||
path => 'wipedisk',
|
||||
method => 'PUT',
|
||||
description => "Wipe a disk or partition.",
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
disk => {
|
||||
type => 'string',
|
||||
description => "Block device name",
|
||||
pattern => '^/dev/[a-zA-Z0-9\/]+$',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $disk = PVE::Diskmanage::verify_blockdev_path($param->{disk});
|
||||
|
||||
my $mounted = PVE::Diskmanage::is_mounted($disk);
|
||||
die "disk/partition '${mounted}' is mounted\n" if $mounted;
|
||||
|
||||
my $held = PVE::Diskmanage::has_holder($disk);
|
||||
die "disk/partition '${held}' has a holder\n" if $held;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::wipe_blockdev($disk);
|
||||
PVE::Diskmanage::udevadm_trigger($disk);
|
||||
};
|
||||
|
||||
my $basename = basename($disk); # avoid '/' in the ID
|
||||
|
||||
return $rpcenv->fork_worker('wipedisk', $basename, $authuser, $worker);
|
||||
}});
|
||||
|
||||
1;
src/PVE/API2/Disks/Directory.pm (new file, 409 lines)
@@ -0,0 +1,409 @@
|
||||
package PVE::API2::Disks::Directory;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use POSIX;
|
||||
|
||||
use PVE::Diskmanage;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::RESTHandler;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::Systemd;
|
||||
use PVE::Tools qw(run_command trim file_set_contents file_get_contents dir_glob_foreach lock_file);
|
||||
|
||||
use PVE::API2::Storage::Config;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
my $SGDISK = '/sbin/sgdisk';
|
||||
my $MKFS = '/sbin/mkfs';
|
||||
my $BLKID = '/sbin/blkid';
|
||||
|
||||
my $read_ini = sub {
|
||||
my ($filename) = @_;
|
||||
|
||||
my $content = file_get_contents($filename);
|
||||
my @lines = split /\n/, $content;
|
||||
|
||||
my $result = {};
|
||||
my $section;
|
||||
|
||||
foreach my $line (@lines) {
|
||||
$line = trim($line);
|
||||
if ($line =~ m/^\[([^\]]+)\]/) {
|
||||
$section = $1;
|
||||
if (!defined($result->{$section})) {
|
||||
$result->{$section} = {};
|
||||
}
|
||||
} elsif ($line =~ m/^(.*?)=(.*)$/) {
|
||||
my ($key, $val) = ($1, $2);
|
||||
if (!$section) {
|
||||
warn "key value pair found without section, skipping\n";
|
||||
next;
|
||||
}
|
||||
|
||||
if ($result->{$section}->{$key}) {
|
||||
# collect duplicate properties into arrays to keep their order
|
||||
my $prop = $result->{$section}->{$key};
|
||||
if (ref($prop) eq 'ARRAY') {
|
||||
push @$prop, $val;
|
||||
} else {
|
||||
$result->{$section}->{$key} = [$prop, $val];
|
||||
}
|
||||
} else {
|
||||
$result->{$section}->{$key} = $val;
|
||||
}
|
||||
}
|
||||
# ignore everything else
|
||||
}
|
||||
|
||||
return $result;
|
||||
};
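# Illustration of the data shape, assuming a typical mount unit as written by
# the create handler below:
#   [Mount]
#   What=/dev/disk/by-uuid/1234-ABCD
#   Where=/mnt/pve/foo
# becomes
#   { Mount => { What => '/dev/disk/by-uuid/1234-ABCD', Where => '/mnt/pve/foo' } }
# with duplicate keys within a section collected into array references.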
|
||||
|
||||
my $write_ini = sub {
|
||||
my ($ini, $filename) = @_;
|
||||
|
||||
my $content = "";
|
||||
|
||||
foreach my $sname (sort keys %$ini) {
|
||||
my $section = $ini->{$sname};
|
||||
|
||||
$content .= "[$sname]\n";
|
||||
|
||||
foreach my $pname (sort keys %$section) {
|
||||
my $prop = $section->{$pname};
|
||||
|
||||
if (!ref($prop)) {
|
||||
$content .= "$pname=$prop\n";
|
||||
} elsif (ref($prop) eq 'ARRAY') {
|
||||
foreach my $val (@$prop) {
|
||||
$content .= "$pname=$val\n";
|
||||
}
|
||||
} else {
|
||||
die "invalid property '$pname'\n";
|
||||
}
|
||||
}
|
||||
$content .= "\n";
|
||||
}
|
||||
|
||||
file_set_contents($filename, $content);
|
||||
};
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
description => "PVE Managed Directory storages.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
unitfile => {
|
||||
type => 'string',
|
||||
description => 'The path of the mount unit.',
|
||||
},
|
||||
path => {
|
||||
type => 'string',
|
||||
description => 'The mount path.',
|
||||
},
|
||||
device => {
|
||||
type => 'string',
|
||||
description => 'The mounted device.',
|
||||
},
|
||||
type => {
|
||||
type => 'string',
|
||||
description => 'The filesystem type.',
|
||||
},
|
||||
options => {
|
||||
type => 'string',
|
||||
description => 'The mount options.',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $result = [];
|
||||
|
||||
dir_glob_foreach('/etc/systemd/system', '^mnt-pve-(.+)\.mount$', sub {
|
||||
my ($filename, $storid) = @_;
|
||||
$storid = PVE::Systemd::unescape_unit($storid);
|
||||
|
||||
my $unitfile = "/etc/systemd/system/$filename";
|
||||
my $unit = $read_ini->($unitfile);
|
||||
|
||||
push @$result, {
|
||||
unitfile => $unitfile,
|
||||
path => "/mnt/pve/$storid",
|
||||
device => $unit->{'Mount'}->{'What'},
|
||||
type => $unit->{'Mount'}->{'Type'},
|
||||
options => $unit->{'Mount'}->{'Options'},
|
||||
};
|
||||
});
|
||||
|
||||
return $result;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Create a Filesystem on an unused disk. Will be mounted under '/mnt/pve/NAME'.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
device => {
|
||||
type => 'string',
|
||||
description => 'The block device you want to create the filesystem on.',
|
||||
},
|
||||
add_storage => {
|
||||
description => "Configure storage using the directory.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
filesystem => {
|
||||
description => "The desired filesystem.",
|
||||
type => 'string',
|
||||
enum => ['ext4', 'xfs'],
|
||||
optional => 1,
|
||||
default => 'ext4',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $dev = $param->{device};
|
||||
my $node = $param->{node};
|
||||
my $type = $param->{filesystem} // 'ext4';
|
||||
my $path = "/mnt/pve/$name";
|
||||
my $mountunitname = PVE::Systemd::escape_unit($path, 1) . ".mount";
|
||||
my $mountunitpath = "/etc/systemd/system/$mountunitname";
|
||||
|
||||
$dev = PVE::Diskmanage::verify_blockdev_path($dev);
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
my $storage_params = {
|
||||
type => 'dir',
|
||||
storage => $name,
|
||||
content => 'rootdir,images,iso,backup,vztmpl,snippets',
|
||||
is_mountpoint => 1,
|
||||
path => $path,
|
||||
nodes => $node,
|
||||
};
|
||||
my $verify_params = [qw(path)];
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
1,
|
||||
);
|
||||
}
|
||||
|
||||
my $mounted = PVE::Diskmanage::mounted_paths();
|
||||
die "the path for '${name}' is already mounted: ${path} ($mounted->{$path})\n"
|
||||
if $mounted->{$path};
|
||||
die "a systemd mount unit already exists: ${mountunitpath}\n" if -e $mountunitpath;
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
my $part = $dev;
|
||||
|
||||
if (PVE::Diskmanage::is_partition($dev)) {
|
||||
eval { PVE::Diskmanage::change_parttype($dev, '8300'); };
|
||||
warn $@ if $@;
|
||||
} else {
|
||||
# create partition
|
||||
my $cmd = [$SGDISK, '-n1', '-t1:8300', $dev];
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd);
|
||||
|
||||
my ($devname) = $dev =~ m|^/dev/(.*)$|;
|
||||
$part = "/dev/";
|
||||
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.+/, sub {
|
||||
my ($partition) = @_;
|
||||
$part .= $partition;
|
||||
});
|
||||
}
|
||||
|
||||
# create filesystem
|
||||
my $cmd = [$MKFS, '-t', $type, $part];
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd);
|
||||
|
||||
# create systemd mount unit and enable & start it
|
||||
my $ini = {
|
||||
'Unit' => {
|
||||
'Description' => "Mount storage '$name' under /mnt/pve",
|
||||
},
|
||||
'Install' => {
|
||||
'WantedBy' => 'multi-user.target',
|
||||
},
|
||||
};
|
||||
|
||||
my $uuid_path;
|
||||
my $uuid;
|
||||
|
||||
$cmd = [$BLKID, $part, '-o', 'export'];
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd, outfunc => sub {
|
||||
my ($line) = @_;
|
||||
|
||||
if ($line =~ m/^UUID=(.*)$/) {
|
||||
$uuid = $1;
|
||||
$uuid_path = "/dev/disk/by-uuid/$uuid";
|
||||
}
|
||||
});
|
||||
|
||||
die "could not get UUID of device '$part'\n" if !$uuid;
|
||||
|
||||
$ini->{'Mount'} = {
|
||||
'What' => $uuid_path,
|
||||
'Where' => $path,
|
||||
'Type' => $type,
|
||||
'Options' => 'defaults',
|
||||
};
|
||||
|
||||
$write_ini->($ini, $mountunitpath);
|
||||
|
||||
PVE::Diskmanage::udevadm_trigger($part);
|
||||
|
||||
run_command(['systemctl', 'daemon-reload']);
|
||||
run_command(['systemctl', 'enable', $mountunitname]);
|
||||
run_command(['systemctl', 'start', $mountunitname]);
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('dircreate', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '{name}',
|
||||
method => 'DELETE',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Unmounts the storage and removes the mount unit.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
'cleanup-config' => {
|
||||
description => "Marks associated storage(s) as not available on this node anymore ".
|
||||
"or removes them from the configuration (if configured for this node only).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'cleanup-disks' => {
|
||||
description => "Also wipe disk so it can be repurposed afterwards.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $node = $param->{node};
|
||||
|
||||
my $worker = sub {
|
||||
my $path = "/mnt/pve/$name";
|
||||
my $mountunitname = PVE::Systemd::escape_unit($path, 1) . ".mount";
|
||||
my $mountunitpath = "/etc/systemd/system/$mountunitname";
|
||||
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
my $to_wipe;
|
||||
if ($param->{'cleanup-disks'}) {
|
||||
my $unit = $read_ini->($mountunitpath);
|
||||
|
||||
my $dev = PVE::Diskmanage::verify_blockdev_path($unit->{'Mount'}->{'What'});
|
||||
$to_wipe = $dev;
|
||||
|
||||
# clean up whole device if this is the only partition
|
||||
$dev =~ s|^/dev/||;
|
||||
my $info = PVE::Diskmanage::get_disks($dev, 1, 1);
|
||||
die "unable to obtain information for disk '$dev'\n" if !$info->{$dev};
|
||||
$to_wipe = $info->{$dev}->{parent}
|
||||
if $info->{$dev}->{parent} && scalar(keys $info->%*) == 2;
|
||||
}
|
||||
|
||||
run_command(['systemctl', 'stop', $mountunitname]);
|
||||
run_command(['systemctl', 'disable', $mountunitname]);
|
||||
|
||||
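# a missing unit file (ENOENT) is tolerated here; any other unlink error is fatal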
unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n";
|
||||
|
||||
my $config_err;
|
||||
if ($param->{'cleanup-config'}) {
|
||||
my $match = sub {
|
||||
my ($scfg) = @_;
|
||||
return $scfg->{type} eq 'dir' && $scfg->{path} eq $path;
|
||||
};
|
||||
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
|
||||
warn $config_err = $@ if $@;
|
||||
}
|
||||
|
||||
if ($to_wipe) {
|
||||
PVE::Diskmanage::wipe_blockdev($to_wipe);
|
||||
PVE::Diskmanage::udevadm_trigger($to_wipe);
|
||||
}
|
||||
|
||||
die "config cleanup failed - $config_err" if $config_err;
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('dirremove', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
1;
src/PVE/API2/Disks/LVM.pm (new file, 281 lines)
@@ -0,0 +1,281 @@
|
||||
package PVE::API2::Disks::LVM;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::Storage::LVMPlugin;
|
||||
use PVE::Diskmanage;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::API2::Storage::Config;
|
||||
use PVE::Tools qw(lock_file run_command);
|
||||
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::RESTHandler;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
description => "List LVM Volume Groups",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
leaf => {
|
||||
type => 'boolean',
|
||||
},
|
||||
children => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
leaf => {
|
||||
type => 'boolean',
|
||||
},
|
||||
name => {
|
||||
type => 'string',
|
||||
description => 'The name of the volume group',
|
||||
},
|
||||
size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the volume group in bytes',
|
||||
},
|
||||
free => {
|
||||
type => 'integer',
|
||||
description => 'The free bytes in the volume group',
|
||||
},
|
||||
children => {
|
||||
optional => 1,
|
||||
type => 'array',
|
||||
description => 'The underlying physical volumes',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
leaf => {
|
||||
type => 'boolean',
|
||||
},
|
||||
name => {
|
||||
type => 'string',
|
||||
description => 'The name of the physical volume',
|
||||
},
|
||||
size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the physical volume in bytes',
|
||||
},
|
||||
free => {
|
||||
type => 'integer',
|
||||
description => 'The free bytes in the physical volume',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $result = [];
|
||||
|
||||
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
|
||||
|
||||
foreach my $vg_name (sort keys %$vgs) {
|
||||
my $vg = $vgs->{$vg_name};
|
||||
$vg->{name} = $vg_name;
|
||||
$vg->{leaf} = 0;
|
||||
foreach my $pv (@{$vg->{pvs}}) {
|
||||
$pv->{leaf} = 1;
|
||||
}
|
||||
$vg->{children} = delete $vg->{pvs};
|
||||
push @$result, $vg;
|
||||
}
|
||||
|
||||
return {
|
||||
leaf => 0,
|
||||
children => $result,
|
||||
};
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Create an LVM Volume Group",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
device => {
|
||||
type => 'string',
|
||||
description => 'The block device you want to create the volume group on',
|
||||
},
|
||||
add_storage => {
|
||||
description => "Configure storage using the Volume Group",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $dev = $param->{device};
|
||||
my $node = $param->{node};
|
||||
|
||||
$dev = PVE::Diskmanage::verify_blockdev_path($dev);
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
my $storage_params = {
|
||||
type => 'lvm',
|
||||
vgname => $name,
|
||||
storage => $name,
|
||||
content => 'rootdir,images',
|
||||
shared => 0,
|
||||
nodes => $node,
|
||||
};
|
||||
my $verify_params = [qw(vgname)];
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
1,
|
||||
);
|
||||
}
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
die "volume group with name '${name}' already exists on node '${node}'\n"
|
||||
if PVE::Storage::LVMPlugin::lvm_vgs()->{$name};
|
||||
|
||||
if (PVE::Diskmanage::is_partition($dev)) {
|
||||
eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
|
||||
|
||||
PVE::Diskmanage::udevadm_trigger($dev);
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('lvmcreate', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '{name}',
|
||||
method => 'DELETE',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Remove an LVM Volume Group.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
'cleanup-config' => {
|
||||
description => "Marks associated storage(s) as not available on this node anymore ".
|
||||
"or removes them from the configuration (if configured for this node only).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'cleanup-disks' => {
|
||||
description => "Also wipe disks so they can be repurposed afterwards.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $node = $param->{node};
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
|
||||
die "no such volume group '$name'\n" if !$vgs->{$name};
|
||||
|
||||
PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name);
|
||||
|
||||
my $config_err;
|
||||
if ($param->{'cleanup-config'}) {
|
||||
my $match = sub {
|
||||
my ($scfg) = @_;
|
||||
return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name;
|
||||
};
|
||||
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
|
||||
warn $config_err = $@ if $@;
|
||||
}
|
||||
|
||||
if ($param->{'cleanup-disks'}) {
|
||||
my $wiped = [];
|
||||
eval {
|
||||
for my $pv ($vgs->{$name}->{pvs}->@*) {
|
||||
my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
|
||||
PVE::Diskmanage::wipe_blockdev($dev);
|
||||
push $wiped->@*, $dev;
|
||||
}
|
||||
};
|
||||
my $err = $@;
|
||||
PVE::Diskmanage::udevadm_trigger($wiped->@*);
|
||||
die "cleanup failed - $err" if $err;
|
||||
}
|
||||
|
||||
die "config cleanup failed - $config_err" if $config_err;
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('lvmremove', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
1;
src/PVE/API2/Disks/LVMThin.pm (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
package PVE::API2::Disks::LVMThin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::Storage::LvmThinPlugin;
|
||||
use PVE::Diskmanage;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::API2::Storage::Config;
|
||||
use PVE::Storage;
|
||||
use PVE::Tools qw(run_command lock_file);
|
||||
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::RESTHandler;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
description => "List LVM thinpools",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
lv => {
|
||||
type => 'string',
|
||||
description => 'The name of the thinpool.',
|
||||
},
|
||||
vg => {
|
||||
type => 'string',
|
||||
description => 'The associated volume group.',
|
||||
},
|
||||
lv_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the thinpool in bytes.',
|
||||
},
|
||||
used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the thinpool.',
|
||||
},
|
||||
metadata_size => {
|
||||
type => 'integer',
|
||||
description => 'The size of the metadata lv in bytes.',
|
||||
},
|
||||
metadata_used => {
|
||||
type => 'integer',
|
||||
description => 'The used bytes of the metadata lv.',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Create an LVM thinpool",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
device => {
|
||||
type => 'string',
|
||||
description => 'The block device you want to create the thinpool on.',
|
||||
},
|
||||
add_storage => {
|
||||
description => "Configure storage using the thinpool.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $dev = $param->{device};
|
||||
my $node = $param->{node};
|
||||
|
||||
$dev = PVE::Diskmanage::verify_blockdev_path($dev);
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
my $storage_params = {
|
||||
type => 'lvmthin',
|
||||
vgname => $name,
|
||||
thinpool => $name,
|
||||
storage => $name,
|
||||
content => 'rootdir,images',
|
||||
nodes => $node,
|
||||
};
|
||||
my $verify_params = [qw(vgname thinpool)];
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
1,
|
||||
);
|
||||
}
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
die "volume group with name '${name}' already exists on node '${node}'\n"
|
||||
if PVE::Storage::LVMPlugin::lvm_vgs()->{$name};
|
||||
|
||||
if (PVE::Diskmanage::is_partition($dev)) {
|
||||
eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
|
||||
my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
|
||||
# keep some free space just in case
|
||||
my $datasize = $pv->{size} - 128*1024;
|
||||
# default to 1% for metadata
|
||||
my $metadatasize = $datasize/100;
|
||||
# but at least 1G, as recommended in lvmthin man
|
||||
$metadatasize = 1024*1024 if $metadatasize < 1024*1024;
|
||||
# but at most 16G, which is the current lvm max
|
||||
$metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024;
|
||||
# shrink data by needed amount for metadata
|
||||
$datasize -= 2*$metadatasize;
|
||||
|
||||
run_command([
|
||||
'/sbin/lvcreate',
|
||||
'--type', 'thin-pool',
|
||||
"-L${datasize}K",
|
||||
'--poolmetadatasize', "${metadatasize}K",
|
||||
'-n', $name,
|
||||
$name
|
||||
]);
|
||||
|
||||
PVE::Diskmanage::udevadm_trigger($dev);
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '{name}',
|
||||
method => 'DELETE',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Remove an LVM thin pool.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
'volume-group' => get_standard_option('pve-storage-id'),
|
||||
'cleanup-config' => {
|
||||
description => "Marks associated storage(s) as not available on this node anymore ".
|
||||
"or removes them from the configuration (if configured for this node only).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'cleanup-disks' => {
|
||||
description => "Also wipe disks so they can be repurposed afterwards.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $vg = $param->{'volume-group'};
|
||||
my $lv = $param->{name};
|
||||
my $node = $param->{node};
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools();
|
||||
|
||||
die "no such thin pool ${vg}/${lv}\n"
|
||||
if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;
|
||||
|
||||
run_command(['lvremove', '-y', "${vg}/${lv}"]);
|
||||
|
||||
my $config_err;
|
||||
if ($param->{'cleanup-config'}) {
|
||||
my $match = sub {
|
||||
my ($scfg) = @_;
|
||||
return $scfg->{type} eq 'lvmthin'
|
||||
&& $scfg->{vgname} eq $vg
|
||||
&& $scfg->{thinpool} eq $lv;
|
||||
};
|
||||
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
|
||||
warn $config_err = $@ if $@;
|
||||
}
|
||||
|
||||
if ($param->{'cleanup-disks'}) {
|
||||
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
|
||||
|
||||
die "no such volume group '$vg'\n" if !$vgs->{$vg};
|
||||
die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;
|
||||
|
||||
my $wiped = [];
|
||||
eval {
|
||||
for my $pv ($vgs->{$vg}->{pvs}->@*) {
|
||||
my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
|
||||
PVE::Diskmanage::wipe_blockdev($dev);
|
||||
push $wiped->@*, $dev;
|
||||
}
|
||||
};
|
||||
my $err = $@;
|
||||
PVE::Diskmanage::udevadm_trigger($wiped->@*);
|
||||
die "cleanup failed - $err" if $err;
|
||||
}
|
||||
|
||||
die "config cleanup failed - $config_err" if $config_err;
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
|
||||
}});
|
||||
|
||||
1;
src/PVE/API2/Disks/Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
SOURCES= LVM.pm\
	 LVMThin.pm\
	 ZFS.pm\
	 Directory.pm

.PHONY: install
install:
	for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/API2/Disks/$$i; done
src/PVE/API2/Disks/ZFS.pm (new file, 612 lines)
@@ -0,0 +1,612 @@
|
||||
package PVE::API2::Disks::ZFS;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::Diskmanage;
|
||||
use PVE::JSONSchema qw(get_standard_option parse_property_string);
|
||||
use PVE::Systemd;
|
||||
use PVE::API2::Storage::Config;
|
||||
use PVE::Storage;
|
||||
use PVE::Tools qw(run_command lock_file trim);
|
||||
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::RESTHandler;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
my $ZPOOL = '/sbin/zpool';
|
||||
my $ZFS = '/sbin/zfs';
|
||||
|
||||
sub get_pool_data {
|
||||
die "zfsutils-linux not installed\n" if ! -f $ZPOOL;
|
||||
|
||||
my $propnames = [qw(name size alloc free frag dedup health)];
|
||||
my $numbers = {
|
||||
size => 1,
|
||||
alloc => 1,
|
||||
free => 1,
|
||||
frag => 1,
|
||||
dedup => 1,
|
||||
};
|
||||
|
||||
my $pools = [];
|
||||
run_command([$ZPOOL, 'list', '-HpPLo', join(',', @$propnames)], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
|
||||
my @props = split('\s+', trim($line));
|
||||
my $pool = {};
|
||||
for (my $i = 0; $i < scalar(@$propnames); $i++) {
|
||||
if ($numbers->{$propnames->[$i]}) {
|
||||
$pool->{$propnames->[$i]} = $props[$i] + 0;
|
||||
} else {
|
||||
$pool->{$propnames->[$i]} = $props[$i];
|
||||
}
|
||||
}
|
||||
|
||||
push @$pools, $pool;
|
||||
});
|
||||
|
||||
return $pools;
|
||||
}
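# Example, assuming a single healthy pool (values are illustrative): the
# command prints one tab-separated line per pool, e.g.
#   rpool  10737418240  1073741824  9663676416  5  1.00  ONLINE
# which this helper returns as
#   [{ name => 'rpool', size => 10737418240, alloc => 1073741824,
#      free => 9663676416, frag => 5, dedup => 1, health => 'ONLINE' }]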
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
description => "List Zpools.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
name => {
|
||||
type => 'string',
|
||||
description => "",
|
||||
},
|
||||
size => {
|
||||
type => 'integer',
|
||||
description => "",
|
||||
},
|
||||
alloc => {
|
||||
type => 'integer',
|
||||
description => "",
|
||||
},
|
||||
free => {
|
||||
type => 'integer',
|
||||
description => "",
|
||||
},
|
||||
frag => {
|
||||
type => 'integer',
|
||||
description => "",
|
||||
},
|
||||
dedup => {
|
||||
type => 'number',
|
||||
description => "",
|
||||
},
|
||||
health => {
|
||||
type => 'string',
|
||||
description => "",
|
||||
},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{name}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
return get_pool_data();
|
||||
}});
|
||||
|
||||
sub preparetree {
|
||||
my ($el) = @_;
|
||||
delete $el->{lvl};
|
||||
if ($el->{children} && scalar(@{$el->{children}})) {
|
||||
$el->{leaf} = 0;
|
||||
foreach my $child (@{$el->{children}}) {
|
||||
preparetree($child);
|
||||
}
|
||||
} else {
|
||||
$el->{leaf} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'detail',
|
||||
path => '{name}',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
|
||||
},
|
||||
description => "Get details about a zpool.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
name => {
|
||||
type => 'string',
|
||||
description => 'The name of the zpool.',
|
||||
},
|
||||
state => {
|
||||
type => 'string',
|
||||
description => 'The state of the zpool.',
|
||||
},
|
||||
status => {
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
description => 'Information about the state of the zpool.',
|
||||
},
|
||||
action => {
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
description => 'Information about the recommended action to fix the state.',
|
||||
},
|
||||
scan => {
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
description => 'Information about the last/current scrub.',
|
||||
},
|
||||
errors => {
|
||||
type => 'string',
|
||||
description => 'Information about the errors on the zpool.',
|
||||
},
|
||||
children => {
|
||||
type => 'array',
|
||||
description => "The pool configuration information, including the vdevs for each section (e.g. spares, cache), may be nested.",
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
name => {
|
||||
type => 'string',
|
||||
description => 'The name of the vdev or section.',
|
||||
},
|
||||
state => {
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
description => 'The state of the vdev.',
|
||||
},
|
||||
read => {
|
||||
optional => 1,
|
||||
type => 'number',
|
||||
},
|
||||
write => {
|
||||
optional => 1,
|
||||
type => 'number',
|
||||
},
|
||||
cksum => {
|
||||
optional => 1,
|
||||
type => 'number',
|
||||
},
|
||||
msg => {
|
||||
type => 'string',
|
||||
description => 'An optional message about the vdev.'
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
if (!-f $ZPOOL) {
|
||||
die "zfsutils-linux not installed\n";
|
||||
}
|
||||
|
||||
my $cmd = [$ZPOOL, 'status', '-P', $param->{name}];
|
||||
|
||||
my $pool = {
|
||||
lvl => 0,
|
||||
};
|
||||
|
||||
my $curfield;
|
||||
my $config = 0;
|
||||
|
||||
my $stack = [$pool];
|
||||
my $curlvl = 0;
|
||||
|
||||
run_command($cmd, outfunc => sub {
|
||||
my ($line) = @_;
|
||||
|
||||
if ($line =~ m/^\s*(\S+): (\S+.*)$/) {
|
||||
$curfield = $1;
|
||||
$pool->{$curfield} = $2;
|
||||
|
||||
$config = 0 if $curfield eq 'errors';
|
||||
} elsif (!$config && $line =~ m/^\s+(\S+.*)$/) {
|
||||
$pool->{$curfield} .= " " . $1;
|
||||
} elsif (!$config && $line =~ m/^\s*config:/) {
|
||||
$config = 1;
|
||||
} elsif ($config && $line =~ m/^(\s+)(\S+)\s*(\S+)?(?:\s+(\S+)\s+(\S+)\s+(\S+))?\s*(.*)$/) {
|
||||
my ($space, $name, $state, $read, $write, $cksum, $msg) = ($1, $2, $3, $4, $5, $6, $7);
|
||||
if ($name ne "NAME") {
|
||||
my $lvl = int(length($space) / 2) + 1; # two spaces per level
|
||||
my $vdev = {
|
||||
name => $name,
|
||||
msg => $msg,
|
||||
lvl => $lvl,
|
||||
};
|
||||
|
||||
$vdev->{state} = $state if defined($state);
|
||||
$vdev->{read} = $read + 0 if defined($read);
|
||||
$vdev->{write} = $write + 0 if defined($write);
|
||||
$vdev->{cksum} = $cksum + 0 if defined($cksum);
|
||||
|
||||
my $cur = pop @$stack;
|
||||
|
||||
if ($lvl > $curlvl) {
|
||||
$cur->{children} = [ $vdev ];
|
||||
} elsif ($lvl == $curlvl) {
|
||||
$cur = pop @$stack;
|
||||
push @{$cur->{children}}, $vdev;
|
||||
} else {
|
||||
while ($lvl <= $cur->{lvl} && $cur->{lvl} != 0) {
|
||||
$cur = pop @$stack;
|
||||
}
|
||||
push @{$cur->{children}}, $vdev;
|
||||
}
|
||||
|
||||
push @$stack, $cur;
|
||||
push @$stack, $vdev;
|
||||
$curlvl = $lvl;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
# change treenodes for extjs tree
|
||||
$pool->{name} = delete $pool->{pool};
|
||||
preparetree($pool);
|
||||
|
||||
return $pool;
|
||||
}});
|
||||
|
||||
my $draid_config_format = {
|
||||
spares => {
|
||||
type => 'integer',
|
||||
minimum => 0,
|
||||
description => 'Number of dRAID spares.',
|
||||
},
|
||||
data => {
|
||||
type => 'integer',
|
||||
minimum => 1,
|
||||
description => 'The number of data devices per redundancy group. (dRAID)',
|
||||
},
|
||||
};
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Create a ZFS pool.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
raidlevel => {
|
||||
type => 'string',
|
||||
description => 'The RAID level to use.',
|
||||
enum => [
|
||||
'single', 'mirror',
|
||||
'raid10', 'raidz', 'raidz2', 'raidz3',
|
||||
'draid', 'draid2', 'draid3',
|
||||
],
|
||||
},
|
||||
devices => {
|
||||
type => 'string', format => 'string-list',
|
||||
description => 'The block devices you want to create the zpool on.',
|
||||
},
|
||||
'draid-config' => {
|
||||
type => 'string',
|
||||
format => $draid_config_format,
|
||||
optional => 1,
|
||||
},
|
||||
ashift => {
|
||||
type => 'integer',
|
||||
minimum => 9,
|
||||
maximum => 16,
|
||||
optional => 1,
|
||||
default => 12,
|
||||
description => 'Pool sector size exponent.',
|
||||
},
|
||||
compression => {
|
||||
type => 'string',
|
||||
description => 'The compression algorithm to use.',
|
||||
enum => ['on', 'off', 'gzip', 'lz4', 'lzjb', 'zle', 'zstd'],
|
||||
optional => 1,
|
||||
default => 'on',
|
||||
},
|
||||
add_storage => {
|
||||
description => "Configure storage using the zpool.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $node = $param->{node};
|
||||
my $devs = [PVE::Tools::split_list($param->{devices})];
|
||||
my $raidlevel = $param->{raidlevel};
|
||||
my $compression = $param->{compression} // 'on';
|
||||
|
||||
my $draid_config;
|
||||
if (exists $param->{'draid-config'}) {
|
||||
die "draid-config set without using dRAID level\n" if $raidlevel !~ m/^draid/;
|
||||
$draid_config = parse_property_string($draid_config_format, $param->{'draid-config'});
|
||||
}
|
||||
|
||||
for my $dev (@$devs) {
|
||||
$dev = PVE::Diskmanage::verify_blockdev_path($dev);
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
}
|
||||
my $storage_params = {
|
||||
type => 'zfspool',
|
||||
pool => $name,
|
||||
storage => $name,
|
||||
content => 'rootdir,images',
|
||||
nodes => $node,
|
||||
};
|
||||
my $verify_params = [qw(pool)];
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
1,
|
||||
);
|
||||
}
|
||||
|
||||
my $pools = get_pool_data();
|
||||
die "pool '${name}' already exists on node '${node}'\n"
|
||||
if grep { $_->{name} eq $name } @{$pools};
|
||||
|
||||
my $numdisks = scalar(@$devs);
|
||||
my $mindisks = {
|
||||
single => 1,
|
||||
mirror => 2,
|
||||
raid10 => 4,
|
||||
raidz => 3,
|
||||
raidz2 => 4,
|
||||
raidz3 => 5,
|
||||
draid => 3,
|
||||
draid2 => 4,
|
||||
draid3 => 5,
|
||||
};
|
||||
|
||||
# sanity checks
|
||||
die "raid10 needs an even number of disks\n"
|
||||
if $raidlevel eq 'raid10' && $numdisks % 2 != 0;
|
||||
|
||||
die "please give only one disk for single disk mode\n"
|
||||
if $raidlevel eq 'single' && $numdisks > 1;
|
||||
|
||||
die "$raidlevel needs at least $mindisks->{$raidlevel} disks\n"
|
||||
if $numdisks < $mindisks->{$raidlevel};
|
||||
|
||||
# draid checks
|
||||
if ($raidlevel =~ m/^draid/) {
|
||||
# bare minimum would be two drives: one for parity & one for data, but forbid that
# because it makes no sense in practice; at least one spare disk should be used
|
||||
my $draid_min = $mindisks->{$raidlevel} - 2;
|
||||
if ($draid_config) {
|
||||
$draid_min += $draid_config->{data} || 0;
|
||||
$draid_min += $draid_config->{spares} || 0;
|
||||
}
|
||||
die "At least $draid_min disks needed for current dRAID config\n"
|
||||
if $numdisks < $draid_min;
|
||||
}
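# illustrative check, assuming raidlevel 'draid2' with draid-config 'data=4,spares=1':
# the minimum is (4 - 2) + 4 + 1 = 7 disks, matching the 'draid2:4d:1s' vdev spec
# built for zpool create further below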
|
||||
|
||||
my $code = sub {
|
||||
for my $dev (@$devs) {
|
||||
PVE::Diskmanage::assert_disk_unused($dev);
|
||||
|
||||
my $is_partition = PVE::Diskmanage::is_partition($dev);
|
||||
|
||||
if ($is_partition) {
|
||||
eval {
|
||||
PVE::Diskmanage::change_parttype($dev, '6a898cc3-1dd2-11b2-99a6-080020736631');
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
my $sysfsdev = $is_partition ? PVE::Diskmanage::get_blockdev($dev) : $dev;
|
||||
|
||||
$sysfsdev =~ s!^/dev/!/sys/block/!;
|
||||
if ($is_partition) {
|
||||
my $part = $dev =~ s!^/dev/!!r;
|
||||
$sysfsdev .= "/${part}";
|
||||
}
|
||||
|
||||
my $udevinfo = PVE::Diskmanage::get_udev_info($sysfsdev);
|
||||
$dev = $udevinfo->{by_id_link} if defined($udevinfo->{by_id_link});
|
||||
}
|
||||
|
||||
# create zpool with desired raidlevel
|
||||
my $ashift = $param->{ashift} // 12;
|
||||
|
||||
my $cmd = [$ZPOOL, 'create', '-o', "ashift=$ashift", $name];
|
||||
|
||||
if ($raidlevel eq 'raid10') {
|
||||
for (my $i = 0; $i < @$devs; $i+=2) {
|
||||
push @$cmd, 'mirror', $devs->[$i], $devs->[$i+1];
|
||||
}
|
||||
} elsif ($raidlevel eq 'single') {
|
||||
push @$cmd, $devs->[0];
|
||||
} elsif ($raidlevel =~ m/^draid/) {
|
||||
my $draid_cmd = $raidlevel;
|
||||
$draid_cmd .= ":$draid_config->{data}d" if $draid_config->{data};
|
||||
$draid_cmd .= ":$draid_config->{spares}s" if $draid_config->{spares};
|
||||
push @$cmd, $draid_cmd, @$devs;
|
||||
} else {
|
||||
push @$cmd, $raidlevel, @$devs;
|
||||
}
|
||||
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd);
|
||||
|
||||
$cmd = [$ZFS, 'set', "compression=$compression", $name];
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd);
|
||||
|
||||
if (-e '/lib/systemd/system/zfs-import@.service') {
|
||||
my $importunit = 'zfs-import@'. PVE::Systemd::escape_unit($name, undef) . '.service';
|
||||
$cmd = ['systemctl', 'enable', $importunit];
|
||||
print "# ", join(' ', @$cmd), "\n";
|
||||
run_command($cmd);
|
||||
}
|
||||
|
||||
PVE::Diskmanage::udevadm_trigger($devs->@*);
|
||||
|
||||
if ($param->{add_storage}) {
|
||||
PVE::API2::Storage::Config->create_or_update(
|
||||
$name,
|
||||
$node,
|
||||
$storage_params,
|
||||
$verify_params,
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('zfscreate', $name, $user, sub {
|
||||
PVE::Diskmanage::locked_disk_action($code);
|
||||
});
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '{name}',
|
||||
method => 'DELETE',
|
||||
proxyto => 'node',
|
||||
protected => 1,
|
||||
permissions => {
|
||||
check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
|
||||
},
|
||||
description => "Destroy a ZFS pool.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
name => get_standard_option('pve-storage-id'),
|
||||
'cleanup-config' => {
|
||||
description => "Marks associated storage(s) as not available on this node anymore ".
|
||||
"or removes them from the configuration (if configured for this node only).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'cleanup-disks' => {
|
||||
description => "Also wipe disks so they can be repurposed afterwards.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $name = $param->{name};
|
||||
my $node = $param->{node};
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Diskmanage::locked_disk_action(sub {
|
||||
my $to_wipe = [];
|
||||
if ($param->{'cleanup-disks'}) {
|
||||
# Note: combining '-o name' with -v would still print more than just the name.
|
||||
run_command(['zpool', 'list', '-vHPL', $name], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
|
||||
my ($name) = PVE::Tools::split_list($line);
|
||||
return if $name !~ m|^/dev/.+|;
|
||||
|
||||
my $dev = PVE::Diskmanage::verify_blockdev_path($name);
|
||||
my $wipe = $dev;
|
||||
|
||||
$dev =~ s|^/dev/||;
|
||||
my $info = PVE::Diskmanage::get_disks($dev, 1, 1);
|
||||
die "unable to obtain information for disk '$dev'\n" if !$info->{$dev};
|
||||
|
||||
# Wipe whole disk if usual ZFS layout with partition 9 as ZFS reserved.
|
||||
my $parent = $info->{$dev}->{parent};
|
||||
if ($parent && scalar(keys $info->%*) == 3) {
|
||||
$parent =~ s|^/dev/||;
|
||||
my $info9 = $info->{"${parent}9"};
|
||||
|
||||
$wipe = $info->{$dev}->{parent} # need leading /dev/
|
||||
if $info9 && $info9->{used} && $info9->{used} =~ m/^ZFS reserved/;
|
||||
}
|
||||
|
||||
push $to_wipe->@*, $wipe;
|
||||
});
|
||||
}
|
||||
|
||||
if (-e '/lib/systemd/system/zfs-import@.service') {
|
||||
my $importunit = 'zfs-import@' . PVE::Systemd::escape_unit($name) . '.service';
|
||||
run_command(['systemctl', 'disable', $importunit]);
|
||||
}
|
||||
|
||||
run_command(['zpool', 'destroy', $name]);
|
||||
|
||||
my $config_err;
|
||||
if ($param->{'cleanup-config'}) {
|
||||
my $match = sub {
|
||||
my ($scfg) = @_;
|
||||
return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name;
|
||||
};
|
||||
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
|
||||
warn $config_err = $@ if $@;
|
||||
}
|
||||
|
||||
eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; };
|
||||
my $err = $@;
|
||||
PVE::Diskmanage::udevadm_trigger($to_wipe->@*);
|
||||
die "cleanup failed - $err" if $err;
|
||||
|
||||
die "config cleanup failed - $config_err" if $config_err;
|
||||
});
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('zfsremove', $name, $user, $worker);
|
||||
}});
|
||||
|
||||
1;
src/PVE/API2/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
.PHONY: install
install:
	install -D -m 0644 Disks.pm ${DESTDIR}${PERLDIR}/PVE/API2/Disks.pm
	make -C Storage install
	make -C Disks install
src/PVE/API2/Storage/Config.pm (new executable file, 424 lines)
@@ -0,0 +1,424 @@
|
||||
package PVE::API2::Storage::Config;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::SafeSyslog;
|
||||
use PVE::Tools qw(extract_param extract_sensitive_params);
|
||||
use PVE::Cluster qw(cfs_read_file cfs_write_file);
|
||||
use PVE::Storage;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Storage::LVMPlugin;
|
||||
use PVE::Storage::CIFSPlugin;
|
||||
use HTTP::Status qw(:constants);
|
||||
use Storable qw(dclone);
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::RPCEnvironment;
|
||||
|
||||
use PVE::RESTHandler;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
my @ctypes = qw(images vztmpl iso backup);
|
||||
|
||||
my $storage_type_enum = PVE::Storage::Plugin->lookup_types();
|
||||
|
||||
my $api_storage_config = sub {
|
||||
my ($cfg, $storeid) = @_;
|
||||
|
||||
my $scfg = dclone(PVE::Storage::storage_config($cfg, $storeid));
|
||||
$scfg->{storage} = $storeid;
|
||||
$scfg->{digest} = $cfg->{digest};
|
||||
$scfg->{content} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'content', $scfg->{content});
|
||||
|
||||
if ($scfg->{nodes}) {
|
||||
$scfg->{nodes} = PVE::Storage::Plugin->encode_value($scfg->{type}, 'nodes', $scfg->{nodes});
|
||||
}
|
||||
|
||||
return $scfg;
|
||||
};
|
||||
|
||||
# For all storages where $match->($scfg) is true, update the node restriction to no
# longer include $node and, in case no node remains, remove the storage altogether.
|
||||
sub cleanup_storages_for_node {
|
||||
my ($self, $match, $node) = @_;
|
||||
|
||||
my $config = PVE::Storage::config();
|
||||
my $cluster_nodes = PVE::Cluster::get_nodelist();
|
||||
|
||||
for my $storeid (keys $config->{ids}->%*) {
|
||||
my $scfg = PVE::Storage::storage_config($config, $storeid);
|
||||
next if !$match->($scfg);
|
||||
|
||||
my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
|
||||
next if !$nodes->{$node}; # not configured on $node, so nothing to do
|
||||
delete $nodes->{$node};
|
||||
|
||||
if (scalar(keys $nodes->%*) > 0) {
|
||||
$self->update({
|
||||
nodes => join(',', sort keys $nodes->%*),
|
||||
storage => $storeid,
|
||||
});
|
||||
} else {
|
||||
$self->delete({storage => $storeid});
|
||||
}
|
||||
}
|
||||
}
|
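A minimal usage sketch of this helper, mirroring the call in the ZFS removal worker earlier in this patch; the storage type, pool and node names are hypothetical, and callers are expected to guard the call with eval:

my $match = sub {
    my ($scfg) = @_;
    return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq 'tank'; # hypothetical pool
};
eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, 'pve-node1') };
warn $@ if $@;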
||||
|
||||
# Decides if a storage needs to be created or updated. An update is needed if
# the storage already has a node list configured; in that case the current node
# is added to it.
# The verify_params parameter is an array of parameter names that need to match
# if there already is a storage config of the same name present. This is
# mainly intended for local storage types, as certain parameters need to be the
# same. For example 'pool' for ZFS, 'vg_name' for LVM, ...
# Set the dryrun parameter to only verify the parameters without updating or
# creating the storage.
|
||||
sub create_or_update {
|
||||
my ($self, $sid, $node, $storage_params, $verify_params, $dryrun) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $sid, 1);
|
||||
|
||||
if ($scfg) {
|
||||
die "storage config for '${sid}' exists but no parameters to verify were provided\n"
|
||||
if !$verify_params;
|
||||
|
||||
$node = PVE::INotify::nodename() if !$node || ($node eq 'localhost');
|
||||
die "Storage ID '${sid}' already exists on node ${node}\n"
|
||||
if !defined($scfg->{nodes}) || $scfg->{nodes}->{$node};
|
||||
|
||||
push @$verify_params, 'type';
|
||||
for my $key (@$verify_params) {
|
||||
if (!defined($scfg->{$key})) {
|
||||
die "Option '${key}' is not configured for storage '$sid', "
|
||||
."expected it to be '$storage_params->{$key}'";
|
||||
}
|
||||
if ($storage_params->{$key} ne $scfg->{$key}) {
|
||||
die "Option '${key}' ($storage_params->{$key}) does not match "
|
||||
."existing storage configuration '$scfg->{$key}'\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!$dryrun) {
|
||||
if ($scfg) {
|
||||
if ($scfg->{nodes}) {
|
||||
$scfg->{nodes}->{$node} = 1;
|
||||
$self->update({
|
||||
nodes => join(',', sort keys $scfg->{nodes}->%*),
|
||||
storage => $sid,
|
||||
});
|
||||
print "Added '${node}' to nodes for storage '${sid}'\n";
|
||||
}
|
||||
} else {
|
||||
$self->create($storage_params);
|
||||
}
|
||||
}
|
||||
}
|
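A hedged sketch of the calling convention described by the comment above create_or_update; all IDs and names here are hypothetical, and the real callers are the node disk-management endpoints:

my $sid = 'local-zfs2';        # hypothetical storage ID
my $storage_params = {
    type => 'zfspool',
    storage => $sid,
    pool => 'tank2',           # hypothetical pool name
    content => 'rootdir,images',
    nodes => 'pve-node1',      # hypothetical node name
};
my $verify_params = [qw(pool)];

# verify only (dry run) to fail early, then create the storage or extend its node list
PVE::API2::Storage::Config->create_or_update($sid, 'pve-node1', $storage_params, $verify_params, 1);
PVE::API2::Storage::Config->create_or_update($sid, 'pve-node1', $storage_params, $verify_params);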
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "Storage index.",
|
||||
permissions => {
|
||||
description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
|
||||
user => 'all',
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
type => {
|
||||
description => "Only list storage of specific type",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => { storage => { type => 'string'} },
|
||||
},
|
||||
links => [ { rel => 'child', href => "{storage}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my @sids = PVE::Storage::storage_ids($cfg);
|
||||
|
||||
my $res = [];
|
||||
foreach my $storeid (@sids) {
|
||||
my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ];
|
||||
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
|
||||
|
||||
my $scfg = &$api_storage_config($cfg, $storeid);
|
||||
next if $param->{type} && $param->{type} ne $scfg->{type};
|
||||
push @$res, $scfg;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'read',
|
||||
path => '{storage}',
|
||||
method => 'GET',
|
||||
description => "Read storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
},
|
||||
returns => { type => 'object' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
return &$api_storage_config($cfg, $param->{storage});
|
||||
}});
|
||||
|
||||
my $sensitive_params = [qw(password encryption-key master-pubkey keyring)];
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
protected => 1,
|
||||
path => '',
|
||||
method => 'POST',
|
||||
description => "Create a new storage.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => PVE::Storage::Plugin->createSchema(),
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
# prevent an empty nodelist
# FIXME: section config 'create' should never need an empty entity
|
||||
delete $param->{nodes} if !$param->{nodes};
|
||||
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, []);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 1, 1);
|
||||
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
if (my $scfg = PVE::Storage::storage_config($cfg, $storeid, 1)) {
|
||||
die "storage ID '$storeid' already defined\n";
|
||||
}
|
||||
|
||||
$cfg->{ids}->{$storeid} = $opts;
|
||||
|
||||
$returned_config = $plugin->on_add_hook($storeid, $opts, %$sensitive);
|
||||
|
||||
eval {
|
||||
# try to activate if enabled on local node,
|
||||
# we only do this to detect errors/problems sooner
|
||||
if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) {
|
||||
PVE::Storage::activate_storage($cfg, $storeid);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
eval { $plugin->on_delete_hook($storeid, $opts) };
|
||||
warn "$@\n" if $@;
|
||||
die $err;
|
||||
}
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "create storage failed");
|
||||
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'update',
|
||||
protected => 1,
|
||||
path => '{storage}',
|
||||
method => 'PUT',
|
||||
description => "Update storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => PVE::Storage::Plugin->updateSchema(),
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
storage => {
|
||||
description => "The ID of the created storage.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "The type of the created storage.",
|
||||
type => 'string',
|
||||
enum => $storage_type_enum,
|
||||
},
|
||||
config => {
|
||||
description => "Partial, possible server generated, configuration properties.",
|
||||
type => 'object',
|
||||
optional => 1,
|
||||
additionalProperties => 1,
|
||||
properties => {
|
||||
'encryption-key' => {
|
||||
description => "The, possible auto-generated, encryption-key.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $digest = extract_param($param, 'digest');
|
||||
my $delete = extract_param($param, 'delete');
|
||||
my $type;
|
||||
|
||||
if ($delete) {
|
||||
$delete = [ PVE::Tools::split_list($delete) ];
|
||||
}
|
||||
|
||||
my $returned_config;
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::SectionConfig::assert_if_modified($cfg, $digest);
|
||||
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
$type = $scfg->{type};
|
||||
|
||||
my $sensitive = extract_sensitive_params($param, $sensitive_params, $delete);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($type);
|
||||
my $opts = $plugin->check_config($storeid, $param, 0, 1);
|
||||
|
||||
if ($delete) {
|
||||
my $options = $plugin->private()->{options}->{$type};
|
||||
foreach my $k (@$delete) {
|
||||
my $d = $options->{$k} || die "no such option '$k'\n";
|
||||
die "unable to delete required option '$k'\n" if !$d->{optional};
|
||||
die "unable to delete fixed option '$k'\n" if $d->{fixed};
|
||||
die "cannot set and delete property '$k' at the same time!\n"
|
||||
if defined($opts->{$k});
|
||||
|
||||
delete $scfg->{$k};
|
||||
}
|
||||
}
|
||||
|
||||
$returned_config = $plugin->on_update_hook($storeid, $opts, %$sensitive);
|
||||
|
||||
for my $k (keys %$opts) {
|
||||
$scfg->{$k} = $opts->{$k};
|
||||
}
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "update storage failed");
|
||||
|
||||
my $res = {
|
||||
storage => $storeid,
|
||||
type => $type,
|
||||
};
|
||||
$res->{config} = $returned_config if $returned_config;
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
protected => 1,
|
||||
path => '{storage}', # /storage/config/{storage}
|
||||
method => 'DELETE',
|
||||
description => "Delete storage configuration.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
PVE::Storage::lock_storage_config(sub {
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
die "can't remove storage - storage is used as base of another storage\n"
|
||||
if PVE::Storage::storage_is_used($cfg, $storeid);
|
||||
|
||||
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
|
||||
|
||||
$plugin->on_delete_hook($storeid, $scfg);
|
||||
|
||||
delete $cfg->{ids}->{$storeid};
|
||||
|
||||
PVE::Storage::write_config($cfg);
|
||||
|
||||
}, "delete storage failed");
|
||||
|
||||
PVE::AccessControl::remove_storage_access($storeid);
|
||||
|
||||
return undef;
|
||||
}});
|
||||
|
||||
1;
|
||||
560
src/PVE/API2/Storage/Content.pm
Normal file
@@ -0,0 +1,560 @@
|
||||
package PVE::API2::Storage::Content;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Data::Dumper;
|
||||
|
||||
use PVE::SafeSyslog;
|
||||
use PVE::Cluster;
|
||||
use PVE::Storage;
|
||||
use PVE::INotify;
|
||||
use PVE::Exception qw(raise_param_exc);
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::RESTHandler;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::SSHInfo;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "List storage content.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
content => {
|
||||
description => "Only list content of this type.",
|
||||
type => 'string', format => 'pve-storage-content',
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_content_type,
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only list images for this VM",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Volume identifier.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => {
|
||||
description => "Associated Owner VMID.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
parent => {
|
||||
description => "Volume identifier of parent (for linked cloned).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
'format' => {
|
||||
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins " .
|
||||
"do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
ctime => {
|
||||
description => "Creation time (seconds since the UNIX Epoch).",
|
||||
type => 'integer',
|
||||
minimum => 0,
|
||||
optional => 1,
|
||||
},
|
||||
notes => {
|
||||
description => "Optional notes. If they contain multiple lines, only the first one is returned here.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
encrypted => {
|
||||
description => "If whole backup is encrypted, value is the fingerprint or '1' "
|
||||
." if encrypted. Only useful for the Proxmox Backup Server storage type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
verification => {
|
||||
description => "Last backup verification result, only useful for PBS storages.",
|
||||
type => 'object',
|
||||
properties => {
|
||||
state => {
|
||||
description => "Last backup verification state.",
|
||||
type => 'string',
|
||||
},
|
||||
upid => {
|
||||
description => "Last backup verification UPID.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{volid}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $storeid = $param->{storage};
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vollist = PVE::Storage::volume_list($cfg, $storeid, $param->{vmid}, $param->{content});
|
||||
|
||||
my $res = [];
|
||||
foreach my $item (@$vollist) {
|
||||
eval { PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $item->{volid}); };
|
||||
next if $@;
|
||||
$item->{vmid} = int($item->{vmid}) if defined($item->{vmid});
|
||||
$item->{size} = int($item->{size}) if defined($item->{size});
|
||||
$item->{used} = int($item->{used}) if defined($item->{used});
|
||||
push @$res, $item;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'create',
|
||||
path => '',
|
||||
method => 'POST',
|
||||
description => "Allocate disk images.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.AllocateSpace']],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
filename => {
|
||||
description => "The name of the file to create.",
|
||||
type => 'string',
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Specify owner VM",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
size => {
|
||||
description => "Size in kilobyte (1024 bytes). Optional suffixes 'M' (megabyte, 1024K) and 'G' (gigabyte, 1024M)",
|
||||
type => 'string',
|
||||
pattern => '\d+[MG]?',
|
||||
},
|
||||
'format' => {
|
||||
type => 'string',
|
||||
enum => ['raw', 'qcow2', 'subvol'],
|
||||
requires => 'size',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $storeid = $param->{storage};
|
||||
my $name = $param->{filename};
|
||||
my $sizestr = $param->{size};
|
||||
|
||||
my $size;
|
||||
if ($sizestr =~ m/^\d+$/) {
|
||||
$size = $sizestr;
|
||||
} elsif ($sizestr =~ m/^(\d+)M$/) {
|
||||
$size = $1 * 1024;
|
||||
} elsif ($sizestr =~ m/^(\d+)G$/) {
|
||||
$size = $1 * 1024 * 1024;
|
||||
} else {
|
||||
raise_param_exc({ size => "unable to parse size '$sizestr'" });
|
||||
}
|
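# Worked example of the size parsing above, assuming the documented KiB unit:
# '4096' stays 4096 KiB, '512M' becomes 524288 KiB, '4G' becomes 4194304 KiB.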
||||
|
||||
# extract FORMAT from name
|
||||
if ($name =~ m/\.(raw|qcow2|vmdk)$/) {
|
||||
my $fmt = $1;
|
||||
|
||||
raise_param_exc({ format => "different storage formats ($param->{format} != $fmt)" })
|
||||
if $param->{format} && $param->{format} ne $fmt;
|
||||
|
||||
$param->{format} = $fmt;
|
||||
}
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $volid = PVE::Storage::vdisk_alloc ($cfg, $storeid, $param->{vmid},
|
||||
$param->{format},
|
||||
$name, $size);
|
||||
|
||||
return $volid;
|
||||
}});
|
||||
|
||||
# We allow passing volume names (without the storage prefix) if the storage
# is specified as a separate parameter.
|
||||
my $real_volume_id = sub {
|
||||
my ($storeid, $volume) = @_;
|
||||
|
||||
my $volid;
|
||||
|
||||
if ($volume =~ m/:/) {
|
||||
eval {
|
||||
my ($sid, $volname) = PVE::Storage::parse_volume_id ($volume);
|
||||
die "storage ID mismatch ($sid != $storeid)\n"
|
||||
if $storeid && $sid ne $storeid;
|
||||
$volid = $volume;
|
||||
$storeid = $sid;
|
||||
};
|
||||
raise_param_exc({ volume => $@ }) if $@;
|
||||
|
||||
} else {
|
||||
raise_param_exc({ volume => "no storage specified - incomplete volume ID" })
|
||||
if !$storeid;
|
||||
|
||||
$volid = "$storeid:$volume";
|
||||
}
|
||||
|
||||
return wantarray ? ($volid, $storeid) : $volid;
|
||||
};
|
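A behaviour sketch for the $real_volume_id helper above; the volume IDs are hypothetical and the expected results are shown as comments:

# $real_volume_id->('local', 'local:103/vm-103-disk-1.raw')  returns ('local:103/vm-103-disk-1.raw', 'local')
# $real_volume_id->('local', '103/vm-103-disk-1.raw')        returns ('local:103/vm-103-disk-1.raw', 'local')
# $real_volume_id->('local', 'other:103/vm-103-disk-1.raw')  raises a parameter exception (storage ID mismatch)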
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'info',
|
||||
path => '{volume}',
|
||||
method => 'GET',
|
||||
description => "Get volume attributes",
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
path => {
|
||||
description => "The Path",
|
||||
type => 'string',
|
||||
},
|
||||
size => {
|
||||
description => "Volume size in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
used => {
|
||||
description => "Used space. Please note that most storage plugins " .
|
||||
"do not report anything useful here.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
},
|
||||
format => {
|
||||
description => "Format identifier ('raw', 'qcow2', 'subvol', 'iso', 'tgz' ...)",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "Optional notes.",
|
||||
optional => 1,
|
||||
type => 'string',
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
|
||||
my $path = PVE::Storage::path($cfg, $volid);
|
||||
my ($size, $format, $used, $parent) = PVE::Storage::volume_size_info($cfg, $volid);
|
||||
die "volume_size_info on '$volid' failed\n" if !($format && $size);
|
||||
|
||||
my $entry = {
|
||||
path => $path,
|
||||
size => int($size), # cast to integer in case it was changed to a string previously
|
||||
used => int($used),
|
||||
format => $format,
|
||||
};
|
||||
|
||||
for my $attribute (qw(notes protected)) {
|
||||
# keep going if fetching an optional attribute fails
|
||||
eval {
|
||||
my $value = PVE::Storage::get_volume_attribute($cfg, $volid, $attribute);
|
||||
$entry->{$attribute} = $value if defined($value);
|
||||
};
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
return $entry;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'updateattributes',
|
||||
path => '{volume}',
|
||||
method => 'PUT',
|
||||
description => "Update volume attributes",
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1 }),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
notes => {
|
||||
description => "The new notes.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
protected => {
|
||||
description => "Protection status. Currently only supported for backups.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $authuser, $cfg, undef, $volid);
|
||||
|
||||
for my $attr (qw(notes protected)) {
|
||||
if (exists $param->{$attr}) {
|
||||
PVE::Storage::update_volume_attribute($cfg, $volid, $attr, $param->{$attr});
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '{volume}',
|
||||
method => 'DELETE',
|
||||
description => "Delete volume",
|
||||
permissions => {
|
||||
description => "You need 'Datastore.Allocate' privilege on the storage (or 'Datastore.AllocateSpace' for backup volumes if you have VM.Backup privilege on the VM).",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
delay => {
|
||||
type => 'integer',
|
||||
description => "Time to wait for the task to finish. We return 'null' if the task finish within that time.",
|
||||
minimum => 1,
|
||||
maximum => 30,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string', optional => 1, },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my ($volid, $storeid) = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
|
||||
my ($path, $ownervm, $vtype) = PVE::Storage::path($cfg, $volid);
|
||||
if ($vtype eq 'backup' && $ownervm) {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
|
||||
$rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']);
|
||||
} else {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
|
||||
}
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Storage::vdisk_free ($cfg, $volid);
|
||||
print "Removed volume '$volid'\n";
|
||||
if ($vtype eq 'backup'
|
||||
&& $path =~ /(.*\/vzdump-\w+-\d+-\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2})[^\/]+$/) {
|
||||
# Remove log file #318 and notes file #3972 if they still exist
|
||||
PVE::Storage::archive_auxiliaries_remove($path);
|
||||
}
|
||||
};
|
||||
|
||||
my $id = (defined $ownervm ? "$ownervm@" : '') . $storeid;
|
||||
my $upid = $rpcenv->fork_worker('imgdel', $id, $authuser, $worker);
|
||||
my $background_delay = $param->{delay};
|
||||
if ($background_delay) {
|
||||
my $end_time = time() + $background_delay;
|
||||
my $currently_deleting; # not necessarily true, e.g. sequential api call from cli
|
||||
do {
|
||||
my $task = PVE::Tools::upid_decode($upid);
|
||||
$currently_deleting = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
|
||||
sleep 1 if $currently_deleting;
|
||||
} while (time() < $end_time && $currently_deleting);
|
||||
|
||||
if (!$currently_deleting) {
|
||||
my $status = PVE::Tools::upid_read_status($upid);
|
||||
chomp $status;
|
||||
return undef if !PVE::Tools::upid_status_is_error($status);
|
||||
die "$status\n";
|
||||
}
|
||||
}
|
||||
return $upid;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'copy',
|
||||
path => '{volume}',
|
||||
method => 'POST',
|
||||
description => "Copy a volume. This is experimental code - do not use.",
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', { optional => 1}),
|
||||
volume => {
|
||||
description => "Source volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
target => {
|
||||
description => "Target volume identifier",
|
||||
type => 'string',
|
||||
},
|
||||
target_node => get_standard_option('pve-node', {
|
||||
description => "Target node. Default is local node.",
|
||||
optional => 1,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'string',
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $target_node = $param->{target_node} || PVE::INotify::nodename();
|
||||
# pvesh examples
|
||||
# cd /nodes/localhost/storage/local/content
|
||||
# pve:/> create local:103/vm-103-disk-1.raw -target local:103/vm-103-disk-2.raw
|
||||
# pve:/> create 103/vm-103-disk-1.raw -target 103/vm-103-disk-3.raw
|
||||
|
||||
my $src_volid = &$real_volume_id($param->{storage}, $param->{volume});
|
||||
my $dst_volid = &$real_volume_id($param->{storage}, $param->{target});
|
||||
|
||||
print "DEBUG: COPY $src_volid TO $dst_volid\n";
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
# do all parameter checks first
|
||||
|
||||
# then do all short running task (to raise errors before we go to background)
|
||||
|
||||
# then start the worker task
|
||||
my $worker = sub {
|
||||
my $upid = shift;
|
||||
|
||||
print "DEBUG: starting worker $upid\n";
|
||||
|
||||
my ($target_sid, $target_volname) = PVE::Storage::parse_volume_id($dst_volid);
|
||||
#my $target_ip = PVE::Cluster::remote_node_ip($target_node);
|
||||
|
||||
# FIXME: get this working - it currently fails because storage_migrate() uses
# ssh to connect to the local host, which is not needed here
|
||||
my $sshinfo = PVE::SSHInfo::get_ssh_info($target_node);
|
||||
PVE::Storage::storage_migrate($cfg, $src_volid, $sshinfo, $target_sid, {'target_volname' => $target_volname});
|
||||
|
||||
print "DEBUG: end worker $upid\n";
|
||||
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
|
||||
}});
|
||||
|
||||
1;
|
||||
215
src/PVE/API2/Storage/FileRestore.pm
Normal file
@@ -0,0 +1,215 @@
|
||||
package PVE::API2::Storage::FileRestore;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use MIME::Base64;
|
||||
use PVE::Exception qw(raise_param_exc);
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::PBSClient;
|
||||
use PVE::Storage;
|
||||
use PVE::Tools qw(extract_param);
|
||||
|
||||
use PVE::RESTHandler;
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
my $parse_volname_or_id = sub {
|
||||
my ($storeid, $volume) = @_;
|
||||
|
||||
my $volid;
|
||||
my ($sid, $volname) = PVE::Storage::parse_volume_id($volume, 1);
|
||||
|
||||
if (defined($sid)) {
|
||||
raise_param_exc({ volume => "storage ID mismatch ($sid != $storeid)." })
|
||||
if $sid ne $storeid;
|
||||
|
||||
$volid = $volume;
|
||||
} elsif ($volume =~ m/^backup\//) {
|
||||
$volid = "$storeid:$volume";
|
||||
} else {
|
||||
$volid = "$storeid:backup/$volume";
|
||||
}
|
||||
|
||||
return $volid;
|
||||
};
|
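A behaviour sketch for the $parse_volname_or_id helper above; the snapshot names are hypothetical:

# $parse_volname_or_id->('pbs1', 'pbs1:backup/vm/100/2024-01-01T00:00:00Z')   => 'pbs1:backup/vm/100/2024-01-01T00:00:00Z'
# $parse_volname_or_id->('pbs1', 'backup/vm/100/2024-01-01T00:00:00Z')        => 'pbs1:backup/vm/100/2024-01-01T00:00:00Z'
# $parse_volname_or_id->('pbs1', 'vm/100/2024-01-01T00:00:00Z')               => 'pbs1:backup/vm/100/2024-01-01T00:00:00Z'
# $parse_volname_or_id->('pbs1', 'other:backup/vm/100/2024-01-01T00:00:00Z')  => raises a parameter exception (storage ID mismatch)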
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'list',
|
||||
path => 'list',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
description => "List files and directories for single file restore under the given path.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
volume => {
|
||||
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file being listed, or "/".',
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
filepath => {
|
||||
description => "base64 path of the current entry",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "Entry type.",
|
||||
type => 'string',
|
||||
},
|
||||
text => {
|
||||
description => "Entry display text.",
|
||||
type => 'string',
|
||||
},
|
||||
leaf => {
|
||||
description => "If this entry is a leaf in the directory graph.",
|
||||
type => 'boolean',
|
||||
},
|
||||
size => {
|
||||
description => "Entry file size.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
mtime => {
|
||||
description => "Entry last-modified time (unix timestamp).",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
protected => 1,
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $path = extract_param($param, 'filepath') || "/";
|
||||
my $base64 = $path ne "/";
|
||||
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
|
||||
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
|
||||
if $scfg->{type} ne 'pbs';
|
||||
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $ret = $client->file_restore_list($snap, $path, $base64, { timeout => 25 });
|
||||
|
||||
if (ref($ret) eq "HASH") {
|
||||
my $msg = $ret->{message};
|
||||
if (my $code = $ret->{code}) {
|
||||
die PVE::Exception->new("$msg\n", code => $code);
|
||||
} else {
|
||||
die "$msg\n";
|
||||
}
|
||||
} elsif (ref($ret) eq "ARRAY") {
|
||||
# 'leaf' is a proper JSON boolean, map to perl-y bool
|
||||
# TODO: make PBSClient decode all bools always as 1/0?
|
||||
foreach my $item (@$ret) {
|
||||
$item->{leaf} = $item->{leaf} ? 1 : 0;
|
||||
}
|
||||
|
||||
return $ret;
|
||||
}
|
||||
|
||||
die "invalid proxmox-file-restore output";
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'download',
|
||||
path => 'download',
|
||||
method => 'GET',
|
||||
proxyto => 'node',
|
||||
permissions => {
|
||||
description => "You need read access for the volume.",
|
||||
user => 'all',
|
||||
},
|
||||
description => "Extract a file or directory (as zip archive) from a PBS backup.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
volume => {
|
||||
description => "Backup volume ID or name. Currently only PBS snapshots are supported.",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
filepath => {
|
||||
description => 'base64-path to the directory or file to download.',
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'any', # download
|
||||
},
|
||||
protected => 1,
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $path = extract_param($param, 'filepath');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
my $volid = $parse_volname_or_id->($storeid, $param->{volume});
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
PVE::Storage::check_volume_access($rpcenv, $user, $cfg, undef, $volid, 'backup');
|
||||
|
||||
raise_param_exc({'storage' => "Only PBS storages supported for file-restore."})
|
||||
if $scfg->{type} ne 'pbs';
|
||||
|
||||
my (undef, $snap) = PVE::Storage::parse_volname($cfg, $volid);
|
||||
|
||||
my $client = PVE::PBSClient->new($scfg, $storeid);
|
||||
my $fifo = $client->file_restore_extract_prepare();
|
||||
|
||||
$rpcenv->fork_worker('pbs-download', undef, $user, sub {
|
||||
my $name = decode_base64($path);
|
||||
print "Starting download of file: $name\n";
|
||||
$client->file_restore_extract($fifo, $snap, $path, 1);
|
||||
});
|
||||
|
||||
my $ret = {
|
||||
download => {
|
||||
path => $fifo,
|
||||
stream => 1,
|
||||
'content-type' => 'application/octet-stream',
|
||||
},
|
||||
};
|
||||
return $ret;
|
||||
}});
|
||||
|
||||
1;
|
||||
6
src/PVE/API2/Storage/Makefile
Normal file
@@ -0,0 +1,6 @@

SOURCES= Content.pm Status.pm Config.pm PruneBackups.pm Scan.pm FileRestore.pm

.PHONY: install
install:
	for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/API2/Storage/$$i; done
||||
164
src/PVE/API2/Storage/PruneBackups.pm
Normal file
@@ -0,0 +1,164 @@
|
||||
package PVE::API2::Storage::PruneBackups;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::Cluster;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::RESTHandler;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::Storage;
|
||||
use PVE::Tools qw(extract_param);
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'dryrun',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "Get prune information for backups. NOTE: this is only a preview and might not be " .
|
||||
"what a subsequent prune call does if backups are removed/added in the meantime.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
'prune-backups' => get_standard_option('prune-backups', {
|
||||
description => "Use these retention options instead of those from the storage configuration.",
|
||||
optional => 1,
|
||||
}),
|
||||
type => {
|
||||
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only consider backups for this guest.",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Backup volume ID.",
|
||||
type => 'string',
|
||||
},
|
||||
'ctime' => {
|
||||
description => "Creation time of the backup (seconds since the UNIX epoch).",
|
||||
type => 'integer',
|
||||
},
|
||||
'mark' => {
|
||||
description => "Whether the backup would be kept or removed. Backups that are" .
|
||||
" protected or don't use the standard naming scheme are not removed.",
|
||||
type => 'string',
|
||||
enum => ['keep', 'remove', 'protected', 'renamed'],
|
||||
},
|
||||
type => {
|
||||
description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.",
|
||||
type => 'string',
|
||||
},
|
||||
'vmid' => {
|
||||
description => "The VM the backup belongs to.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
|
||||
return PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 1);
|
||||
}});
|
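A hedged CLI sketch of this dry-run endpoint as mounted under the node storage status API; node, storage and retention values are hypothetical, and the property string follows the standard 'prune-backups' format:

# Illustrative only, not part of this patch:
# pvesh get /nodes/pve-node1/storage/local/prunebackups --prune-backups keep-last=3,keep-weekly=2 --type qemu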
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'delete',
|
||||
path => '',
|
||||
method => 'DELETE',
|
||||
description => "Prune backups. Only those using the standard naming scheme are considered.",
|
||||
permissions => {
|
||||
description => "You need the 'Datastore.Allocate' privilege on the storage " .
|
||||
"(or if a VM ID is specified, 'Datastore.AllocateSpace' and 'VM.Backup' for the VM).",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage,
|
||||
}),
|
||||
'prune-backups' => get_standard_option('prune-backups', {
|
||||
description => "Use these retention options instead of those from the storage configuration.",
|
||||
}),
|
||||
type => {
|
||||
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only prune backups for this VM.",
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
optional => 1,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $vmid = extract_param($param, 'vmid');
|
||||
my $type = extract_param($param, 'type');
|
||||
my $storeid = extract_param($param, 'storage');
|
||||
|
||||
my $prune_backups = extract_param($param, 'prune-backups');
|
||||
$prune_backups = PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups)
|
||||
if defined($prune_backups);
|
||||
|
||||
if (defined($vmid)) {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
|
||||
$rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup']);
|
||||
} else {
|
||||
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.Allocate']);
|
||||
}
|
||||
|
||||
my $id = (defined($vmid) ? "$vmid@" : '') . $storeid;
|
||||
my $worker = sub {
|
||||
PVE::Storage::prune_backups($cfg, $storeid, $prune_backups, $vmid, $type, 0);
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('prunebackups', $id, $authuser, $worker);
|
||||
}});
|
||||
|
||||
1;
|
||||
449
src/PVE/API2/Storage/Scan.pm
Normal file
@@ -0,0 +1,449 @@
|
||||
package PVE::API2::Storage::Scan;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
# NOTE: These API endpoints are mounted by pve-manager's API2::Node module and the pvesm CLI
|
||||
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::RESTHandler;
|
||||
use PVE::SafeSyslog;
|
||||
use PVE::Storage::LVMPlugin;
|
||||
use PVE::Storage;
|
||||
use PVE::SysFSTools;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "Index of available scan methods",
|
||||
permissions => {
|
||||
user => 'all',
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
method => { type => 'string'},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{method}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = [
|
||||
{ method => 'cifs' },
|
||||
{ method => 'glusterfs' },
|
||||
{ method => 'iscsi' },
|
||||
{ method => 'lvm' },
|
||||
{ method => 'nfs' },
|
||||
{ method => 'pbs' },
|
||||
{ method => 'zfs' },
|
||||
];
|
||||
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'nfsscan',
|
||||
path => 'nfs',
|
||||
method => 'GET',
|
||||
description => "Scan remote NFS server.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
path => {
|
||||
description => "The exported path.",
|
||||
type => 'string',
|
||||
},
|
||||
options => {
|
||||
description => "NFS export options.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { path => $k, options => $res->{$k} };
|
||||
}
|
||||
return $data;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'cifsscan',
|
||||
path => 'cifs',
|
||||
method => 'GET',
|
||||
description => "Scan remote CIFS server.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User name.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
password => {
|
||||
description => "User password.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
domain => {
|
||||
description => "SMB domain (Workgroup).",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
share => {
|
||||
description => "The cifs share name.",
|
||||
type => 'string',
|
||||
},
|
||||
description => {
|
||||
description => "Descriptive text from server.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
|
||||
my $username = $param->{username};
|
||||
my $password = $param->{password};
|
||||
my $domain = $param->{domain};
|
||||
|
||||
my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain);
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
next if $k =~ m/NT_STATUS_/;
|
||||
push @$data, { share => $k, description => $res->{$k} };
|
||||
}
|
||||
|
||||
return $data;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'pbsscan',
|
||||
path => 'pbs',
|
||||
method => 'GET',
|
||||
description => "Scan remote Proxmox Backup Server.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
username => {
|
||||
description => "User-name or API token-ID.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "User password or API token secret.",
|
||||
type => 'string',
|
||||
},
|
||||
fingerprint => get_standard_option('fingerprint-sha256', {
|
||||
optional => 1,
|
||||
}),
|
||||
port => {
|
||||
description => "Optional port.",
|
||||
type => 'integer',
|
||||
minimum => 1,
|
||||
maximum => 65535,
|
||||
default => 8007,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
store => {
|
||||
description => "The datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
comment => {
|
||||
description => "Comment from server.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $password = delete $param->{password};
|
||||
|
||||
return PVE::Storage::PBSPlugin::scan_datastores($param, $password);
|
||||
}
|
||||
});
|
||||
|
||||
# Note: GlusterFS currently does not have an equivalent of showmount.
|
||||
# As a workaround, we simply use NFS showmount.
|
||||
# see http://www.gluster.org/category/volumes/
|
||||
__PACKAGE__->register_method({
|
||||
name => 'glusterfsscan',
|
||||
path => 'glusterfs',
|
||||
method => 'GET',
|
||||
description => "Scan remote GlusterFS server.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
server => {
|
||||
description => "The server address (name or IP).",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
volname => {
|
||||
description => "The volume name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $server = $param->{server};
|
||||
my $res = PVE::Storage::scan_nfs($server);
|
||||
|
||||
my $data = [];
|
||||
foreach my $path (sort keys %$res) {
|
||||
if ($path =~ m!^/([^\s/]+)$!) {
|
||||
push @$data, { volname => $1 };
|
||||
}
|
||||
}
|
||||
return $data;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'iscsiscan',
|
||||
path => 'iscsi',
|
||||
method => 'GET',
|
||||
description => "Scan remote iSCSI server.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
portal => {
|
||||
description => "The iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string', format => 'pve-storage-portal-dns',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
target => {
|
||||
description => "The iSCSI target name.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "The iSCSI portal name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = PVE::Storage::scan_iscsi($param->{portal});
|
||||
|
||||
my $data = [];
|
||||
foreach my $k (sort keys %$res) {
|
||||
push @$data, { target => $k, portal => join(',', @{$res->{$k}}) };
|
||||
}
|
||||
|
||||
return $data;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'lvmscan',
|
||||
path => 'lvm',
|
||||
method => 'GET',
|
||||
description => "List local LVM volume groups.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
vg => {
|
||||
description => "The LVM logical volume group name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = PVE::Storage::LVMPlugin::lvm_vgs();
|
||||
return PVE::RESTHandler::hash_to_array($res, 'vg');
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'lvmthinscan',
|
||||
path => 'lvmthin',
|
||||
method => 'GET',
|
||||
description => "List local LVM Thin Pools.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
vg => {
|
||||
type => 'string',
|
||||
pattern => '[a-zA-Z0-9\.\+\_][a-zA-Z0-9\.\+\_\-]+', # see lvm(8) manpage
|
||||
maxLength => 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
lv => {
|
||||
description => "The LVM Thin Pool name (LVM logical volume).",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::Storage::LvmThinPlugin::list_thinpools($param->{vg});
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'zfsscan',
|
||||
path => 'zfs',
|
||||
method => 'GET',
|
||||
description => "Scan zfs pool list on local node.",
|
||||
protected => 1,
|
||||
proxyto => "node",
|
||||
permissions => {
|
||||
check => ['perm', '/storage', ['Datastore.Allocate']],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
pool => {
|
||||
description => "ZFS pool name.",
|
||||
type => 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::Storage::scan_zfs();
|
||||
}});
|
||||
|
||||
1;
|
||||
660
src/PVE/API2/Storage/Status.pm
Normal file
@@ -0,0 +1,660 @@
|
||||
package PVE::API2::Storage::Status;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use File::Basename;
|
||||
use File::Path;
|
||||
use POSIX qw(ENOENT);
|
||||
|
||||
use PVE::Cluster;
|
||||
use PVE::Exception qw(raise_param_exc);
|
||||
use PVE::INotify;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::RESTHandler;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::RRD;
|
||||
use PVE::Tools qw(run_command);
|
||||
|
||||
use PVE::API2::Storage::Content;
|
||||
use PVE::API2::Storage::FileRestore;
|
||||
use PVE::API2::Storage::PruneBackups;
|
||||
use PVE::Storage;
|
||||
|
||||
use base qw(PVE::RESTHandler);
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Storage::PruneBackups",
|
||||
path => '{storage}/prunebackups',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Storage::Content",
|
||||
# set fragment delimiter (no subdirs) - we need that, because volume
|
||||
# IDs may contain a slash '/'
|
||||
fragmentDelimiter => '',
|
||||
path => '{storage}/content',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
subclass => "PVE::API2::Storage::FileRestore",
|
||||
path => '{storage}/file-restore',
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'index',
|
||||
path => '',
|
||||
method => 'GET',
|
||||
description => "Get status for all datastores.",
|
||||
permissions => {
|
||||
description => "Only list entries where you have 'Datastore.Audit' or 'Datastore.AllocateSpace' permissions on '/storage/<storage>'",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
description => "Only list status for specified storage",
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
content => {
|
||||
description => "Only list stores which support this content type.",
|
||||
type => 'string', format => 'pve-storage-content-list',
|
||||
optional => 1,
|
||||
completion => \&PVE::Storage::complete_content_type,
|
||||
},
|
||||
enabled => {
|
||||
description => "Only list stores which are enabled (not disabled in config).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
target => get_standard_option('pve-node', {
|
||||
description => "If target is different to 'node', we only lists shared storages which " .
|
||||
"content is accessible on this 'node' and the specified 'target' node.",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::get_nodelist,
|
||||
}),
|
||||
'format' => {
|
||||
description => "Include information about formats",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
type => {
|
||||
description => "Storage type.",
|
||||
type => 'string',
|
||||
},
|
||||
content => {
|
||||
description => "Allowed storage content types.",
|
||||
type => 'string', format => 'pve-storage-content-list',
|
||||
},
|
||||
enabled => {
|
||||
description => "Set when storage is enabled (not disabled).",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
active => {
|
||||
description => "Set when storage is accessible.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
shared => {
|
||||
description => "Shared flag from storage configuration.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
total => {
|
||||
description => "Total storage space in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
used => {
|
||||
description => "Used storage space in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
avail => {
|
||||
description => "Available storage space in bytes.",
|
||||
type => 'integer',
|
||||
renderer => 'bytes',
|
||||
optional => 1,
|
||||
},
|
||||
used_fraction => {
|
||||
description => "Used fraction (used/total).",
|
||||
type => 'number',
|
||||
renderer => 'fraction_as_percentage',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{storage}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $localnode = PVE::INotify::nodename();
|
||||
|
||||
my $target = $param->{target};
|
||||
|
||||
undef $target if $target && ($target eq $localnode || $target eq 'localhost');
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $info = PVE::Storage::storage_info($cfg, $param->{content}, $param->{format});
|
||||
|
||||
raise_param_exc({ storage => "No such storage." })
|
||||
if $param->{storage} && !defined($info->{$param->{storage}});
|
||||
|
||||
my $res = {};
|
||||
my @sids = PVE::Storage::storage_ids($cfg);
|
||||
foreach my $storeid (@sids) {
|
||||
my $data = $info->{$storeid};
|
||||
next if !$data;
|
||||
my $privs = [ 'Datastore.Audit', 'Datastore.AllocateSpace' ];
|
||||
next if !$rpcenv->check_any($authuser, "/storage/$storeid", $privs, 1);
|
||||
next if $param->{storage} && $param->{storage} ne $storeid;
|
||||
|
||||
my $scfg = PVE::Storage::storage_config($cfg, $storeid);
|
||||
|
||||
next if $param->{enabled} && $scfg->{disable};
|
||||
|
||||
if ($target) {
|
||||
# check if storage content is accessible on local node and specified target node
|
||||
# we use this on the Clone GUI
|
||||
|
||||
next if !$scfg->{shared};
|
||||
next if !PVE::Storage::storage_check_node($cfg, $storeid, undef, 1);
|
||||
next if !PVE::Storage::storage_check_node($cfg, $storeid, $target, 1);
|
||||
}
|
||||
|
||||
if ($data->{total}) {
|
||||
$data->{used_fraction} = ($data->{used} // 0) / $data->{total};
|
||||
}
|
||||
|
||||
$res->{$storeid} = $data;
|
||||
}
|
||||
|
||||
return PVE::RESTHandler::hash_to_array($res, 'storage');
|
||||
}});
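The used_fraction field above is derived from the raw counters that PVE::Storage::storage_info() reports per storage. A minimal sketch of the same calculation, assuming a hypothetical storage ID 'local' on the local node:

use strict;
use warnings;
use PVE::Storage;

my $cfg = PVE::Storage::config();
my $info = PVE::Storage::storage_info($cfg);   # no content/format filter
if (my $data = $info->{'local'}) {             # 'local' is just an example storage ID
    my $frac = $data->{total} ? ($data->{used} // 0) / $data->{total} : 0;
    printf "used: %.2f%% of %d bytes\n", 100 * $frac, $data->{total} // 0;
}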
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'diridx',
|
||||
path => '{storage}',
|
||||
method => 'GET',
|
||||
description => "",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {
|
||||
subdir => { type => 'string' },
|
||||
},
|
||||
},
|
||||
links => [ { rel => 'child', href => "{subdir}" } ],
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $res = [
|
||||
{ subdir => 'content' },
|
||||
{ subdir => 'download-url' },
|
||||
{ subdir => 'file-restore' },
|
||||
{ subdir => 'prunebackups' },
|
||||
{ subdir => 'rrd' },
|
||||
{ subdir => 'rrddata' },
|
||||
{ subdir => 'status' },
|
||||
{ subdir => 'upload' },
|
||||
];
|
||||
|
||||
return $res;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'read_status',
|
||||
path => '{storage}/status',
|
||||
method => 'GET',
|
||||
description => "Read storage status.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => "object",
|
||||
properties => {},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $info = PVE::Storage::storage_info($cfg, $param->{content});
|
||||
|
||||
my $data = $info->{$param->{storage}};
|
||||
|
||||
raise_param_exc({ storage => "No such storage." })
|
||||
if !defined($data);
|
||||
|
||||
return $data;
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'rrd',
|
||||
path => '{storage}/rrd',
|
||||
method => 'GET',
|
||||
description => "Read storage RRD statistics (returns PNG).",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
timeframe => {
|
||||
description => "Specify the time frame you are interested in.",
|
||||
type => 'string',
|
||||
enum => [ 'hour', 'day', 'week', 'month', 'year' ],
|
||||
},
|
||||
ds => {
|
||||
description => "The list of datasources you want to display.",
|
||||
type => 'string', format => 'pve-configid-list',
|
||||
},
|
||||
cf => {
|
||||
description => "The RRD consolidation function",
|
||||
type => 'string',
|
||||
enum => [ 'AVERAGE', 'MAX' ],
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => "object",
|
||||
properties => {
|
||||
filename => { type => 'string' },
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::RRD::create_rrd_graph(
|
||||
"pve2-storage/$param->{node}/$param->{storage}",
|
||||
$param->{timeframe}, $param->{ds}, $param->{cf});
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'rrddata',
|
||||
path => '{storage}/rrddata',
|
||||
method => 'GET',
|
||||
description => "Read storage RRD statistics.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.Audit', 'Datastore.AllocateSpace'], any => 1],
|
||||
},
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
timeframe => {
|
||||
description => "Specify the time frame you are interested in.",
|
||||
type => 'string',
|
||||
enum => [ 'hour', 'day', 'week', 'month', 'year' ],
|
||||
},
|
||||
cf => {
|
||||
description => "The RRD consolidation function",
|
||||
type => 'string',
|
||||
enum => [ 'AVERAGE', 'MAX' ],
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => "array",
|
||||
items => {
|
||||
type => "object",
|
||||
properties => {},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
return PVE::RRD::create_rrd_data(
|
||||
"pve2-storage/$param->{node}/$param->{storage}",
|
||||
$param->{timeframe}, $param->{cf});
|
||||
}});
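Both RRD endpoints above address the storage time series through the "pve2-storage/<node>/<storage>" key. A minimal sketch of the underlying call, with node and storage names chosen purely for illustration:

use PVE::RRD;

# 'pve1' and 'local' are placeholder node/storage names
my $data = PVE::RRD::create_rrd_data("pve2-storage/pve1/local", 'hour', 'AVERAGE');
print scalar(@$data), " samples\n";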
|
||||
|
||||
# makes no sense for big images and backup files (because it
# creates a copy of the file).
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'upload',
|
||||
path => '{storage}/upload',
|
||||
method => 'POST',
|
||||
description => "Upload templates and ISO images.",
|
||||
permissions => {
|
||||
check => ['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']],
|
||||
},
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
content => {
|
||||
description => "Content type.",
|
||||
type => 'string', format => 'pve-storage-content',
|
||||
enum => ['iso', 'vztmpl'],
|
||||
},
|
||||
filename => {
|
||||
description => "The name of the file to create. Caution: This will be normalized!",
|
||||
maxLength => 255,
|
||||
type => 'string',
|
||||
},
|
||||
checksum => {
|
||||
description => "The expected checksum of the file.",
|
||||
type => 'string',
|
||||
requires => 'checksum-algorithm',
|
||||
optional => 1,
|
||||
},
|
||||
'checksum-algorithm' => {
|
||||
description => "The algorithm to calculate the checksum of the file.",
|
||||
type => 'string',
|
||||
enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
|
||||
requires => 'checksum',
|
||||
optional => 1,
|
||||
},
|
||||
tmpfilename => {
|
||||
description => "The source file name. This parameter is usually set by the REST handler. You can only overwrite it when connecting to the trusted port on localhost.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
pattern => '/var/tmp/pveupload-[0-9a-f]+',
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => "string" },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $node = $param->{node};
|
||||
my $scfg = PVE::Storage::storage_check_enabled($cfg, $param->{storage}, $node);
|
||||
|
||||
die "can't upload to storage type '$scfg->{type}'\n"
|
||||
if !defined($scfg->{path});
|
||||
|
||||
my $content = $param->{content};
|
||||
|
||||
my $tmpfilename = $param->{tmpfilename};
|
||||
die "missing temporary file name\n" if !$tmpfilename;
|
||||
|
||||
my $size = -s $tmpfilename;
|
||||
die "temporary file '$tmpfilename' does not exist\n" if !defined($size);
|
||||
|
||||
my $filename = PVE::Storage::normalize_content_filename($param->{filename});
|
||||
|
||||
my $path;
|
||||
|
||||
if ($content eq 'iso') {
|
||||
if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) {
|
||||
raise_param_exc({ filename => "wrong file extension" });
|
||||
}
|
||||
$path = PVE::Storage::get_iso_dir($cfg, $param->{storage});
|
||||
} elsif ($content eq 'vztmpl') {
|
||||
if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) {
|
||||
raise_param_exc({ filename => "wrong file extension" });
|
||||
}
|
||||
$path = PVE::Storage::get_vztmpl_dir($cfg, $param->{storage});
|
||||
} else {
|
||||
raise_param_exc({ content => "upload content type '$content' not allowed" });
|
||||
}
|
||||
|
||||
die "storage '$param->{storage}' does not support '$content' content\n"
|
||||
if !$scfg->{content}->{$content};
|
||||
|
||||
my $dest = "$path/$filename";
|
||||
my $dirname = dirname($dest);
|
||||
|
||||
# best effort to match apl_download behaviour
|
||||
chmod 0644, $tmpfilename;
|
||||
|
||||
my $err_cleanup = sub { unlink $dest; die "cleanup failed: $!\n" if $! && $! != ENOENT };
|
||||
|
||||
my $cmd;
|
||||
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
|
||||
my $remip = PVE::Cluster::remote_node_ip($node);
|
||||
|
||||
my @ssh_options = ('-o', 'BatchMode=yes');
|
||||
|
||||
my @remcmd = ('/usr/bin/ssh', @ssh_options, $remip, '--');
|
||||
|
||||
eval { # activate remote storage
|
||||
run_command([@remcmd, '/usr/sbin/pvesm', 'status', '--storage', $param->{storage}]);
|
||||
};
|
||||
die "can't activate storage '$param->{storage}' on node '$node': $@\n" if $@;
|
||||
|
||||
run_command(
|
||||
[@remcmd, '/bin/mkdir', '-p', '--', PVE::Tools::shell_quote($dirname)],
|
||||
errmsg => "mkdir failed",
|
||||
);
|
||||
|
||||
$cmd = ['/usr/bin/scp', @ssh_options, '-p', '--', $tmpfilename, "[$remip]:" . PVE::Tools::shell_quote($dest)];
|
||||
|
||||
$err_cleanup = sub { run_command([@remcmd, 'rm', '-f', '--', $dest]) };
|
||||
} else {
|
||||
PVE::Storage::activate_storage($cfg, $param->{storage});
|
||||
File::Path::make_path($dirname);
|
||||
$cmd = ['cp', '--', $tmpfilename, $dest];
|
||||
}
|
||||
|
||||
# NOTE: we simply overwrite the destination file if it already exists
|
||||
my $worker = sub {
|
||||
my $upid = shift;
|
||||
|
||||
print "starting file import from: $tmpfilename\n";
|
||||
|
||||
eval {
|
||||
my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'};
|
||||
if ($checksum_algorithm) {
|
||||
print "calculating checksum...";
|
||||
|
||||
my $checksum_got = PVE::Tools::get_file_hash($checksum_algorithm, $tmpfilename);
|
||||
|
||||
if (lc($checksum_got) eq lc($checksum)) {
|
||||
print "OK, checksum verified\n";
|
||||
} else {
|
||||
print "\n"; # the front end expects the error to reside at the last line without any noise
|
||||
die "checksum mismatch: got '$checksum_got' != expect '$checksum'\n";
|
||||
}
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
# unlinks only the temporary file from the http server
|
||||
unlink $tmpfilename;
|
||||
warn "unable to clean up temporory file '$tmpfilename' - $!\n"
|
||||
if $! && $! != ENOENT;
|
||||
die $err;
|
||||
}
|
||||
|
||||
print "target node: $node\n";
|
||||
print "target file: $dest\n";
|
||||
print "file size is: $size\n";
|
||||
print "command: " . join(' ', @$cmd) . "\n";
|
||||
|
||||
eval { run_command($cmd, errmsg => 'import failed'); };
|
||||
|
||||
unlink $tmpfilename; # the temporary file got only uploaded locally, no need to rm remote
|
||||
warn "unable to clean up temporary file '$tmpfilename' - $!\n" if $! && $! != ENOENT;
|
||||
|
||||
if (my $err = $@) {
|
||||
eval { $err_cleanup->() };
|
||||
warn "$@" if $@;
|
||||
die $err;
|
||||
}
|
||||
print "finished file import successfully\n";
|
||||
};
|
||||
|
||||
return $rpcenv->fork_worker('imgcopy', undef, $user, $worker);
|
||||
}});
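The checksum verification in the worker above boils down to a case-insensitive comparison of the hash PVE::Tools::get_file_hash() computes for the spooled upload. A standalone sketch, with the file name and digest made up for the example:

use strict;
use warnings;
use PVE::Tools;

my $tmpfile  = '/var/tmp/pveupload-0123456789abcdef';   # example spool file
my $expected = 'deadbeef...';                            # example sha256 digest (truncated)

my $got = PVE::Tools::get_file_hash('sha256', $tmpfile);
die "checksum mismatch: got '$got' != expected '$expected'\n"
    if lc($got) ne lc($expected);
print "OK, checksum verified\n";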
|
||||
|
||||
__PACKAGE__->register_method({
|
||||
name => 'download_url',
|
||||
path => '{storage}/download-url',
|
||||
method => 'POST',
|
||||
description => "Download templates and ISO images by using an URL.",
|
||||
proxyto => 'node',
|
||||
permissions => {
|
||||
check => [ 'and',
|
||||
['perm', '/storage/{storage}', [ 'Datastore.AllocateTemplate' ]],
|
||||
['perm', '/', [ 'Sys.Audit', 'Sys.Modify' ]],
|
||||
],
|
||||
},
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id'),
|
||||
url => {
|
||||
description => "The URL to download the file from.",
|
||||
type => 'string',
|
||||
pattern => 'https?://.*',
|
||||
},
|
||||
content => {
|
||||
description => "Content type.", # TODO: could be optional & detected in most cases
|
||||
type => 'string', format => 'pve-storage-content',
|
||||
enum => ['iso', 'vztmpl'],
|
||||
},
|
||||
filename => {
|
||||
description => "The name of the file to create. Caution: This will be normalized!",
|
||||
maxLength => 255,
|
||||
type => 'string',
|
||||
},
|
||||
checksum => {
|
||||
description => "The expected checksum of the file.",
|
||||
type => 'string',
|
||||
requires => 'checksum-algorithm',
|
||||
optional => 1,
|
||||
},
|
||||
'checksum-algorithm' => {
|
||||
description => "The algorithm to calculate the checksum of the file.",
|
||||
type => 'string',
|
||||
enum => ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
|
||||
requires => 'checksum',
|
||||
optional => 1,
|
||||
},
|
||||
'verify-certificates' => {
|
||||
description => "If false, no SSL/TLS certificates will be verified.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => "string"
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $user = $rpcenv->get_user();
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my ($node, $storage) = $param->@{'node', 'storage'};
|
||||
my $scfg = PVE::Storage::storage_check_enabled($cfg, $storage, $node);
|
||||
|
||||
die "can't upload to storage type '$scfg->{type}', not a file based storage!\n"
|
||||
if !defined($scfg->{path});
|
||||
|
||||
my ($content, $url) = $param->@{'content', 'url'};
|
||||
|
||||
die "storage '$storage' is not configured for content-type '$content'\n"
|
||||
if !$scfg->{content}->{$content};
|
||||
|
||||
my $filename = PVE::Storage::normalize_content_filename($param->{filename});
|
||||
|
||||
my $path;
|
||||
if ($content eq 'iso') {
|
||||
if ($filename !~ m![^/]+$PVE::Storage::ISO_EXT_RE_0$!) {
|
||||
raise_param_exc({ filename => "wrong file extension" });
|
||||
}
|
||||
$path = PVE::Storage::get_iso_dir($cfg, $storage);
|
||||
} elsif ($content eq 'vztmpl') {
|
||||
if ($filename !~ m![^/]+$PVE::Storage::VZTMPL_EXT_RE_1$!) {
|
||||
raise_param_exc({ filename => "wrong file extension" });
|
||||
}
|
||||
$path = PVE::Storage::get_vztmpl_dir($cfg, $storage);
|
||||
} else {
|
||||
raise_param_exc({ content => "upload content-type '$content' is not allowed" });
|
||||
}
|
||||
|
||||
PVE::Storage::activate_storage($cfg, $storage);
|
||||
File::Path::make_path($path);
|
||||
|
||||
my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg');
|
||||
my $opts = {
|
||||
hash_required => 0,
|
||||
verify_certificates => $param->{'verify-certificates'} // 1,
|
||||
http_proxy => $dccfg->{http_proxy},
|
||||
};
|
||||
|
||||
my ($checksum, $checksum_algorithm) = $param->@{'checksum', 'checksum-algorithm'};
|
||||
if ($checksum) {
|
||||
$opts->{"${checksum_algorithm}sum"} = $checksum;
|
||||
$opts->{hash_required} = 1;
|
||||
}
|
||||
|
||||
my $worker = sub {
|
||||
PVE::Tools::download_file_from_url("$path/$filename", $url, $opts);
|
||||
};
|
||||
|
||||
my $worker_id = PVE::Tools::encode_text($filename); # must not pass : or the like as w-ID
|
||||
|
||||
return $rpcenv->fork_worker('download', $worker_id, $user, $worker);
|
||||
}});
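The options hash handed to PVE::Tools::download_file_from_url() above carries the expected digest under a per-algorithm key ('<algo>sum'). A minimal sketch, with a made-up URL, target path and digest:

use PVE::Tools;

my $opts = {
    hash_required       => 1,
    verify_certificates => 1,
    sha256sum           => '0123abcd...',   # example digest, truncated
};
# target path and URL are placeholders
PVE::Tools::download_file_from_url('/var/lib/vz/template/iso/example.iso',
    'https://example.com/example.iso', $opts);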
|
||||
|
||||
1;
|
||||
9
src/PVE/CLI/Makefile
Normal file
@ -0,0 +1,9 @@
|
||||
SOURCES=pvesm.pm
|
||||
|
||||
.PHONY: install
|
||||
install: ${SOURCES}
|
||||
install -d -m 0755 ${DESTDIR}${PERLDIR}/PVE/CLI
|
||||
for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/CLI/$$i; done
|
||||
|
||||
|
||||
clean:
|
||||
731
src/PVE/CLI/pvesm.pm
Executable file
@ -0,0 +1,731 @@
|
||||
package PVE::CLI::pvesm;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use POSIX qw(O_RDONLY O_WRONLY O_CREAT O_TRUNC);
|
||||
use Fcntl ':flock';
|
||||
use File::Path;
|
||||
use MIME::Base64 qw(encode_base64);
|
||||
|
||||
use IO::Socket::IP;
|
||||
use IO::Socket::UNIX;
|
||||
use Socket qw(SOCK_STREAM);
|
||||
|
||||
use PVE::SafeSyslog;
|
||||
use PVE::Cluster;
|
||||
use PVE::INotify;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::Storage;
|
||||
use PVE::Tools qw(extract_param);
|
||||
use PVE::API2::Storage::Config;
|
||||
use PVE::API2::Storage::Content;
|
||||
use PVE::API2::Storage::PruneBackups;
|
||||
use PVE::API2::Storage::Scan;
|
||||
use PVE::API2::Storage::Status;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::PTY;
|
||||
|
||||
use PVE::CLIHandler;
|
||||
|
||||
use base qw(PVE::CLIHandler);
|
||||
|
||||
my $nodename = PVE::INotify::nodename();
|
||||
|
||||
sub param_mapping {
|
||||
my ($name) = @_;
|
||||
|
||||
my $password_map = PVE::CLIHandler::get_standard_mapping('pve-password', {
|
||||
func => sub {
|
||||
my ($value) = @_;
|
||||
return $value if $value;
|
||||
return PVE::PTY::read_password("Enter Password: ");
|
||||
},
|
||||
});
|
||||
|
||||
my $enc_key_map = {
|
||||
name => 'encryption-key',
|
||||
desc => 'a file containing an encryption key, or the special value "autogen"',
|
||||
func => sub {
|
||||
my ($value) = @_;
|
||||
return $value if $value eq 'autogen';
|
||||
return PVE::Tools::file_get_contents($value);
|
||||
}
|
||||
};
|
||||
|
||||
my $master_key_map = {
|
||||
name => 'master-pubkey',
|
||||
desc => 'a file containing a PEM-formatted master public key',
|
||||
func => sub {
|
||||
my ($value) = @_;
|
||||
return encode_base64(PVE::Tools::file_get_contents($value), '');
|
||||
}
|
||||
};
|
||||
|
||||
my $keyring_map = {
|
||||
name => 'keyring',
|
||||
desc => 'file containing the keyring to authenticate in the Ceph cluster',
|
||||
func => sub {
|
||||
my ($value) = @_;
|
||||
return PVE::Tools::file_get_contents($value);
|
||||
},
|
||||
};
|
||||
|
||||
my $mapping = {
|
||||
'cifsscan' => [ $password_map ],
|
||||
'cifs' => [ $password_map ],
|
||||
'pbs' => [ $password_map ],
|
||||
'create' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ],
|
||||
'update' => [ $password_map, $enc_key_map, $master_key_map, $keyring_map ],
|
||||
};
|
||||
return $mapping->{$name};
|
||||
}
|
||||
|
||||
sub setup_environment {
|
||||
PVE::RPCEnvironment->setup_default_cli_env();
|
||||
}
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'apiinfo',
|
||||
path => 'apiinfo',
|
||||
method => 'GET',
|
||||
description => "Returns APIVER and APIAGE.",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
apiver => { type => 'integer' },
|
||||
apiage => { type => 'integer' },
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
return {
|
||||
apiver => PVE::Storage::APIVER,
|
||||
apiage => PVE::Storage::APIAGE,
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'path',
|
||||
path => 'path',
|
||||
method => 'GET',
|
||||
description => "Get filesystem path for specified volume",
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string', format => 'pve-volume-id',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
|
||||
my $path = PVE::Storage::path ($cfg, $param->{volume});
|
||||
|
||||
print "$path\n";
|
||||
|
||||
return undef;
|
||||
|
||||
}});
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'extractconfig',
|
||||
path => 'extractconfig',
|
||||
method => 'GET',
|
||||
description => "Extract configuration from vzdump backup archive.",
|
||||
permissions => {
|
||||
description => "The user needs 'VM.Backup' permissions on the backed up guest ID, and 'Datastore.AllocateSpace' on the backup storage.",
|
||||
user => 'all',
|
||||
},
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
my $volume = $param->{volume};
|
||||
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
my $storage_cfg = PVE::Storage::config();
|
||||
PVE::Storage::check_volume_access(
|
||||
$rpcenv,
|
||||
$authuser,
|
||||
$storage_cfg,
|
||||
undef,
|
||||
$volume,
|
||||
'backup',
|
||||
);
|
||||
|
||||
if (PVE::Storage::parse_volume_id($volume, 1)) {
|
||||
my (undef, undef, $ownervm) = PVE::Storage::parse_volname($storage_cfg, $volume);
|
||||
$rpcenv->check($authuser, "/vms/$ownervm", ['VM.Backup']);
|
||||
}
|
||||
|
||||
my $config_raw = PVE::Storage::extract_vzdump_config($storage_cfg, $volume);
|
||||
|
||||
print "$config_raw\n";
|
||||
return;
|
||||
}});
|
||||
|
||||
my $print_content = sub {
|
||||
my ($list) = @_;
|
||||
|
||||
my ($maxlenname, $maxsize) = (0, 0);
|
||||
foreach my $info (@$list) {
|
||||
my $volid = $info->{volid};
|
||||
my $sidlen = length ($volid);
|
||||
$maxlenname = $sidlen if $sidlen > $maxlenname;
|
||||
$maxsize = $info->{size} if ($info->{size} // 0) > $maxsize;
|
||||
}
|
||||
my $sizemaxdigits = length($maxsize);
|
||||
|
||||
my $basefmt = "%-${maxlenname}s %-7s %-9s %${sizemaxdigits}s";
|
||||
printf "$basefmt %s\n", "Volid", "Format", "Type", "Size", "VMID";
|
||||
|
||||
foreach my $info (@$list) {
|
||||
next if !$info->{vmid};
|
||||
my $volid = $info->{volid};
|
||||
|
||||
printf "$basefmt %d\n", $volid, $info->{format}, $info->{content}, $info->{size}, $info->{vmid};
|
||||
}
|
||||
|
||||
foreach my $info (sort { $a->{format} cmp $b->{format} } @$list) {
|
||||
next if $info->{vmid};
|
||||
my $volid = $info->{volid};
|
||||
|
||||
printf "$basefmt\n", $volid, $info->{format}, $info->{content}, $info->{size};
|
||||
}
|
||||
};
|
||||
|
||||
my $print_status = sub {
|
||||
my $res = shift;
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $res (@$res) {
|
||||
my $storeid = $res->{storage};
|
||||
$maxlen = length ($storeid) if length ($storeid) > $maxlen;
|
||||
}
|
||||
$maxlen+=1;
|
||||
|
||||
printf "%-${maxlen}s %10s %10s %15s %15s %15s %8s\n", 'Name', 'Type',
|
||||
'Status', 'Total', 'Used', 'Available', '%';
|
||||
|
||||
foreach my $res (sort { $a->{storage} cmp $b->{storage} } @$res) {
|
||||
my $storeid = $res->{storage};
|
||||
|
||||
my $active = $res->{active} ? 'active' : 'inactive';
|
||||
my ($per, $per_fmt) = (0, '% 7.2f%%');
|
||||
$per = ($res->{used}*100)/$res->{total} if $res->{total} > 0;
|
||||
|
||||
if (!$res->{enabled}) {
|
||||
$per = 'N/A';
|
||||
$per_fmt = '% 8s';
|
||||
$active = 'disabled';
|
||||
}
|
||||
|
||||
printf "%-${maxlen}s %10s %10s %15d %15d %15d $per_fmt\n", $storeid,
|
||||
$res->{type}, $active, $res->{total}/1024, $res->{used}/1024,
|
||||
$res->{avail}/1024, $per;
|
||||
}
|
||||
};
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'export',
|
||||
path => 'export',
|
||||
method => 'GET',
|
||||
description => "Used internally to export a volume.",
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
format => {
|
||||
description => "Export stream format",
|
||||
type => 'string',
|
||||
enum => $PVE::Storage::KNOWN_EXPORT_FORMATS,
|
||||
},
|
||||
filename => {
|
||||
description => "Destination file name",
|
||||
type => 'string',
|
||||
},
|
||||
base => {
|
||||
description => "Snapshot to start an incremental stream from",
|
||||
type => 'string',
|
||||
pattern => qr/[a-z0-9_\-]{1,40}/i,
|
||||
maxLength => 40,
|
||||
optional => 1,
|
||||
},
|
||||
snapshot => {
|
||||
description => "Snapshot to export",
|
||||
type => 'string',
|
||||
pattern => qr/[a-z0-9_\-]{1,40}/i,
|
||||
maxLength => 40,
|
||||
optional => 1,
|
||||
},
|
||||
'with-snapshots' => {
|
||||
description =>
|
||||
"Whether to include intermediate snapshots in the stream",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'snapshot-list' => {
|
||||
description => "Ordered list of snapshots to transfer",
|
||||
type => 'string',
|
||||
format => 'string-list',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $with_snapshots = $param->{'with-snapshots'};
|
||||
if (defined(my $list = $param->{'snapshot-list'})) {
|
||||
$with_snapshots = PVE::Tools::split_list($list);
|
||||
}
|
||||
|
||||
my $filename = $param->{filename};
|
||||
|
||||
my $outfh;
|
||||
if ($filename eq '-') {
|
||||
$outfh = \*STDOUT;
|
||||
} else {
|
||||
sysopen($outfh, $filename, O_CREAT|O_WRONLY|O_TRUNC)
|
||||
or die "open($filename): $!\n";
|
||||
}
|
||||
|
||||
eval {
|
||||
my $cfg = PVE::Storage::config();
|
||||
PVE::Storage::volume_export($cfg, $outfh, $param->{volume}, $param->{format},
|
||||
$param->{snapshot}, $param->{base}, $with_snapshots);
|
||||
};
|
||||
my $err = $@;
|
||||
if ($filename ne '-') {
|
||||
close($outfh);
|
||||
unlink($filename) if $err;
|
||||
}
|
||||
die $err if $err;
|
||||
return;
|
||||
}
|
||||
});
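A minimal sketch of the export path above, writing a volume to a plain file; the volume ID and stream format are placeholders and would normally come from the CLI arguments:

use strict;
use warnings;
use POSIX qw(O_WRONLY O_CREAT O_TRUNC);
use PVE::Storage;

my ($volume, $format, $filename) = ('local:100/vm-100-disk-0.raw', 'raw+size', '/tmp/vol.export');

sysopen(my $outfh, $filename, O_CREAT|O_WRONLY|O_TRUNC) or die "open($filename): $!\n";
my $cfg = PVE::Storage::config();
# no snapshot, no base, no intermediate snapshots
PVE::Storage::volume_export($cfg, $outfh, $volume, $format, undef, undef, 0);
close($outfh);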
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'import',
|
||||
path => 'import',
|
||||
method => 'PUT',
|
||||
description => "Used internally to import a volume.",
|
||||
protected => 1,
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
volume => {
|
||||
description => "Volume identifier",
|
||||
type => 'string',
|
||||
completion => \&PVE::Storage::complete_volume,
|
||||
},
|
||||
format => {
|
||||
description => "Import stream format",
|
||||
type => 'string',
|
||||
enum => $PVE::Storage::KNOWN_EXPORT_FORMATS,
|
||||
},
|
||||
filename => {
|
||||
description => "Source file name. For '-' stdin is used, the " .
|
||||
"tcp://<IP-or-CIDR> format allows to use a TCP connection, " .
|
||||
"the unix://PATH-TO-SOCKET format a UNIX socket as input." .
|
||||
"Else, the file is treated as common file.",
|
||||
type => 'string',
|
||||
},
|
||||
base => {
|
||||
description => "Base snapshot of an incremental stream",
|
||||
type => 'string',
|
||||
pattern => qr/[a-z0-9_\-]{1,40}/i,
|
||||
maxLength => 40,
|
||||
optional => 1,
|
||||
},
|
||||
'with-snapshots' => {
|
||||
description =>
|
||||
"Whether the stream includes intermediate snapshots",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
'delete-snapshot' => {
|
||||
description => "A snapshot to delete on success",
|
||||
type => 'string',
|
||||
pattern => qr/[a-z0-9_\-]{1,80}/i,
|
||||
maxLength => 80,
|
||||
optional => 1,
|
||||
},
|
||||
'allow-rename' => {
|
||||
description => "Choose a new volume ID if the requested " .
|
||||
"volume ID already exists, instead of throwing an error.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
snapshot => {
|
||||
description => "The current-state snapshot if the stream contains snapshots",
|
||||
type => 'string',
|
||||
pattern => qr/[a-z0-9_\-]{1,40}/i,
|
||||
maxLength => 40,
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'string' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $filename = $param->{filename};
|
||||
|
||||
my $infh;
|
||||
if ($filename eq '-') {
|
||||
$infh = \*STDIN;
|
||||
} elsif ($filename =~ m!^tcp://(([^/]+)(/\d+)?)$!) {
|
||||
my ($cidr, $ip, $subnet) = ($1, $2, $3);
|
||||
if ($subnet) { # got real CIDR notation, not just IP
|
||||
my $ips = PVE::Network::get_local_ip_from_cidr($cidr);
|
||||
die "Unable to get any local IP address in network '$cidr'\n"
|
||||
if scalar(@$ips) < 1;
|
||||
die "Got multiple local IP address in network '$cidr'\n"
|
||||
if scalar(@$ips) > 1;
|
||||
|
||||
$ip = $ips->[0];
|
||||
}
|
||||
my $family = PVE::Tools::get_host_address_family($ip);
|
||||
my $port = PVE::Tools::next_migrate_port($family, $ip);
|
||||
|
||||
my $sock_params = {
|
||||
Listen => 1,
|
||||
ReuseAddr => 1,
|
||||
Proto => &Socket::IPPROTO_TCP,
|
||||
GetAddrInfoFlags => 0,
|
||||
LocalAddr => $ip,
|
||||
LocalPort => $port,
|
||||
};
|
||||
my $socket = IO::Socket::IP->new(%$sock_params)
|
||||
or die "failed to open socket: $!\n";
|
||||
|
||||
print "$ip\n$port\n"; # tell remote where to connect
|
||||
*STDOUT->flush();
|
||||
|
||||
my $prev_alarm = alarm 0;
|
||||
local $SIG{ALRM} = sub { die "timed out waiting for client\n" };
|
||||
alarm 30;
|
||||
my $client = $socket->accept; # Wait for a client
|
||||
alarm $prev_alarm;
|
||||
close($socket);
|
||||
|
||||
$infh = \*$client;
|
||||
} elsif ($filename =~ m!^unix://(.*)$!) {
|
||||
my $socket_path = $1;
|
||||
my $socket = IO::Socket::UNIX->new(
|
||||
Type => SOCK_STREAM(),
|
||||
Local => $socket_path,
|
||||
Listen => 1,
|
||||
) or die "failed to open socket: $!\n";
|
||||
|
||||
print "ready\n";
|
||||
*STDOUT->flush();
|
||||
|
||||
my $prev_alarm = alarm 0;
|
||||
local $SIG{ALRM} = sub { die "timed out waiting for client\n" };
|
||||
alarm 30;
|
||||
my $client = $socket->accept; # Wait for a client
|
||||
alarm $prev_alarm;
|
||||
close($socket);
|
||||
|
||||
$infh = \*$client;
|
||||
} else {
|
||||
sysopen($infh, $filename, O_RDONLY)
|
||||
or die "open($filename): $!\n";
|
||||
}
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $volume = $param->{volume};
|
||||
my $delete = $param->{'delete-snapshot'};
|
||||
my $imported_volid = PVE::Storage::volume_import($cfg, $infh, $volume, $param->{format},
|
||||
$param->{snapshot}, $param->{base}, $param->{'with-snapshots'},
|
||||
$param->{'allow-rename'});
|
||||
PVE::Storage::volume_snapshot_delete($cfg, $imported_volid, $delete)
|
||||
if defined($delete);
|
||||
return $imported_volid;
|
||||
}
|
||||
});
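The matching import side, reading the stream produced by the export sketch above from a plain file; the volume ID and format are again illustrative only (with tcp:// or unix:// inputs the method instead prints the endpoint and waits up to 30 seconds for the sender to connect):

use strict;
use warnings;
use POSIX qw(O_RDONLY);
use PVE::Storage;

my ($volume, $format, $filename) = ('local:100/vm-100-disk-0.raw', 'raw+size', '/tmp/vol.export');

sysopen(my $infh, $filename, O_RDONLY) or die "open($filename): $!\n";
my $cfg = PVE::Storage::config();
# no snapshot/base, stream without snapshots, allow renaming on conflict
my $volid = PVE::Storage::volume_import($cfg, $infh, $volume, $format,
    undef, undef, 0, 1);
print "imported as $volid\n";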
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
name => 'prunebackups',
|
||||
path => 'prunebackups',
|
||||
method => 'GET',
|
||||
description => "Prune backups. Only those using the standard naming scheme are considered. " .
|
||||
"If no keep options are specified, those from the storage configuration are used.",
|
||||
protected => 1,
|
||||
proxyto => 'node',
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
'dry-run' => {
|
||||
description => "Only show what would be pruned, don't delete anything.",
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
},
|
||||
node => get_standard_option('pve-node'),
|
||||
storage => get_standard_option('pve-storage-id', {
|
||||
completion => \&PVE::Storage::complete_storage_enabled,
|
||||
}),
|
||||
%{$PVE::Storage::Plugin::prune_backups_format},
|
||||
type => {
|
||||
description => "Either 'qemu' or 'lxc'. Only consider backups for guests of this type.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
enum => ['qemu', 'lxc'],
|
||||
},
|
||||
vmid => get_standard_option('pve-vmid', {
|
||||
description => "Only consider backups for this guest.",
|
||||
optional => 1,
|
||||
completion => \&PVE::Cluster::complete_vmid,
|
||||
}),
|
||||
},
|
||||
},
|
||||
returns => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
dryrun => {
|
||||
description => 'Whether this was a dry run. The list is only defined in that case.',
|
||||
type => 'boolean',
|
||||
},
|
||||
list => {
|
||||
type => 'array',
|
||||
items => {
|
||||
type => 'object',
|
||||
properties => {
|
||||
volid => {
|
||||
description => "Backup volume ID.",
|
||||
type => 'string',
|
||||
},
|
||||
'ctime' => {
|
||||
description => "Creation time of the backup (seconds since the UNIX epoch).",
|
||||
type => 'integer',
|
||||
},
|
||||
'mark' => {
|
||||
description => "Whether the backup would be kept or removed. For backups that don't " .
|
||||
"use the standard naming scheme, it's 'protected'.",
|
||||
type => 'string',
|
||||
},
|
||||
type => {
|
||||
description => "One of 'qemu', 'lxc', 'openvz' or 'unknown'.",
|
||||
type => 'string',
|
||||
},
|
||||
'vmid' => {
|
||||
description => "The VM the backup belongs to.",
|
||||
type => 'integer',
|
||||
optional => 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $dryrun = extract_param($param, 'dry-run') ? 1 : 0;
|
||||
|
||||
my $keep_opts;
|
||||
foreach my $keep (keys %{$PVE::Storage::Plugin::prune_backups_format}) {
|
||||
$keep_opts->{$keep} = extract_param($param, $keep) if defined($param->{$keep});
|
||||
}
|
||||
$param->{'prune-backups'} = PVE::JSONSchema::print_property_string(
|
||||
$keep_opts, $PVE::Storage::Plugin::prune_backups_format) if $keep_opts;
|
||||
|
||||
my $list = [];
|
||||
if ($dryrun) {
|
||||
$list = PVE::API2::Storage::PruneBackups->dryrun($param);
|
||||
} else {
|
||||
PVE::API2::Storage::PruneBackups->delete($param);
|
||||
}
|
||||
|
||||
return {
|
||||
dryrun => $dryrun,
|
||||
list => $list,
|
||||
};
|
||||
}});
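The keep options collected above end up as a single 'prune-backups' property string. A minimal sketch of that conversion, with arbitrary retention values chosen for the example:

use PVE::JSONSchema;
use PVE::Storage::Plugin;

my $keep_opts = { 'keep-last' => 3, 'keep-weekly' => 2 };   # example retention values
my $prop = PVE::JSONSchema::print_property_string(
    $keep_opts, $PVE::Storage::Plugin::prune_backups_format);
print "$prop\n";   # e.g. "keep-last=3,keep-weekly=2"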
|
||||
|
||||
my $print_api_result = sub {
|
||||
my ($data, $schema, $options) = @_;
|
||||
PVE::CLIFormatter::print_api_result($data, $schema, undef, $options);
|
||||
};
|
||||
|
||||
our $cmddef = {
|
||||
add => [ "PVE::API2::Storage::Config", 'create', ['type', 'storage'] ],
|
||||
set => [ "PVE::API2::Storage::Config", 'update', ['storage'] ],
|
||||
remove => [ "PVE::API2::Storage::Config", 'delete', ['storage'] ],
|
||||
status => [ "PVE::API2::Storage::Status", 'index', [],
|
||||
{ node => $nodename }, $print_status ],
|
||||
list => [ "PVE::API2::Storage::Content", 'index', ['storage'],
|
||||
{ node => $nodename }, $print_content ],
|
||||
alloc => [ "PVE::API2::Storage::Content", 'create', ['storage', 'vmid', 'filename', 'size'],
|
||||
{ node => $nodename }, sub {
|
||||
my $volid = shift;
|
||||
print "successfully created '$volid'\n";
|
||||
}],
|
||||
free => [ "PVE::API2::Storage::Content", 'delete', ['volume'],
|
||||
{ node => $nodename } ],
|
||||
scan => {
|
||||
nfs => [ "PVE::API2::Storage::Scan", 'nfsscan', ['server'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $rec (@$res) {
|
||||
my $len = length ($rec->{path});
|
||||
$maxlen = $len if $len > $maxlen;
|
||||
}
|
||||
foreach my $rec (@$res) {
|
||||
printf "%-${maxlen}s %s\n", $rec->{path}, $rec->{options};
|
||||
}
|
||||
}],
|
||||
cifs => [ "PVE::API2::Storage::Scan", 'cifsscan', ['server'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $rec (@$res) {
|
||||
my $len = length ($rec->{share});
|
||||
$maxlen = $len if $len > $maxlen;
|
||||
}
|
||||
foreach my $rec (@$res) {
|
||||
printf "%-${maxlen}s %s\n", $rec->{share}, $rec->{description};
|
||||
}
|
||||
}],
|
||||
glusterfs => [ "PVE::API2::Storage::Scan", 'glusterfsscan', ['server'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
foreach my $rec (@$res) {
|
||||
printf "%s\n", $rec->{volname};
|
||||
}
|
||||
}],
|
||||
iscsi => [ "PVE::API2::Storage::Scan", 'iscsiscan', ['portal'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $rec (@$res) {
|
||||
my $len = length ($rec->{target});
|
||||
$maxlen = $len if $len > $maxlen;
|
||||
}
|
||||
foreach my $rec (@$res) {
|
||||
printf "%-${maxlen}s %s\n", $rec->{target}, $rec->{portal};
|
||||
}
|
||||
}],
|
||||
lvm => [ "PVE::API2::Storage::Scan", 'lvmscan', [], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
foreach my $rec (@$res) {
|
||||
printf "$rec->{vg}\n";
|
||||
}
|
||||
}],
|
||||
lvmthin => [ "PVE::API2::Storage::Scan", 'lvmthinscan', ['vg'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
foreach my $rec (@$res) {
|
||||
printf "$rec->{lv}\n";
|
||||
}
|
||||
}],
|
||||
pbs => [
|
||||
"PVE::API2::Storage::Scan",
|
||||
'pbsscan',
|
||||
['server', 'username'],
|
||||
{ node => $nodename },
|
||||
$print_api_result,
|
||||
$PVE::RESTHandler::standard_output_options,
|
||||
],
|
||||
zfs => [ "PVE::API2::Storage::Scan", 'zfsscan', [], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
foreach my $rec (@$res) {
|
||||
printf "$rec->{pool}\n";
|
||||
}
|
||||
}],
|
||||
},
|
||||
nfsscan => { alias => 'scan nfs' },
|
||||
cifsscan => { alias => 'scan cifs' },
|
||||
glusterfsscan => { alias => 'scan glusterfs' },
|
||||
iscsiscan => { alias => 'scan iscsi' },
|
||||
lvmscan => { alias => 'scan lvm' },
|
||||
lvmthinscan => { alias => 'scan lvmthin' },
|
||||
zfsscan => { alias => 'scan zfs' },
|
||||
path => [ __PACKAGE__, 'path', ['volume']],
|
||||
extractconfig => [__PACKAGE__, 'extractconfig', ['volume']],
|
||||
export => [ __PACKAGE__, 'export', ['volume', 'format', 'filename']],
|
||||
import => [ __PACKAGE__, 'import', ['volume', 'format', 'filename'], {}, sub {
|
||||
my $volid = shift;
|
||||
print PVE::Storage::volume_imported_message($volid);
|
||||
}],
|
||||
apiinfo => [ __PACKAGE__, 'apiinfo', [], {}, sub {
|
||||
my $res = shift;
|
||||
|
||||
print "APIVER $res->{apiver}\n";
|
||||
print "APIAGE $res->{apiage}\n";
|
||||
}],
|
||||
'prune-backups' => [ __PACKAGE__, 'prunebackups', ['storage'], { node => $nodename }, sub {
|
||||
my $res = shift;
|
||||
|
||||
my ($dryrun, $list) = ($res->{dryrun}, $res->{list});
|
||||
|
||||
return if !$dryrun;
|
||||
|
||||
if (!scalar(@{$list})) {
|
||||
print "No backups found\n";
|
||||
return;
|
||||
}
|
||||
|
||||
print "NOTE: this is only a preview and might not be what a subsequent\n" .
|
||||
"prune call does if backups are removed/added in the meantime.\n\n";
|
||||
|
||||
my @sorted = sort {
|
||||
my $vmcmp = PVE::Tools::safe_compare($a->{vmid}, $b->{vmid}, sub { $_[0] <=> $_[1] });
|
||||
return $vmcmp if $vmcmp ne 0;
|
||||
return $a->{ctime} <=> $b->{ctime};
|
||||
} @{$list};
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $backup (@sorted) {
|
||||
my $volid = $backup->{volid};
|
||||
$maxlen = length($volid) if length($volid) > $maxlen;
|
||||
}
|
||||
$maxlen+=1;
|
||||
|
||||
printf("%-${maxlen}s %15s %10s\n", 'Backup', 'Backup-ID', 'Prune-Mark');
|
||||
foreach my $backup (@sorted) {
|
||||
my $type = $backup->{type};
|
||||
my $vmid = $backup->{vmid};
|
||||
my $backup_id = defined($vmid) ? "$type/$vmid" : "$type";
|
||||
printf("%-${maxlen}s %15s %10s\n", $backup->{volid}, $backup_id, $backup->{mark});
|
||||
}
|
||||
}],
|
||||
};
|
||||
|
||||
1;
|
||||
291
src/PVE/CephConfig.pm
Normal file
@ -0,0 +1,291 @@
|
||||
package PVE::CephConfig;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Net::IP;
|
||||
use PVE::Tools qw(run_command);
|
||||
use PVE::Cluster qw(cfs_register_file);
|
||||
|
||||
cfs_register_file('ceph.conf',
|
||||
\&parse_ceph_config,
|
||||
\&write_ceph_config);
|
||||
|
||||
sub parse_ceph_config {
|
||||
my ($filename, $raw) = @_;
|
||||
|
||||
my $cfg = {};
|
||||
return $cfg if !defined($raw);
|
||||
|
||||
my @lines = split /\n/, $raw;
|
||||
|
||||
my $section;
|
||||
|
||||
foreach my $line (@lines) {
|
||||
$line =~ s/#.*$//;
|
||||
$line =~ s/^\s+//;
|
||||
$line =~ s/^;.*$//;
|
||||
$line =~ s/\s+$//;
|
||||
next if !$line;
|
||||
|
||||
$section = $1 if $line =~ m/^\[(\S+)\]$/;
|
||||
if (!$section) {
|
||||
warn "no section - skip: $line\n";
|
||||
next;
|
||||
}
|
||||
|
||||
if ($line =~ m/^(.*?\S)\s*=\s*(\S.*)$/) {
|
||||
my ($key, $val) = ($1, $2);
|
||||
# ceph treats ' ', '_' and '-' in keys the same, so let's do the same
|
||||
$key =~ s/[-\ ]/_/g;
|
||||
$cfg->{$section}->{$key} = $val;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return $cfg;
|
||||
}
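A short round trip through the parser above (and write_ceph_config() defined just below); the configuration content is made up for the example. Note that '-', '_' and ' ' in keys are normalized to '_':

use PVE::CephConfig;

my $raw = "[global]\n\tmon host = 10.0.0.1 10.0.0.2\n\n[client]\n\tkeyring = /etc/ceph/keyring\n";
my $cfg = PVE::CephConfig::parse_ceph_config('ceph.conf', $raw);
print $cfg->{global}->{mon_host}, "\n";        # "10.0.0.1 10.0.0.2"
print PVE::CephConfig::write_ceph_config('ceph.conf', $cfg);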
|
||||
|
||||
my $parse_ceph_file = sub {
|
||||
my ($filename) = @_;
|
||||
|
||||
my $cfg = {};
|
||||
|
||||
return $cfg if ! -f $filename;
|
||||
|
||||
my $content = PVE::Tools::file_get_contents($filename);
|
||||
|
||||
return parse_ceph_config($filename, $content);
|
||||
};
|
||||
|
||||
sub write_ceph_config {
|
||||
my ($filename, $cfg) = @_;
|
||||
|
||||
my $out = '';
|
||||
|
||||
my $cond_write_sec = sub {
|
||||
my $re = shift;
|
||||
|
||||
foreach my $section (sort keys %$cfg) {
|
||||
next if $section !~ m/^$re$/;
|
||||
$out .= "[$section]\n";
|
||||
foreach my $key (sort keys %{$cfg->{$section}}) {
|
||||
$out .= "\t $key = $cfg->{$section}->{$key}\n";
|
||||
}
|
||||
$out .= "\n";
|
||||
}
|
||||
};
|
||||
|
||||
&$cond_write_sec('global');
|
||||
&$cond_write_sec('client');
|
||||
|
||||
&$cond_write_sec('mds');
|
||||
&$cond_write_sec('mon');
|
||||
&$cond_write_sec('osd');
|
||||
&$cond_write_sec('mgr');
|
||||
|
||||
&$cond_write_sec('mds\..*');
|
||||
&$cond_write_sec('mon\..*');
|
||||
&$cond_write_sec('osd\..*');
|
||||
&$cond_write_sec('mgr\..*');
|
||||
|
||||
return $out;
|
||||
}
|
||||
|
||||
my $ceph_get_key = sub {
|
||||
my ($keyfile, $username) = @_;
|
||||
|
||||
my $key = $parse_ceph_file->($keyfile);
|
||||
my $secret = $key->{"client.$username"}->{key};
|
||||
|
||||
return $secret;
|
||||
};
|
||||
|
||||
my $get_host = sub {
|
||||
my ($hostport) = @_;
|
||||
my ($host, $port) = PVE::Tools::parse_host_and_port($hostport);
|
||||
if (!defined($host)) {
|
||||
return "";
|
||||
}
|
||||
$port = defined($port) ? ":$port" : '';
|
||||
$host = "[$host]" if Net::IP::ip_is_ipv6($host);
|
||||
return "${host}${port}";
|
||||
};
|
||||
|
||||
sub get_monaddr_list {
|
||||
my ($configfile) = shift;
|
||||
|
||||
if (!defined($configfile)) {
|
||||
warn "No ceph config specified\n";
|
||||
return;
|
||||
}
|
||||
|
||||
my $config = $parse_ceph_file->($configfile);
|
||||
|
||||
my $monhostlist = {};
|
||||
|
||||
# get all ip addresses from mon_host
|
||||
my $monhosts = [ split (/[ ,;]+/, $config->{global}->{mon_host} // "") ];
|
||||
|
||||
foreach my $monhost (@$monhosts) {
|
||||
$monhost =~ s/^\[?v\d\://; # remove beginning of vector
|
||||
$monhost =~ s|/\d+\]?||; # remove end of vector
|
||||
my $host = $get_host->($monhost);
|
||||
if ($host ne "") {
|
||||
$monhostlist->{$host} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
# then get all addrs from mon. sections
|
||||
for my $section ( keys %$config ) {
|
||||
next if $section !~ m/^mon\./;
|
||||
|
||||
if (my $addr = $config->{$section}->{mon_addr}) {
|
||||
$monhostlist->{$addr} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
return join(',', sort keys %$monhostlist);
|
||||
}
|
||||
|
||||
sub hostlist {
|
||||
my ($list_text, $separator) = @_;
|
||||
|
||||
my @monhostlist = PVE::Tools::split_list($list_text);
|
||||
return join($separator, map { $get_host->($_) } @monhostlist);
|
||||
}
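hostlist() above normalizes each monitor address with the same helper used for mon_host entries, bracketing IPv6 addresses and keeping an explicit port. A small sketch with addresses and separator chosen for illustration:

use PVE::CephConfig;

print PVE::CephConfig::hostlist('192.168.1.10:6789 fc00::1', ';'), "\n";
# prints roughly: 192.168.1.10:6789;[fc00::1]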
|
||||
|
||||
my $ceph_check_keyfile = sub {
|
||||
my ($filename, $type) = @_;
|
||||
|
||||
return if ! -f $filename;
|
||||
|
||||
my $content = PVE::Tools::file_get_contents($filename);
|
||||
eval {
|
||||
die if !$content;
|
||||
|
||||
if ($type eq 'rbd') {
|
||||
die if $content !~ /\s*\[\S+\]\s*key\s*=\s*\S+==\s*$/m;
|
||||
} elsif ($type eq 'cephfs') {
|
||||
die if $content !~ /\S+==\s*$/;
|
||||
}
|
||||
};
|
||||
die "Not a proper $type authentication file: $filename\n" if $@;
|
||||
|
||||
return undef;
|
||||
};
|
||||
|
||||
sub ceph_connect_option {
|
||||
my ($scfg, $storeid, %options) = @_;
|
||||
|
||||
my $cmd_option = {};
|
||||
my $keyfile = "/etc/pve/priv/ceph/${storeid}.keyring";
|
||||
$keyfile = "/etc/pve/priv/ceph/${storeid}.secret" if ($scfg->{type} eq 'cephfs');
|
||||
my $pveceph_managed = !defined($scfg->{monhost});
|
||||
|
||||
$cmd_option->{ceph_conf} = '/etc/pve/ceph.conf' if $pveceph_managed;
|
||||
|
||||
$ceph_check_keyfile->($keyfile, $scfg->{type});
|
||||
|
||||
if (-e "/etc/pve/priv/ceph/${storeid}.conf") {
|
||||
# allow custom ceph configuration for external clusters
|
||||
if ($pveceph_managed) {
|
||||
warn "ignoring custom ceph config for storage '$storeid', 'monhost' is not set (assuming pveceph managed cluster)!\n";
|
||||
} else {
|
||||
$cmd_option->{ceph_conf} = "/etc/pve/priv/ceph/${storeid}.conf";
|
||||
}
|
||||
}
|
||||
|
||||
$cmd_option->{keyring} = $keyfile if (-e $keyfile);
|
||||
$cmd_option->{auth_supported} = (defined $cmd_option->{keyring}) ? 'cephx' : 'none';
|
||||
$cmd_option->{userid} = $scfg->{username} ? $scfg->{username} : 'admin';
|
||||
$cmd_option->{mon_host} = hostlist($scfg->{monhost}, ',') if (defined($scfg->{monhost}));
|
||||
|
||||
if (%options) {
|
||||
foreach my $k (keys %options) {
|
||||
$cmd_option->{$k} = $options{$k};
|
||||
}
|
||||
}
|
||||
|
||||
return $cmd_option;
|
||||
|
||||
}
|
||||
|
||||
sub ceph_create_keyfile {
|
||||
my ($type, $storeid, $secret) = @_;
|
||||
|
||||
my $extension = 'keyring';
|
||||
$extension = 'secret' if ($type eq 'cephfs');
|
||||
|
||||
my $ceph_admin_keyring = '/etc/pve/priv/ceph.client.admin.keyring';
|
||||
my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension";
|
||||
|
||||
die "ceph authx keyring file for storage '$storeid' already exists!\n"
|
||||
if -e $ceph_storage_keyring && !defined($secret);
|
||||
|
||||
if (-e $ceph_admin_keyring || defined($secret)) {
|
||||
eval {
|
||||
if (defined($secret)) {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${secret}\n", 0400);
|
||||
} elsif ($type eq 'rbd') {
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
PVE::Tools::file_copy($ceph_admin_keyring, $ceph_storage_keyring);
|
||||
} elsif ($type eq 'cephfs') {
|
||||
my $cephfs_secret = $ceph_get_key->($ceph_admin_keyring, 'admin');
|
||||
mkdir '/etc/pve/priv/ceph';
|
||||
chomp $cephfs_secret;
|
||||
PVE::Tools::file_set_contents($ceph_storage_keyring, "${cephfs_secret}\n", 0400);
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
unlink $ceph_storage_keyring;
|
||||
die "failed to copy ceph authx $extension for storage '$storeid': $err\n";
|
||||
}
|
||||
} else {
|
||||
warn "$ceph_admin_keyring not found, authentication is disabled.\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub ceph_remove_keyfile {
|
||||
my ($type, $storeid) = @_;
|
||||
|
||||
my $extension = 'keyring';
|
||||
$extension = 'secret' if ($type eq 'cephfs');
|
||||
my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.$extension";
|
||||
|
||||
if (-f $ceph_storage_keyring) {
|
||||
unlink($ceph_storage_keyring) or warn "removing keyring of storage failed: $!\n";
|
||||
}
|
||||
}
|
||||
|
||||
my $ceph_version_parser = sub {
|
||||
my $ceph_version = shift;
|
||||
# FIXME this is the same as pve-manager PVE::Ceph::Tools get_local_version
|
||||
if ($ceph_version =~ /^ceph.*\sv?(\d+(?:\.\d+)+(?:-pve\d+)?)\s+(?:\(([a-zA-Z0-9]+)\))?/) {
|
||||
my ($version, $buildcommit) = ($1, $2);
|
||||
my $subversions = [ split(/\.|-/, $version) ];
|
||||
|
||||
return ($subversions, $version, $buildcommit);
|
||||
}
|
||||
warn "Could not parse Ceph version: '$ceph_version'\n";
|
||||
};
|
||||
|
||||
sub local_ceph_version {
|
||||
my ($cache) = @_;
|
||||
|
||||
my $version_string = $cache;
|
||||
if (!defined($version_string)) {
|
||||
run_command('ceph --version', outfunc => sub {
|
||||
$version_string = shift;
|
||||
});
|
||||
}
|
||||
return undef if !defined($version_string);
|
||||
# subversions is an array ref with the version parts from major to minor
# version is the filtered version string
|
||||
my ($subversions, $version) = $ceph_version_parser->($version_string);
|
||||
|
||||
return wantarray ? ($subversions, $version) : $version;
|
||||
}
|
||||
|
||||
1;
|
||||
913
src/PVE/Diskmanage.pm
Normal file
@ -0,0 +1,913 @@
|
||||
package PVE::Diskmanage;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::ProcFSTools;
|
||||
use Data::Dumper;
|
||||
use Cwd qw(abs_path);
|
||||
use Fcntl ':mode';
|
||||
use File::Basename;
|
||||
use File::stat;
|
||||
use JSON;
|
||||
|
||||
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach trim);
|
||||
|
||||
my $SMARTCTL = "/usr/sbin/smartctl";
|
||||
my $ZPOOL = "/sbin/zpool";
|
||||
my $SGDISK = "/sbin/sgdisk";
|
||||
my $PVS = "/sbin/pvs";
|
||||
my $LVS = "/sbin/lvs";
|
||||
my $LSBLK = "/bin/lsblk";
|
||||
|
||||
my sub strip_dev :prototype($) {
|
||||
my ($devpath) = @_;
|
||||
$devpath =~ s|^/dev/||;
|
||||
return $devpath;
|
||||
}
|
||||
|
||||
sub check_bin {
|
||||
my ($path) = @_;
|
||||
return -x $path;
|
||||
}
|
||||
|
||||
sub verify_blockdev_path {
|
||||
my ($rel_path) = @_;
|
||||
|
||||
die "missing path" if !$rel_path;
|
||||
my $path = abs_path($rel_path);
|
||||
die "failed to get absolute path to $rel_path\n" if !$path;
|
||||
|
||||
die "got unusual device path '$path'\n" if $path !~ m|^/dev/(.*)$|;
|
||||
|
||||
$path = "/dev/$1"; # untaint
|
||||
|
||||
assert_blockdev($path);
|
||||
|
||||
return $path;
|
||||
}
|
||||
|
||||
sub assert_blockdev {
|
||||
my ($dev, $noerr) = @_;
|
||||
|
||||
if ($dev !~ m|^/dev/| || !(-b $dev)) {
|
||||
return if $noerr;
|
||||
die "not a valid block device\n";
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub init_disk {
|
||||
my ($disk, $uuid) = @_;
|
||||
|
||||
assert_blockdev($disk);
|
||||
|
||||
# we should already have checked these in the api call, but we check again for safety
|
||||
die "$disk is a partition\n" if is_partition($disk);
|
||||
die "disk $disk is already in use\n" if disk_is_used($disk);
|
||||
|
||||
my $id = $uuid || 'R';
|
||||
run_command([$SGDISK, $disk, '-U', $id]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub disk_is_used {
|
||||
my ($disk) = @_;
|
||||
|
||||
my $dev = $disk;
|
||||
$dev =~ s|^/dev/||;
|
||||
|
||||
my $disklist = get_disks($dev, 1, 1);
|
||||
|
||||
die "'$disk' is not a valid local disk\n" if !defined($disklist->{$dev});
|
||||
return 1 if $disklist->{$dev}->{used};
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub get_smart_data {
|
||||
my ($disk, $healthonly) = @_;
|
||||
|
||||
assert_blockdev($disk);
|
||||
my $smartdata = {};
|
||||
my $type;
|
||||
|
||||
my $cmd = [$SMARTCTL, '-H'];
|
||||
push @$cmd, '-A', '-f', 'brief' if !$healthonly;
|
||||
push @$cmd, $disk;
|
||||
|
||||
my $returncode = eval {
|
||||
run_command($cmd, noerr => 1, outfunc => sub {
|
||||
my ($line) = @_;
|
||||
|
||||
# ATA SMART attributes, e.g.:
|
||||
# ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
|
||||
# 1 Raw_Read_Error_Rate POSR-K 100 100 000 - 0
|
||||
#
|
||||
# SAS and NVME disks, e.g.:
|
||||
# Data Units Written: 5,584,952 [2.85 TB]
|
||||
# Accumulated start-stop cycles: 34
|
||||
|
||||
if (defined($type) && $type eq 'ata' && $line =~ m/^([ \d]{2}\d)\s+(\S+)\s+(\S{6})\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(.*)$/) {
|
||||
my $entry = {};
|
||||
|
||||
$entry->{name} = $2 if defined $2;
|
||||
$entry->{flags} = $3 if defined $3;
|
||||
# the +0 makes a number out of the strings
|
||||
# FIXME: 'value' is deprecated in favour of 'normalized'; remove with PVE 7.0
|
||||
$entry->{value} = $4+0 if defined $4;
|
||||
$entry->{normalized} = $4+0 if defined $4;
|
||||
$entry->{worst} = $5+0 if defined $5;
|
||||
# some disks report the default threshold as --- instead of 000
|
||||
if (defined($6) && $6 eq '---') {
|
||||
$entry->{threshold} = 0;
|
||||
} else {
|
||||
$entry->{threshold} = $6+0 if defined $6;
|
||||
}
|
||||
$entry->{fail} = $7 if defined $7;
|
||||
$entry->{raw} = $8 if defined $8;
|
||||
$entry->{id} = $1 if defined $1;
|
||||
push @{$smartdata->{attributes}}, $entry;
|
||||
} elsif ($line =~ m/(?:Health Status|self\-assessment test result): (.*)$/ ) {
|
||||
$smartdata->{health} = $1;
|
||||
} elsif ($line =~ m/Vendor Specific SMART Attributes with Thresholds:/) {
|
||||
$type = 'ata';
|
||||
delete $smartdata->{text};
|
||||
} elsif ($line =~ m/=== START OF (READ )?SMART DATA SECTION ===/) {
|
||||
$type = 'text';
|
||||
} elsif (defined($type) && $type eq 'text') {
|
||||
$smartdata->{text} = '' if !defined $smartdata->{text};
|
||||
$smartdata->{text} .= "$line\n";
|
||||
# extract wearout from nvme/sas text, allow for decimal values
|
||||
if ($line =~ m/Percentage Used(?: endurance indicator)?:\s*(\d+(?:\.\d+)?)\%/i) {
|
||||
$smartdata->{wearout} = 100 - $1;
|
||||
}
|
||||
} elsif ($line =~ m/SMART Disabled/) {
|
||||
$smartdata->{health} = "SMART Disabled";
|
||||
}
|
||||
})
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
# bits 0 and 1 mark a fatal error, other bits are for disk status -> ignore (see man 8 smartctl)
|
||||
if ((defined($returncode) && ($returncode & 0b00000011)) || $err) {
|
||||
die "Error getting S.M.A.R.T. data: Exit code: $returncode\n";
|
||||
}
|
||||
|
||||
$smartdata->{type} = $type;
|
||||
|
||||
return $smartdata;
|
||||
}
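
# Illustrative usage (not part of the original commit; '/dev/sda' is a placeholder path):
#
#   my $smart = PVE::Diskmanage::get_smart_data('/dev/sda');
#   print "health: $smart->{health}\n";
#   if (($smart->{type} // '') eq 'ata') {
#       printf("%s = %s\n", $_->{name}, $_->{raw}) for @{$smart->{attributes} // []};
#   }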
|
||||
|
||||
sub get_lsblk_info {
|
||||
my $cmd = [$LSBLK, '--json', '-o', 'path,parttype,fstype'];
|
||||
my $output = "";
|
||||
eval { run_command($cmd, outfunc => sub { $output .= "$_[0]\n"; }) };
|
||||
warn "$@\n" if $@;
|
||||
return {} if $output eq '';
|
||||
|
||||
my $parsed = eval { decode_json($output) } // {};
|
||||
warn "$@\n" if $@;
|
||||
my $list = $parsed->{blockdevices} // [];
|
||||
|
||||
return {
|
||||
map {
|
||||
$_->{path} => {
|
||||
parttype => $_->{parttype},
|
||||
fstype => $_->{fstype}
|
||||
}
|
||||
} @{$list}
|
||||
};
|
||||
}
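
# Illustrative return value of get_lsblk_info() (not part of the original commit; paths
# and values are examples only):
#
#   {
#       '/dev/sda'  => { parttype => undef, fstype => undef },
#       '/dev/sda1' => { parttype => 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b', fstype => 'vfat' },
#       '/dev/sda2' => { parttype => undef, fstype => 'ext4' },
#   }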
|
||||
|
||||
my sub get_devices_by_partuuid {
|
||||
my ($lsblk_info, $uuids, $res) = @_;
|
||||
|
||||
$res = {} if !defined($res);
|
||||
|
||||
foreach my $dev (sort keys %{$lsblk_info}) {
|
||||
my $uuid = $lsblk_info->{$dev}->{parttype};
|
||||
next if !defined($uuid) || !defined($uuids->{$uuid});
|
||||
$res->{$dev} = $uuids->{$uuid};
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub get_zfs_devices {
|
||||
my ($lsblk_info) = @_;
|
||||
my $res = {};
|
||||
|
||||
return {} if !check_bin($ZPOOL);
|
||||
|
||||
# use zpool and parttype uuid, because log and cache do not have zfs type uuid
|
||||
eval {
|
||||
run_command([$ZPOOL, 'list', '-HPLv'], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
if ($line =~ m|^\t([^\t]+)\t|) {
|
||||
$res->{$1} = 1;
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
# only warn here, because maybe zfs tools are not installed
|
||||
warn "$@\n" if $@;
|
||||
|
||||
my $uuids = {
|
||||
"6a898cc3-1dd2-11b2-99a6-080020736631" => 1, # apple
|
||||
"516e7cba-6ecf-11d6-8ff8-00022d09712b" => 1, # bsd
|
||||
};
|
||||
|
||||
|
||||
$res = get_devices_by_partuuid($lsblk_info, $uuids, $res);
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub get_lvm_devices {
|
||||
my ($lsblk_info) = @_;
|
||||
my $res = {};
|
||||
eval {
|
||||
run_command([$PVS, '--noheadings', '--readonly', '-o', 'pv_name'], outfunc => sub{
|
||||
my ($line) = @_;
|
||||
$line = trim($line);
|
||||
if ($line =~ m|^/dev/|) {
|
||||
$res->{$line} = 1;
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
# if something goes wrong, we do not want to give up, but indicate an error has occurred
|
||||
warn "$@\n" if $@;
|
||||
|
||||
my $uuids = {
|
||||
"e6d6d379-f507-44c2-a23c-238f2a3df928" => 1,
|
||||
};
|
||||
|
||||
$res = get_devices_by_partuuid($lsblk_info, $uuids, $res);
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub get_ceph_journals {
|
||||
my ($lsblk_info) = @_;
|
||||
my $res = {};
|
||||
|
||||
my $uuids = {
|
||||
'45b0969e-9b03-4f30-b4c6-b4b80ceff106' => 1, # journal
|
||||
'30cd0809-c2b2-499c-8879-2d6b78529876' => 2, # db
|
||||
'5ce17fce-4087-4169-b7ff-056cc58473f9' => 3, # wal
|
||||
'cafecafe-9b03-4f30-b4c6-b4b80ceff106' => 4, # block
|
||||
};
|
||||
|
||||
$res = get_devices_by_partuuid($lsblk_info, $uuids, $res);
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
# reads the lv_tags and matches them with the devices
|
||||
sub get_ceph_volume_infos {
|
||||
my $result = {};
|
||||
|
||||
my $cmd = [ $LVS, '-S', 'lv_name=~^osd-', '-o', 'devices,lv_name,lv_tags',
|
||||
'--noheadings', '--readonly', '--separator', ';' ];
|
||||
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
$line =~ s/(?:^\s+)|(?:\s+$)//g; # trim whitespaces
|
||||
|
||||
my $fields = [ split(';', $line) ];
|
||||
|
||||
# lvs syntax is /dev/sdX(Y) where Y is the start (which we do not need)
|
||||
my ($dev) = $fields->[0] =~ m|^(/dev/[a-z]+[^(]*)|;
|
||||
if ($fields->[1] =~ m|^osd-([^-]+)-|) {
|
||||
my $type = $1;
|
||||
# $result autovivification is wanted here, to avoid creating empty hashes
|
||||
if (($type eq 'block' || $type eq 'data') && $fields->[2] =~ m/ceph.osd_id=([^,]+)/) {
|
||||
$result->{$dev}->{osdid} = $1;
|
||||
$result->{$dev}->{bluestore} = ($type eq 'block');
|
||||
if ($fields->[2] =~ m/ceph\.encrypted=1/) {
|
||||
$result->{$dev}->{encrypted} = 1;
|
||||
}
|
||||
} else {
|
||||
# undef++ becomes '1' (see `perldoc perlop`: Auto-increment)
|
||||
$result->{$dev}->{$type}++;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return $result;
|
||||
}
|
||||
|
||||
sub get_udev_info {
|
||||
my ($dev) = @_;
|
||||
|
||||
my $info = "";
|
||||
my $data = {};
|
||||
eval {
|
||||
run_command(['udevadm', 'info', '-p', $dev, '--query', 'all'], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
$info .= "$line\n";
|
||||
});
|
||||
};
|
||||
warn $@ if $@;
|
||||
return if !$info;
|
||||
|
||||
return if $info !~ m/^E: DEVTYPE=(disk|partition)$/m;
|
||||
return if $info =~ m/^E: ID_CDROM/m;
|
||||
|
||||
# we use this, because some disks are not simply in /dev e.g. /dev/cciss/c0d0
|
||||
if ($info =~ m/^E: DEVNAME=(\S+)$/m) {
|
||||
$data->{devpath} = $1;
|
||||
}
|
||||
return if !defined($data->{devpath});
|
||||
|
||||
$data->{serial} = 'unknown';
|
||||
$data->{serial} = $1 if $info =~ m/^E: ID_SERIAL_SHORT=(\S+)$/m;
|
||||
|
||||
$data->{gpt} = $info =~ m/^E: ID_PART_TABLE_TYPE=gpt$/m ? 1 : 0;
|
||||
|
||||
$data->{rpm} = -1;
|
||||
$data->{rpm} = $1 if $info =~ m/^E: ID_ATA_ROTATION_RATE_RPM=(\d+)$/m; # a rate of 0 implicitly indicates an SSD
|
||||
|
||||
$data->{usb} = 1 if $info =~ m/^E: ID_BUS=usb$/m;
|
||||
|
||||
$data->{model} = $1 if $info =~ m/^E: ID_MODEL=(.+)$/m;
|
||||
|
||||
$data->{wwn} = 'unknown';
|
||||
$data->{wwn} = $1 if $info =~ m/^E: ID_WWN=(.*)$/m;
|
||||
|
||||
if ($info =~ m/^E: DEVLINKS=(.+)$/m) {
|
||||
my @devlinks = grep(m#^/dev/disk/by-id/(ata|scsi|nvme(?!-eui))#, split (/ /, $1));
|
||||
$data->{by_id_link} = $devlinks[0] if defined($devlinks[0]);
|
||||
}
|
||||
|
||||
return $data;
|
||||
}
|
||||
|
||||
sub get_sysdir_size {
|
||||
my ($sysdir) = @_;
|
||||
|
||||
my $size = file_read_firstline("$sysdir/size");
|
||||
return if !$size;
|
||||
|
||||
# linux always considers sectors to be 512 bytes, independent of the real block size
|
||||
return $size * 512;
|
||||
}
|
||||
|
||||
sub get_sysdir_info {
|
||||
my ($sysdir) = @_;
|
||||
|
||||
return if ! -d "$sysdir/device";
|
||||
|
||||
my $data = {};
|
||||
|
||||
$data->{size} = get_sysdir_size($sysdir) or return;
|
||||
|
||||
# dir/queue/rotational should be 1 for hdd, 0 for ssd
|
||||
$data->{rotational} = file_read_firstline("$sysdir/queue/rotational") // -1;
|
||||
|
||||
$data->{vendor} = file_read_firstline("$sysdir/device/vendor") || 'unknown';
|
||||
$data->{model} = file_read_firstline("$sysdir/device/model") || 'unknown';
|
||||
|
||||
return $data;
|
||||
}
|
||||
|
||||
sub get_wear_leveling_info {
|
||||
my ($smartdata) = @_;
|
||||
my $attributes = $smartdata->{attributes};
|
||||
|
||||
if (defined($smartdata->{wearout})) {
|
||||
return $smartdata->{wearout};
|
||||
}
|
||||
|
||||
my $wearout;
|
||||
|
||||
# Common register names that represent percentage values of potential failure indicators used
|
||||
# in smartmontools' drivedb.h. Order matters, as some drives may have multiple definitions
|
||||
my @wearoutregisters = (
|
||||
"Media_Wearout_Indicator",
|
||||
"SSD_Life_Left",
|
||||
"Wear_Leveling_Count",
|
||||
"Perc_Write\/Erase_Ct_BC",
|
||||
"Perc_Rated_Life_Remain",
|
||||
"Remaining_Lifetime_Perc",
|
||||
"Percent_Lifetime_Remain",
|
||||
"Lifetime_Left",
|
||||
"PCT_Life_Remaining",
|
||||
"Lifetime_Remaining",
|
||||
"Percent_Life_Remaining",
|
||||
"Percent_Lifetime_Used",
|
||||
"Perc_Rated_Life_Used"
|
||||
);
|
||||
|
||||
# Search for S.M.A.R.T. attributes for known register
|
||||
foreach my $register (@wearoutregisters) {
|
||||
last if defined $wearout;
|
||||
foreach my $attr (@$attributes) {
|
||||
next if $attr->{name} !~ m/$register/;
|
||||
$wearout = $attr->{value};
|
||||
last;
|
||||
}
|
||||
}
|
||||
|
||||
return $wearout;
|
||||
}
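
# Illustrative usage (not part of the original commit; '/dev/nvme0n1' is a placeholder):
# combining the two helpers above to print an SSD wearout estimate.
#
#   my $smart = PVE::Diskmanage::get_smart_data('/dev/nvme0n1');
#   my $wearout = PVE::Diskmanage::get_wear_leveling_info($smart);
#   print defined($wearout) ? "wearout: ${wearout}%\n" : "wearout unknown\n";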
|
||||
|
||||
sub dir_is_empty {
|
||||
my ($dir) = @_;
|
||||
|
||||
my $dh = IO::Dir->new ($dir);
|
||||
return 1 if !$dh;
|
||||
|
||||
while (defined(my $tmp = $dh->read)) {
|
||||
next if $tmp eq '.' || $tmp eq '..';
|
||||
$dh->close;
|
||||
return 0;
|
||||
}
|
||||
$dh->close;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub is_iscsi {
|
||||
my ($sysdir) = @_;
|
||||
|
||||
if (-l $sysdir && readlink($sysdir) =~ m|host[^/]*/session[^/]*|) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
my sub is_ssdlike {
|
||||
my ($type) = @_;
|
||||
return $type eq 'ssd' || $type eq 'nvme';
|
||||
}
|
||||
|
||||
sub mounted_blockdevs {
|
||||
my $mounted = {};
|
||||
|
||||
my $mounts = PVE::ProcFSTools::parse_proc_mounts();
|
||||
|
||||
foreach my $mount (@$mounts) {
|
||||
next if $mount->[0] !~ m|^/dev/|;
|
||||
$mounted->{abs_path($mount->[0])} = $mount->[1];
|
||||
};
|
||||
|
||||
return $mounted;
|
||||
}
|
||||
|
||||
# returns hashmap of abs mount path -> first part of /proc/mounts (what)
|
||||
sub mounted_paths {
|
||||
my $mounted = {};
|
||||
|
||||
my $mounts = PVE::ProcFSTools::parse_proc_mounts();
|
||||
|
||||
foreach my $mount (@$mounts) {
|
||||
$mounted->{abs_path($mount->[1])} = $mount->[0];
|
||||
};
|
||||
|
||||
return $mounted;
|
||||
}
|
||||
|
||||
sub get_disks {
|
||||
my ($disks, $nosmart, $include_partitions) = @_;
|
||||
my $disklist = {};
|
||||
|
||||
my $mounted = mounted_blockdevs();
|
||||
|
||||
my $lsblk_info = get_lsblk_info();
|
||||
|
||||
my $journalhash = get_ceph_journals($lsblk_info);
|
||||
my $ceph_volume_infos = get_ceph_volume_infos();
|
||||
|
||||
my $zfshash = get_zfs_devices($lsblk_info);
|
||||
|
||||
my $lvmhash = get_lvm_devices($lsblk_info);
|
||||
|
||||
my $disk_regex = ".*";
|
||||
if (defined($disks)) {
|
||||
if (!ref($disks)) {
|
||||
$disks = [ $disks ];
|
||||
} elsif (ref($disks) ne 'ARRAY') {
|
||||
die "disks is not a string or array reference\n";
|
||||
}
|
||||
# we get cciss/c0d0 but need cciss!c0d0
|
||||
$_ =~ s|cciss/|cciss!| for @$disks;
|
||||
|
||||
if ($include_partitions) {
|
||||
# Proper blockdevice is needed for the regex, use parent for partitions.
|
||||
for my $disk ($disks->@*) {
|
||||
next if !is_partition("/dev/$disk");
|
||||
$disk = strip_dev(get_blockdev("/dev/$disk"));
|
||||
}
|
||||
}
|
||||
|
||||
$disk_regex = "(?:" . join('|', @$disks) . ")";
|
||||
}
|
||||
|
||||
dir_glob_foreach('/sys/block', $disk_regex, sub {
|
||||
my ($dev) = @_;
|
||||
# whitelist the following devices:
|
||||
# - hdX ide block device
|
||||
# - sdX scsi/sata block device
|
||||
# - vdX virtIO block device
|
||||
# - xvdX: xen virtual block device
|
||||
# - nvmeXnY: nvme devices
|
||||
# - cciss!cXdY: cciss devices
|
||||
return if $dev !~ m/^(h|s|x?v)d[a-z]+$/ &&
|
||||
$dev !~ m/^nvme\d+n\d+$/ &&
|
||||
$dev !~ m/^cciss\!c\d+d\d+$/;
|
||||
|
||||
my $data = get_udev_info("/sys/block/$dev") // return;
|
||||
my $devpath = $data->{devpath};
|
||||
|
||||
my $sysdir = "/sys/block/$dev";
|
||||
|
||||
# we do not want iscsi devices
|
||||
return if is_iscsi($sysdir);
|
||||
|
||||
my $sysdata = get_sysdir_info($sysdir);
|
||||
return if !defined($sysdata);
|
||||
|
||||
my $type = 'unknown';
|
||||
|
||||
if ($sysdata->{rotational} == 0) {
|
||||
$type = 'ssd';
|
||||
$type = 'nvme' if $dev =~ m/^nvme\d+n\d+$/;
|
||||
$data->{rpm} = 0;
|
||||
} elsif ($sysdata->{rotational} == 1) {
|
||||
if ($data->{rpm} != -1) {
|
||||
$type = 'hdd';
|
||||
} elsif ($data->{usb}) {
|
||||
$type = 'usb';
|
||||
$data->{rpm} = 0;
|
||||
}
|
||||
}
|
||||
|
||||
my ($health, $wearout) = ('UNKNOWN', 'N/A');
|
||||
if (!$nosmart) {
|
||||
eval {
|
||||
my $smartdata = get_smart_data($devpath, !is_ssdlike($type));
|
||||
$health = $smartdata->{health} if $smartdata->{health};
|
||||
|
||||
if (is_ssdlike($type)) { # if we have an ssd we try to get the wearout indicator
|
||||
my $wear_level = get_wear_leveling_info($smartdata);
|
||||
$wearout = $wear_level if defined($wear_level);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
# we replaced cciss/ with cciss! above, but in the result we need cciss/ again because the
|
||||
# caller might want to check the result again with the original parameter
|
||||
if ($dev =~ m|^cciss!|) {
|
||||
$dev =~ s|^cciss!|cciss/|;
|
||||
}
|
||||
|
||||
$disklist->{$dev} = {
|
||||
vendor => $sysdata->{vendor},
|
||||
model => $data->{model} || $sysdata->{model},
|
||||
size => $sysdata->{size},
|
||||
serial => $data->{serial},
|
||||
gpt => $data->{gpt},
|
||||
rpm => $data->{rpm},
|
||||
type => $type,
|
||||
wwn => $data->{wwn},
|
||||
health => $health,
|
||||
devpath => $devpath,
|
||||
wearout => $wearout,
|
||||
};
|
||||
$disklist->{$dev}->{mounted} = 1 if exists $mounted->{$devpath};
|
||||
|
||||
my $by_id_link = $data->{by_id_link};
|
||||
$disklist->{$dev}->{by_id_link} = $by_id_link if defined($by_id_link);
|
||||
|
||||
my ($osdid, $bluestore, $osdencrypted) = (-1, 0, 0);
|
||||
my ($journal_count, $db_count, $wal_count) = (0, 0, 0);
|
||||
|
||||
my $partpath = $devpath;
|
||||
# remove trailing part to get the partition base path, e.g. /dev/cciss/c0d0 -> /dev/cciss
|
||||
$partpath =~ s/\/[^\/]+$//;
|
||||
|
||||
my $determine_usage = sub {
|
||||
my ($devpath, $sysdir, $is_partition) = @_;
|
||||
|
||||
return 'LVM' if $lvmhash->{$devpath};
|
||||
return 'ZFS' if $zfshash->{$devpath};
|
||||
|
||||
my $info = $lsblk_info->{$devpath} // {};
|
||||
|
||||
if (defined(my $parttype = $info->{parttype})) {
|
||||
return 'BIOS boot' if $parttype eq '21686148-6449-6e6f-744e-656564454649';
|
||||
return 'EFI' if $parttype eq 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b';
|
||||
return 'ZFS reserved' if $parttype eq '6a945a3b-1dd2-11b2-99a6-080020736631';
|
||||
}
|
||||
|
||||
return "$info->{fstype}" if defined($info->{fstype});
|
||||
return 'mounted' if $mounted->{$devpath};
|
||||
|
||||
return if !$is_partition;
|
||||
|
||||
# for devices, this check is done explicitly later
|
||||
return 'Device Mapper' if !dir_is_empty("$sysdir/holders");
|
||||
|
||||
return; # unused partition
|
||||
};
|
||||
|
||||
my $collect_ceph_info = sub {
|
||||
my ($devpath) = @_;
|
||||
|
||||
my $ceph_volume = $ceph_volume_infos->{$devpath} or return;
|
||||
$journal_count += $ceph_volume->{journal} // 0;
|
||||
$db_count += $ceph_volume->{db} // 0;
|
||||
$wal_count += $ceph_volume->{wal} // 0;
|
||||
if (defined($ceph_volume->{osdid})) {
|
||||
$osdid = $ceph_volume->{osdid};
|
||||
$bluestore = 1 if $ceph_volume->{bluestore};
|
||||
$osdencrypted = 1 if $ceph_volume->{encrypted};
|
||||
}
|
||||
|
||||
my $result = { %{$ceph_volume} };
|
||||
$result->{journals} = delete $result->{journal} if $result->{journal};
|
||||
return $result;
|
||||
};
|
||||
|
||||
my $partitions = {};
|
||||
dir_glob_foreach("$sysdir", "$dev.+", sub {
|
||||
my ($part) = @_;
|
||||
|
||||
$partitions->{$part} = $collect_ceph_info->("$partpath/$part");
|
||||
my $lvm_based_osd = defined($partitions->{$part});
|
||||
|
||||
$partitions->{$part}->{devpath} = "$partpath/$part";
|
||||
$partitions->{$part}->{parent} = "$devpath";
|
||||
$partitions->{$part}->{mounted} = 1 if exists $mounted->{"$partpath/$part"};
|
||||
$partitions->{$part}->{gpt} = $data->{gpt};
|
||||
$partitions->{$part}->{type} = 'partition';
|
||||
$partitions->{$part}->{size} = get_sysdir_size("$sysdir/$part") // 0;
|
||||
$partitions->{$part}->{used} = $determine_usage->("$partpath/$part", "$sysdir/$part", 1);
|
||||
$partitions->{$part}->{osdid} //= -1;
|
||||
|
||||
# avoid counting twice (e.g. partition with the LVM for the DB OSD is in $journalhash)
|
||||
return if $lvm_based_osd;
|
||||
|
||||
# Legacy handling for non-LVM based OSDs
|
||||
if (my $mp = $mounted->{"$partpath/$part"}) {
|
||||
if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) {
|
||||
$osdid = $1;
|
||||
$partitions->{$part}->{osdid} = $osdid;
|
||||
}
|
||||
}
|
||||
|
||||
if (my $journal_part = $journalhash->{"$partpath/$part"}) {
|
||||
$journal_count++ if $journal_part == 1;
|
||||
$db_count++ if $journal_part == 2;
|
||||
$wal_count++ if $journal_part == 3;
|
||||
$bluestore = 1 if $journal_part == 4;
|
||||
|
||||
$partitions->{$part}->{journals} = 1 if $journal_part == 1;
|
||||
$partitions->{$part}->{db} = 1 if $journal_part == 2;
|
||||
$partitions->{$part}->{wal} = 1 if $journal_part == 3;
|
||||
$partitions->{$part}->{bluestore} = 1 if $journal_part == 4;
|
||||
}
|
||||
});
|
||||
|
||||
my $used = $determine_usage->($devpath, $sysdir, 0);
|
||||
if (!$include_partitions) {
|
||||
foreach my $part (sort keys %{$partitions}) {
|
||||
$used //= $partitions->{$part}->{used};
|
||||
}
|
||||
} else {
|
||||
# fstype might be set even if there are partitions, but showing that is confusing
|
||||
$used = 'partitions' if scalar(keys %{$partitions});
|
||||
}
|
||||
$used //= 'partitions' if scalar(keys %{$partitions});
|
||||
# multipath, software raid, etc.
|
||||
# this check comes in last, to show more specific info
|
||||
# if we have it
|
||||
$used //= 'Device Mapper' if !dir_is_empty("$sysdir/holders");
|
||||
|
||||
$disklist->{$dev}->{used} = $used if $used;
|
||||
|
||||
$collect_ceph_info->($devpath);
|
||||
|
||||
$disklist->{$dev}->{osdid} = $osdid;
|
||||
$disklist->{$dev}->{journals} = $journal_count if $journal_count;
|
||||
$disklist->{$dev}->{bluestore} = $bluestore if $osdid != -1;
|
||||
$disklist->{$dev}->{osdencrypted} = $osdencrypted if $osdid != -1;
|
||||
$disklist->{$dev}->{db} = $db_count if $db_count;
|
||||
$disklist->{$dev}->{wal} = $wal_count if $wal_count;
|
||||
|
||||
if ($include_partitions) {
|
||||
$disklist->{$_} = $partitions->{$_} for keys %{$partitions};
|
||||
}
|
||||
});
|
||||
|
||||
return $disklist;
|
||||
}
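
# Illustrative usage (not part of the original commit): list all local disks including
# partitions, skipping the (slow) SMART queries.
#
#   my $disks = PVE::Diskmanage::get_disks(undef, 1, 1);
#   for my $dev (sort keys %$disks) {
#       my $d = $disks->{$dev};
#       printf("%s\t%s\t%s\n", $d->{devpath}, $d->{type}, $d->{used} // 'unused');
#   }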
|
||||
|
||||
sub get_partnum {
|
||||
my ($part_path) = @_;
|
||||
|
||||
my $st = stat($part_path);
|
||||
|
||||
die "error detecting block device '$part_path'\n"
|
||||
if !$st || !$st->mode || !S_ISBLK($st->mode) || !$st->rdev;
|
||||
|
||||
my $major = PVE::Tools::dev_t_major($st->rdev);
|
||||
my $minor = PVE::Tools::dev_t_minor($st->rdev);
|
||||
my $partnum_path = "/sys/dev/block/$major:$minor/";
|
||||
|
||||
my $partnum = file_read_firstline("${partnum_path}partition");
|
||||
die "Partition does not exist\n" if !defined($partnum);
|
||||
die "Failed to get partition number\n" if $partnum !~ m/(\d+)/; # untaint
|
||||
$partnum = $1;
|
||||
die "Partition number $partnum is invalid\n" if $partnum > 128;
|
||||
|
||||
return $partnum;
|
||||
}
|
||||
|
||||
sub get_blockdev {
|
||||
my ($part_path) = @_;
|
||||
|
||||
my ($dev, $block_dev);
|
||||
if ($part_path =~ m|^/dev/(.*)$|) {
|
||||
$dev = $1;
|
||||
my $link = readlink "/sys/class/block/$dev";
|
||||
$block_dev = $1 if $link =~ m|([^/]*)/$dev$|;
|
||||
}
|
||||
|
||||
die "Can't parse parent device\n" if !defined($block_dev);
|
||||
die "No valid block device\n" if index($dev, $block_dev) == -1;
|
||||
|
||||
$block_dev = "/dev/$block_dev";
|
||||
die "Block device does not exists\n" if !(-b $block_dev);
|
||||
|
||||
return $block_dev;
|
||||
}
|
||||
|
||||
sub is_partition {
|
||||
my ($dev_path) = @_;
|
||||
|
||||
return defined(eval { get_partnum($dev_path) });
|
||||
}
|
||||
|
||||
sub locked_disk_action {
|
||||
my ($sub) = @_;
|
||||
my $res = PVE::Tools::lock_file('/run/lock/pve-diskmanage.lck', undef, $sub);
|
||||
die $@ if $@;
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub assert_disk_unused {
|
||||
my ($dev) = @_;
|
||||
die "device '$dev' is already in use\n" if disk_is_used($dev);
|
||||
return;
|
||||
}
|
||||
|
||||
sub append_partition {
|
||||
my ($dev, $size) = @_;
|
||||
|
||||
my $devname = $dev;
|
||||
$devname =~ s|^/dev/||;
|
||||
|
||||
my $newpartid = 1;
|
||||
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*?(\d+)/, sub {
|
||||
my ($part, $partid) = @_;
|
||||
|
||||
if ($partid >= $newpartid) {
|
||||
$newpartid = $partid + 1;
|
||||
}
|
||||
});
|
||||
|
||||
$size = PVE::Tools::convert_size($size, 'b' => 'mb');
|
||||
|
||||
run_command([ $SGDISK, '-n', "$newpartid:0:+${size}M", $dev ],
|
||||
errmsg => "error creating partition '$newpartid' on '$dev'");
|
||||
|
||||
my $partition;
|
||||
|
||||
# loop again to detect the real partition device which does not always follow
|
||||
# a strict $devname$partition scheme like /dev/nvme0n1 -> /dev/nvme0n1p1
|
||||
dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*$newpartid/, sub {
|
||||
my ($part) = @_;
|
||||
|
||||
$partition = "/dev/$part";
|
||||
});
|
||||
|
||||
return $partition;
|
||||
}
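
# Illustrative usage (not part of the original commit; '/dev/sdb' is a placeholder):
# append a 4 GiB partition and print the resulting partition device path.
#
#   my $part = PVE::Diskmanage::append_partition('/dev/sdb', 4 * 1024 * 1024 * 1024);
#   print "created $part\n";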
|
||||
|
||||
# Check if a disk or any of its partitions has a holder.
|
||||
# Can also be called with a partition.
|
||||
# Expected to be called with a result of verify_blockdev_path().
|
||||
sub has_holder {
|
||||
my ($devpath) = @_;
|
||||
|
||||
my $dev = strip_dev($devpath);
|
||||
|
||||
return $devpath if !dir_is_empty("/sys/class/block/${dev}/holders");
|
||||
|
||||
my $found;
|
||||
dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub {
|
||||
my ($part) = @_;
|
||||
$found = "/dev/${part}" if !dir_is_empty("/sys/class/block/${part}/holders");
|
||||
});
|
||||
|
||||
return $found;
|
||||
}
|
||||
|
||||
# Basic check if a disk or any of its partitions is mounted.
|
||||
# Can also be called with a partition.
|
||||
# Expected to be called with a result of verify_blockdev_path().
|
||||
sub is_mounted {
|
||||
my ($devpath) = @_;
|
||||
|
||||
my $mounted = mounted_blockdevs();
|
||||
|
||||
return $devpath if $mounted->{$devpath};
|
||||
|
||||
my $dev = strip_dev($devpath);
|
||||
|
||||
my $found;
|
||||
dir_glob_foreach("/sys/block/${dev}", "${dev}.+", sub {
|
||||
my ($part) = @_;
|
||||
my $partpath = "/dev/${part}";
|
||||
|
||||
$found = $partpath if $mounted->{$partpath};
|
||||
});
|
||||
|
||||
return $found;
|
||||
}
|
||||
|
||||
# Currently only supports GPT-partitioned disks.
|
||||
sub change_parttype {
|
||||
my ($partpath, $parttype) = @_;
|
||||
|
||||
my $err = "unable to change partition type for $partpath";
|
||||
|
||||
my $partnum = get_partnum($partpath);
|
||||
my $blockdev = get_blockdev($partpath);
|
||||
my $dev = strip_dev($blockdev);
|
||||
|
||||
my $info = get_disks($dev, 1);
|
||||
die "$err - unable to get disk info for '$blockdev'\n" if !defined($info->{$dev});
|
||||
die "$err - disk '$blockdev' is not GPT partitioned\n" if !$info->{$dev}->{gpt};
|
||||
|
||||
run_command(['sgdisk', "-t${partnum}:${parttype}", $blockdev], errmsg => $err);
|
||||
}
|
||||
|
||||
# Wipes all labels and the first 200 MiB of a disk/partition (or the whole device if it is smaller).
# If called with a partition, also sets its GPT partition type to 8300 'Linux filesystem'.
|
||||
# Expected to be called with a result of verify_blockdev_path().
|
||||
sub wipe_blockdev {
|
||||
my ($devpath) = @_;
|
||||
|
||||
my $devname = basename($devpath);
|
||||
my $dev_size = PVE::Tools::file_get_contents("/sys/class/block/$devname/size");
|
||||
|
||||
($dev_size) = $dev_size =~ m|(\d+)|; # untaint $dev_size
|
||||
die "Couldn't get the size of the device $devname\n" if !defined($dev_size);
|
||||
|
||||
my $size = ($dev_size * 512 / 1024 / 1024);
|
||||
my $count = ($size < 200) ? $size : 200;
|
||||
|
||||
my $to_wipe = [];
|
||||
dir_glob_foreach("/sys/class/block/${devname}", "${devname}.+", sub {
|
||||
my ($part) = @_;
|
||||
push $to_wipe->@*, "/dev/${part}" if -b "/dev/${part}";
|
||||
});
|
||||
|
||||
if (scalar($to_wipe->@*) > 0) {
|
||||
print "found child partitions to wipe: ". join(', ', $to_wipe->@*) ."\n";
|
||||
}
|
||||
push $to_wipe->@*, $devpath; # put actual device last
|
||||
|
||||
print "wiping block device ${devpath}\n";
|
||||
|
||||
run_command(['wipefs', '--all', $to_wipe->@*], errmsg => "error wiping '${devpath}'");
|
||||
|
||||
run_command(
|
||||
['dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'conv=fdatasync', "count=${count}"],
|
||||
errmsg => "error wiping '${devpath}'",
|
||||
);
|
||||
|
||||
if (is_partition($devpath)) {
|
||||
eval { change_parttype($devpath, '8300'); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
}
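
# Illustrative usage (not part of the original commit; '/dev/sdb' is a placeholder):
# a sketch of how the helpers above can be combined to safely wipe a disk.
#
#   my $devpath = PVE::Diskmanage::verify_blockdev_path('/dev/sdb');
#   PVE::Diskmanage::locked_disk_action(sub {
#       die "device has a holder\n" if PVE::Diskmanage::has_holder($devpath);
#       die "device is mounted\n" if PVE::Diskmanage::is_mounted($devpath);
#       PVE::Diskmanage::wipe_blockdev($devpath);
#   });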
|
||||
|
||||
# FIXME: Remove once we depend on systemd >= v249.
|
||||
# Work around udev bug https://github.com/systemd/systemd/issues/18525 ensuring database is updated.
|
||||
sub udevadm_trigger {
|
||||
my @devs = @_;
|
||||
|
||||
return if scalar(@devs) == 0;
|
||||
|
||||
eval { run_command(['udevadm', 'trigger', @devs]); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
1;
|
||||
12
src/PVE/Makefile
Normal file
@ -0,0 +1,12 @@
.PHONY: install
install:
	install -D -m 0644 Storage.pm ${DESTDIR}${PERLDIR}/PVE/Storage.pm
	install -D -m 0644 Diskmanage.pm ${DESTDIR}${PERLDIR}/PVE/Diskmanage.pm
	install -D -m 0644 CephConfig.pm ${DESTDIR}${PERLDIR}/PVE/CephConfig.pm
	make -C Storage install
	make -C API2 install
	make -C CLI install

clean:
2151
src/PVE/Storage.pm
Executable file
File diff suppressed because it is too large
933
src/PVE/Storage/BTRFSPlugin.pm
Normal file
@ -0,0 +1,933 @@
|
||||
package PVE::Storage::BTRFSPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
use Fcntl qw(S_ISDIR O_WRONLY O_CREAT O_EXCL);
|
||||
use File::Basename qw(basename dirname);
|
||||
use File::Path qw(mkpath);
|
||||
use IO::Dir;
|
||||
use POSIX qw(EEXIST);
|
||||
|
||||
use PVE::Tools qw(run_command dir_glob_foreach);
|
||||
|
||||
use PVE::Storage::DirPlugin;
|
||||
|
||||
use constant {
|
||||
BTRFS_FIRST_FREE_OBJECTID => 256,
|
||||
FS_NOCOW_FL => 0x00800000,
|
||||
FS_IOC_GETFLAGS => 0x40086602,
|
||||
FS_IOC_SETFLAGS => 0x80086601,
|
||||
BTRFS_MAGIC => 0x9123683e,
|
||||
};
|
||||
|
||||
# Configuration (similar to DirPlugin)
|
||||
|
||||
sub type {
|
||||
return 'btrfs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [
|
||||
{
|
||||
images => 1,
|
||||
rootdir => 1,
|
||||
vztmpl => 1,
|
||||
iso => 1,
|
||||
backup => 1,
|
||||
snippets => 1,
|
||||
none => 1,
|
||||
},
|
||||
{ images => 1, rootdir => 1 },
|
||||
],
|
||||
format => [ { raw => 1, subvol => 1 }, 'raw', ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
nocow => {
|
||||
description => "Set the NOCOW flag on files."
|
||||
. " Disables data checksumming and causes data errors to be unrecoverable from"
|
||||
. " while allowing direct I/O. Only use this if data does not need to be any more"
|
||||
. " safe than on a single ext4 formatted disk with no underlying raid system.",
|
||||
type => 'boolean',
|
||||
default => 0,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
shared => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
is_mountpoint => { optional => 1 },
|
||||
nocow => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
# TODO: The new variant of mkdir with `populate` vs `create`...
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
#
|
||||
# We use the same volume names as directory plugins, but map *raw* disk image file names into a
|
||||
# subdirectory.
|
||||
#
|
||||
# `vm-VMID-disk-ID.raw`
|
||||
# -> `images/VMID/vm-VMID-disk-ID/disk.raw`
|
||||
# where the `vm-VMID-disk-ID/` subdirectory is a btrfs subvolume
|
||||
|
||||
# Reuse `DirPlugin`'s `check_config`. This simply checks for invalid paths.
|
||||
sub check_config {
|
||||
my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
return PVE::Storage::DirPlugin::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
|
||||
}
|
||||
|
||||
my sub getfsmagic($) {
|
||||
my ($path) = @_;
|
||||
# The field type sizes in `struct statfs` are defined in a rather annoying way, and we only
|
||||
# need the first field, which is a `long` for our supported platforms.
|
||||
# Should be moved to pve-rs, so this can be the problem of the `libc` crate ;-)
|
||||
# Just round up and extract what we need:
|
||||
my $buf = pack('x160');
|
||||
if (0 != syscall(&PVE::Syscall::SYS_statfs, $path, $buf)) {
|
||||
die "statfs on '$path' failed - $!\n";
|
||||
}
|
||||
|
||||
return unpack('L!', $buf);
|
||||
}
|
||||
|
||||
my sub assert_btrfs($) {
|
||||
my ($path) = @_;
|
||||
die "'$path' is not a btrfs file system\n"
|
||||
if getfsmagic($path) != BTRFS_MAGIC;
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $path = $scfg->{path};
|
||||
if (!defined($scfg->{mkdir}) || $scfg->{mkdir}) {
|
||||
mkpath $path;
|
||||
}
|
||||
|
||||
my $mp = PVE::Storage::DirPlugin::parse_is_mountpoint($scfg);
|
||||
if (defined($mp) && !PVE::Storage::DirPlugin::path_is_mounted($mp, $cache->{mountdata})) {
|
||||
die "unable to activate storage '$storeid' - directory is expected to be a mount point but"
|
||||
." is not mounted: '$mp'\n";
|
||||
}
|
||||
|
||||
assert_btrfs($path); # only assert this stuff now, ensures $path is there and better UX
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return PVE::Storage::DirPlugin::status($class, $storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute) = @_;
|
||||
return PVE::Storage::DirPlugin::get_volume_attribute($class, $scfg, $storeid, $volname, $attribute);
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;
|
||||
return PVE::Storage::DirPlugin::update_volume_attribute(
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$volname,
|
||||
$attribute,
|
||||
$value,
|
||||
);
|
||||
}
|
||||
|
||||
# croak would not include the caller from within this module
|
||||
sub __error {
|
||||
my ($msg) = @_;
|
||||
my (undef, $f, $n) = caller(1);
|
||||
die "$msg at $f: $n\n";
|
||||
}
|
||||
|
||||
# Given a name (eg. `vm-VMID-disk-ID.raw`), take the part up to the format suffix as the name of
|
||||
# the subdirectory (subvolume).
|
||||
sub raw_name_to_dir($) {
|
||||
my ($raw) = @_;
|
||||
|
||||
# For the subvolume directory, strip the `.<format>` suffix:
|
||||
if ($raw =~ /^(.*)\.raw$/) {
|
||||
return $1;
|
||||
}
|
||||
|
||||
__error "internal error: bad disk name: $raw";
|
||||
}
|
||||
|
||||
sub raw_file_to_subvol($) {
|
||||
my ($file) = @_;
|
||||
|
||||
if ($file =~ m|^(.*)/disk\.raw$|) {
|
||||
return "$1";
|
||||
}
|
||||
|
||||
__error "internal error: bad raw path: $file";
|
||||
}
|
||||
|
||||
sub filesystem_path {
|
||||
my ($class, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $path = $class->get_subdir($scfg, $vtype);
|
||||
|
||||
$path .= "/$vmid" if $vtype eq 'images';
|
||||
|
||||
if (defined($format) && $format eq 'raw') {
|
||||
my $dir = raw_name_to_dir($name);
|
||||
if ($snapname) {
|
||||
$dir .= "\@$snapname";
|
||||
}
|
||||
$path .= "/$dir/disk.raw";
|
||||
} elsif (defined($format) && $format eq 'subvol') {
|
||||
$path .= "/$name";
|
||||
if ($snapname) {
|
||||
$path .= "\@$snapname";
|
||||
}
|
||||
} else {
|
||||
$path .= "/$name";
|
||||
}
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
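
# Illustrative path mapping (not part of the original commit), assuming a storage whose
# 'path' is '/mnt/pve/btrfs' and a raw volume '100/vm-100-disk-0.raw':
#
#   without snapshot: /mnt/pve/btrfs/images/100/vm-100-disk-0/disk.raw
#   snapshot 'snap1': /mnt/pve/btrfs/images/100/vm-100-disk-0@snap1/disk.raw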
|
||||
|
||||
sub btrfs_cmd {
|
||||
my ($class, $cmd, $outfunc) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $func;
|
||||
if (defined($outfunc)) {
|
||||
$func = sub {
|
||||
my $part = &$outfunc(@_);
|
||||
$msg .= $part if defined($part);
|
||||
};
|
||||
} else {
|
||||
$func = sub { $msg .= "$_[0]\n" };
|
||||
}
|
||||
run_command(['btrfs', '-q', @$cmd], errmsg => 'btrfs error', outfunc => $func);
|
||||
|
||||
return $msg;
|
||||
}
|
||||
|
||||
sub btrfs_get_subvol_id {
|
||||
my ($class, $path) = @_;
|
||||
my $info = $class->btrfs_cmd(['subvolume', 'show', '--', $path]);
|
||||
if ($info !~ /^\s*(?:Object|Subvolume) ID:\s*(\d+)$/m) {
|
||||
die "failed to get btrfs subvolume ID from: $info\n";
|
||||
}
|
||||
return $1;
|
||||
}
|
||||
|
||||
my sub chattr : prototype($$$) {
|
||||
my ($fh, $mask, $xor) = @_;
|
||||
|
||||
my $flags = pack('L!', 0);
|
||||
ioctl($fh, FS_IOC_GETFLAGS, $flags) or die "FS_IOC_GETFLAGS failed - $!\n";
|
||||
$flags = pack('L!', (unpack('L!', $flags) & $mask) ^ $xor);
|
||||
ioctl($fh, FS_IOC_SETFLAGS, $flags) or die "FS_IOC_SETFLAGS failed - $!\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $newname = $name;
|
||||
$newname =~ s/^vm-/base-/;
|
||||
|
||||
# If we're not working with a 'raw' file, which is the only thing that's "different" for btrfs,
|
||||
# or a subvolume, we forward to the DirPlugin
|
||||
if ($format ne 'raw' && $format ne 'subvol') {
|
||||
return PVE::Storage::DirPlugin::create_base(@_);
|
||||
}
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $newvolname = $basename ? "$basevmid/$basename/$vmid/$newname" : "$vmid/$newname";
|
||||
my $newpath = $class->filesystem_path($scfg, $newvolname);
|
||||
|
||||
my $subvol = $path;
|
||||
my $newsubvol = $newpath;
|
||||
if ($format eq 'raw') {
|
||||
$subvol = raw_file_to_subvol($subvol);
|
||||
$newsubvol = raw_file_to_subvol($newsubvol);
|
||||
}
|
||||
|
||||
rename($subvol, $newsubvol)
|
||||
|| die "rename '$subvol' to '$newsubvol' failed - $!\n";
|
||||
eval { $class->btrfs_cmd(['property', 'set', $newsubvol, 'ro', 'true']) };
|
||||
warn $@ if $@;
|
||||
|
||||
return $newvolname;
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
# If we're not working with a 'raw' file, which is the only thing that's "different" for btrfs,
|
||||
# or a subvolume, we forward to the DirPlugin
|
||||
if ($format ne 'raw' && $format ne 'subvol') {
|
||||
return PVE::Storage::DirPlugin::clone_image(@_);
|
||||
}
|
||||
|
||||
my $imagedir = $class->get_subdir($scfg, 'images');
|
||||
$imagedir .= "/$vmid";
|
||||
mkpath $imagedir;
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $newname = $class->find_free_diskname($storeid, $scfg, $vmid, $format, 1);
|
||||
|
||||
# For btrfs subvolumes we don't actually need the "link":
|
||||
#my $newvolname = "$basevmid/$basename/$vmid/$newname";
|
||||
my $newvolname = "$vmid/$newname";
|
||||
my $newpath = $class->filesystem_path($scfg, $newvolname);
|
||||
|
||||
my $subvol = $path;
|
||||
my $newsubvol = $newpath;
|
||||
if ($format eq 'raw') {
|
||||
$subvol = raw_file_to_subvol($subvol);
|
||||
$newsubvol = raw_file_to_subvol($newsubvol);
|
||||
}
|
||||
|
||||
$class->btrfs_cmd(['subvolume', 'snapshot', '--', $subvol, $newsubvol]);
|
||||
|
||||
return $newvolname;
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
if ($fmt ne 'raw' && $fmt ne 'subvol') {
|
||||
return $class->SUPER::alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size);
|
||||
}
|
||||
|
||||
# From Plugin.pm:
|
||||
|
||||
my $imagedir = $class->get_subdir($scfg, 'images') . "/$vmid";
|
||||
|
||||
mkpath $imagedir;
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt, 1) if !$name;
|
||||
|
||||
my (undef, $tmpfmt) = PVE::Storage::Plugin::parse_name_dir($name);
|
||||
|
||||
die "illegal name '$name' - wrong extension for format ('$tmpfmt != '$fmt')\n"
|
||||
if $tmpfmt ne $fmt;
|
||||
|
||||
# End copy from Plugin.pm
|
||||
|
||||
my $subvol = "$imagedir/$name";
|
||||
# .raw is not part of the directory name
|
||||
$subvol =~ s/\.raw$//;
|
||||
|
||||
die "disk image '$subvol' already exists\n" if -e $subvol;
|
||||
|
||||
my $path;
|
||||
if ($fmt eq 'raw') {
|
||||
$path = "$subvol/disk.raw";
|
||||
}
|
||||
|
||||
if ($fmt eq 'subvol' && !!$size) {
|
||||
# NOTE: `btrfs send/recv` actually drops quota information so supporting subvolumes with
|
||||
# quotas doesn't play nice with send/recv.
|
||||
die "btrfs quotas are currently not supported, use an unsized subvolume or a raw file\n";
|
||||
}
|
||||
|
||||
$class->btrfs_cmd(['subvolume', 'create', '--', $subvol]);
|
||||
|
||||
eval {
|
||||
if ($fmt eq 'subvol') {
|
||||
# Nothing to do for now...
|
||||
|
||||
# This is how we *would* do it:
|
||||
# # Use the subvol's default 0/$id qgroup
|
||||
# eval {
|
||||
# # This call should happen at storage creation instead and therefore governed by a
|
||||
# # configuration option!
|
||||
# # $class->btrfs_cmd(['quota', 'enable', $subvol]);
|
||||
# my $id = $class->btrfs_get_subvol_id($subvol);
|
||||
# $class->btrfs_cmd(['qgroup', 'limit', "${size}k", "0/$id", $subvol]);
|
||||
# };
|
||||
} elsif ($fmt eq 'raw') {
|
||||
sysopen my $fh, $path, O_WRONLY | O_CREAT | O_EXCL
|
||||
or die "failed to create raw file '$path' - $!\n";
|
||||
chattr($fh, ~FS_NOCOW_FL, FS_NOCOW_FL) if $scfg->{nocow};
|
||||
truncate($fh, $size * 1024)
|
||||
or die "failed to set file size for '$path' - $!\n";
|
||||
close($fh);
|
||||
} else {
|
||||
die "internal format error (format = $fmt)\n";
|
||||
}
|
||||
};
|
||||
|
||||
if (my $err = $@) {
|
||||
eval { $class->btrfs_cmd(['subvolume', 'delete', '--', $subvol]); };
|
||||
warn $@ if $@;
|
||||
die $err;
|
||||
}
|
||||
|
||||
return "$vmid/$name";
|
||||
}
|
||||
|
||||
# Same as btrfsprogs does:
|
||||
my sub path_is_subvolume : prototype($) {
|
||||
my ($path) = @_;
|
||||
my @stat = stat($path)
|
||||
or die "stat failed on '$path' - $!\n";
|
||||
my ($ino, $mode) = @stat[1, 2];
|
||||
return S_ISDIR($mode) && $ino == BTRFS_FIRST_FREE_OBJECTID;
|
||||
}
|
||||
|
||||
my $BTRFS_VOL_REGEX = qr/((?:vm|base|subvol)-\d+-disk-\d+(?:\.subvol)?)(?:\@(\S+))$/;
|
||||
|
||||
# Calls `$code->($volume, $name, $snapshot)` for each subvol in a directory matching our volume
|
||||
# regex.
|
||||
my sub foreach_subvol : prototype($$) {
|
||||
my ($dir, $code) = @_;
|
||||
|
||||
dir_glob_foreach($dir, $BTRFS_VOL_REGEX, sub {
|
||||
my ($volume, $name, $snapshot) = ($1, $2, $3);
|
||||
return if !path_is_subvolume("$dir/$volume");
|
||||
$code->($volume, $name, $snapshot);
|
||||
})
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase, $_format) = @_;
|
||||
|
||||
my (undef, undef, $vmid, undef, undef, undef, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
if (!defined($format) || ($format ne 'subvol' && $format ne 'raw')) {
|
||||
return $class->SUPER::free_image($storeid, $scfg, $volname, $isBase, $_format);
|
||||
}
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
|
||||
my $subvol = $path;
|
||||
if ($format eq 'raw') {
|
||||
$subvol = raw_file_to_subvol($path);
|
||||
}
|
||||
|
||||
my $dir = dirname($subvol);
|
||||
my $basename = basename($subvol);
|
||||
my @snapshot_vols;
|
||||
foreach_subvol($dir, sub {
|
||||
my ($volume, $name, $snapshot) = @_;
|
||||
return if $name ne $basename;
|
||||
return if !defined $snapshot;
|
||||
push @snapshot_vols, "$dir/$volume";
|
||||
});
|
||||
|
||||
$class->btrfs_cmd(['subvolume', 'delete', '--', @snapshot_vols, $subvol]);
|
||||
# try to clean up the directory to not clutter the storage with empty $vmid dirs if
|
||||
# all images from a guest got deleted
|
||||
rmdir($dir);
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
# Currently not used because quotas clash with send/recv.
|
||||
# my sub btrfs_subvol_quota {
|
||||
# my ($class, $path) = @_;
|
||||
# my $id = '0/' . $class->btrfs_get_subvol_id($path);
|
||||
# my $search = qr/^\Q$id\E\s+(\d)+\s+\d+\s+(\d+)\s*$/;
|
||||
# my ($used, $size);
|
||||
# $class->btrfs_cmd(['qgroup', 'show', '--raw', '-rf', '--', $path], sub {
|
||||
# return if defined($size);
|
||||
# if ($_[0] =~ $search) {
|
||||
# ($used, $size) = ($1, $2);
|
||||
# }
|
||||
# });
|
||||
# if (!defined($size)) {
|
||||
# # syslog should include more information:
|
||||
# syslog('err', "failed to get subvolume size for: $path (id $id)");
|
||||
# # UI should only see the last path component:
|
||||
# $path =~ s|^.*/||;
|
||||
# die "failed to get subvolume size for $path\n";
|
||||
# }
|
||||
# return wantarray ? ($used, $size) : $size;
|
||||
# }
|
||||
|
||||
sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
|
||||
my $format = ($class->parse_volname($volname))[6];
|
||||
|
||||
if (defined($format) && $format eq 'subvol') {
|
||||
my $ctime = (stat($path))[10];
|
||||
my ($used, $size) = (0, 0);
|
||||
#my ($used, $size) = btrfs_subvol_quota($class, $path); # uses wantarray
|
||||
return wantarray ? ($size, 'subvol', $used, undef, $ctime) : 1;
|
||||
}
|
||||
|
||||
return PVE::Storage::Plugin::file_size_info($path, $timeout);
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
|
||||
my $format = ($class->parse_volname($volname))[6];
|
||||
if ($format eq 'subvol') {
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $id = '0/' . $class->btrfs_get_subvol_id($path);
|
||||
$class->btrfs_cmd(['qgroup', 'limit', '--', "${size}k", "0/$id", $path]);
|
||||
return undef;
|
||||
}
|
||||
|
||||
return PVE::Storage::Plugin::volume_resize(@_);
|
||||
}
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6];
|
||||
if ($format ne 'subvol' && $format ne 'raw') {
|
||||
return PVE::Storage::Plugin::volume_snapshot(@_);
|
||||
}
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $snap_path = $class->filesystem_path($scfg, $volname, $snap);
|
||||
|
||||
if ($format eq 'raw') {
|
||||
$path = raw_file_to_subvol($path);
|
||||
$snap_path = raw_file_to_subvol($snap_path);
|
||||
}
|
||||
|
||||
my $snapshot_dir = $class->get_subdir($scfg, 'images') . "/$vmid";
|
||||
mkpath $snapshot_dir;
|
||||
|
||||
$class->btrfs_cmd(['subvolume', 'snapshot', '-r', '--', $path, $snap_path]);
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_rollback_is_possible {
|
||||
my ($class, $scfg, $storeid, $volname, $snap, $blockers) = @_;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my ($name, $format) = ($class->parse_volname($volname))[1,6];
|
||||
|
||||
if ($format ne 'subvol' && $format ne 'raw') {
|
||||
return PVE::Storage::Plugin::volume_snapshot_rollback(@_);
|
||||
}
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $snap_path = $class->filesystem_path($scfg, $volname, $snap);
|
||||
|
||||
if ($format eq 'raw') {
|
||||
$path = raw_file_to_subvol($path);
|
||||
$snap_path = raw_file_to_subvol($snap_path);
|
||||
}
|
||||
|
||||
# Simple version would be:
|
||||
# rename old to temp
|
||||
# create new
|
||||
# on error rename temp back
|
||||
# But for atomicity in case the rename after create-failure *also* fails, we create the new
|
||||
# subvol first, then use RENAME_EXCHANGE.
|
||||
my $tmp_path = "$path.tmp.$$";
|
||||
$class->btrfs_cmd(['subvolume', 'snapshot', '--', $snap_path, $tmp_path]);
|
||||
# The paths are absolute, so pass -1 as file descriptors.
|
||||
my $ok = PVE::Tools::renameat2(-1, $tmp_path, -1, $path, &PVE::Tools::RENAME_EXCHANGE);
|
||||
|
||||
eval { $class->btrfs_cmd(['subvolume', 'delete', '--', $tmp_path]) };
|
||||
warn "failed to remove '$tmp_path' subvolume: $@" if $@;
|
||||
|
||||
if (!$ok) {
|
||||
die "failed to rotate '$tmp_path' into place at '$path' - $!\n";
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
|
||||
|
||||
my ($name, $vmid, $format) = ($class->parse_volname($volname))[1,2,6];
|
||||
|
||||
if ($format ne 'subvol' && $format ne 'raw') {
|
||||
return PVE::Storage::Plugin::volume_snapshot_delete(@_);
|
||||
}
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname, $snap);
|
||||
|
||||
if ($format eq 'raw') {
|
||||
$path = raw_file_to_subvol($path);
|
||||
}
|
||||
|
||||
$class->btrfs_cmd(['subvolume', 'delete', '--', $path]);
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
snapshot => {
|
||||
current => { qcow2 => 1, raw => 1, subvol => 1 },
|
||||
snap => { qcow2 => 1, raw => 1, subvol => 1 }
|
||||
},
|
||||
clone => {
|
||||
base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 },
|
||||
current => { raw => 1 },
|
||||
snap => { raw => 1 },
|
||||
},
|
||||
template => {
|
||||
current => { qcow2 => 1, raw => 1, vmdk => 1, subvol => 1 },
|
||||
},
|
||||
copy => {
|
||||
base => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 },
|
||||
current => { qcow2 => 1, raw => 1, subvol => 1, vmdk => 1 },
|
||||
snap => { qcow2 => 1, raw => 1, subvol => 1 },
|
||||
},
|
||||
sparseinit => {
|
||||
base => { qcow2 => 1, raw => 1, vmdk => 1 },
|
||||
current => { qcow2 => 1, raw => 1, vmdk => 1 },
|
||||
},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
|
||||
return 1 if defined($features->{$feature}->{$key}->{$format});
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
my $imagedir = $class->get_subdir($scfg, 'images');
|
||||
|
||||
my $res = [];
|
||||
|
||||
# Copied from Plugin.pm, with file_size_info calls adapted:
|
||||
foreach my $fn (<$imagedir/[0-9][0-9]*/*>) {
|
||||
# different from Plugin.pm, the regex below also excludes '@' as a valid file name character
|
||||
next if $fn !~ m@^(/.+/(\d+)/([^/\@.]+(?:\.(qcow2|vmdk|subvol))?))$@;
|
||||
$fn = $1; # untaint
|
||||
|
||||
my $owner = $2;
|
||||
my $name = $3;
|
||||
my $ext = $4;
|
||||
|
||||
next if !$vollist && defined($vmid) && ($owner ne $vmid);
|
||||
|
||||
my $volid = "$storeid:$owner/$name";
|
||||
my ($size, $format, $used, $parent, $ctime);
|
||||
|
||||
if (!$ext) { # raw
|
||||
$volid .= '.raw';
|
||||
($size, $format, $used, $parent, $ctime) = PVE::Storage::Plugin::file_size_info("$fn/disk.raw");
|
||||
} elsif ($ext eq 'subvol') {
|
||||
($used, $size) = (0, 0);
|
||||
#($used, $size) = btrfs_subvol_quota($class, $fn);
|
||||
$format = 'subvol';
|
||||
} else {
|
||||
($size, $format, $used, $parent, $ctime) = PVE::Storage::Plugin::file_size_info($fn);
|
||||
}
|
||||
next if !($format && defined($size));
|
||||
|
||||
if ($vollist) {
|
||||
next if ! grep { $_ eq $volid } @$vollist;
|
||||
}
|
||||
|
||||
my $info = {
|
||||
volid => $volid, format => $format,
|
||||
size => $size, vmid => $owner, used => $used, parent => $parent,
|
||||
};
|
||||
|
||||
$info->{ctime} = $ctime if $ctime;
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub volume_export_formats {
|
||||
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
|
||||
# We can do whatever `DirPlugin` can do.
|
||||
my @result = PVE::Storage::Plugin::volume_export_formats(@_);
|
||||
|
||||
# `btrfs send` only works on snapshots:
|
||||
return @result if !defined $snapshot;
|
||||
|
||||
# Incremental stream with snapshots is only supported if the snapshots are listed (new api):
|
||||
return @result if defined($base_snapshot) && $with_snapshots && ref($with_snapshots) ne 'ARRAY';
|
||||
|
||||
# Otherwise we do also support `with_snapshots`.
|
||||
|
||||
# Finally, `btrfs send` only works on formats where we actually use btrfs subvolumes:
|
||||
my $format = ($class->parse_volname($volname))[6];
|
||||
return @result if $format ne 'raw' && $format ne 'subvol';
|
||||
|
||||
return ('btrfs', @result);
|
||||
}
|
||||
|
||||
sub volume_import_formats {
|
||||
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
|
||||
# Same as export-formats, beware the parameter order:
|
||||
return volume_export_formats(
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$volname,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
);
|
||||
}
|
||||
|
||||
sub volume_export {
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
) = @_;
|
||||
|
||||
if ($format ne 'btrfs') {
|
||||
return PVE::Storage::Plugin::volume_export(@_);
|
||||
}
|
||||
|
||||
die "format 'btrfs' only works on snapshots\n"
|
||||
if !defined $snapshot;
|
||||
|
||||
die "'btrfs' format in incremental mode requires snapshots to be listed explicitly\n"
|
||||
if defined($base_snapshot) && $with_snapshots && ref($with_snapshots) ne 'ARRAY';
|
||||
|
||||
my $volume_format = ($class->parse_volname($volname))[6];
|
||||
|
||||
die "btrfs-sending volumes of type $volume_format ('$volname') is not supported\n"
|
||||
if $volume_format ne 'raw' && $volume_format ne 'subvol';
|
||||
|
||||
my $path = $class->path($scfg, $volname, $storeid);
|
||||
|
||||
if ($volume_format eq 'raw') {
|
||||
$path = raw_file_to_subvol($path);
|
||||
}
|
||||
|
||||
my $cmd = ['btrfs', '-q', 'send', '-e'];
|
||||
if ($base_snapshot) {
|
||||
my $base = $class->path($scfg, $volname, $storeid, $base_snapshot);
|
||||
if ($volume_format eq 'raw') {
|
||||
$base = raw_file_to_subvol($base);
|
||||
}
|
||||
push @$cmd, '-p', $base;
|
||||
}
|
||||
push @$cmd, '--';
|
||||
if (ref($with_snapshots) eq 'ARRAY') {
|
||||
push @$cmd, (map { "$path\@$_" } ($with_snapshots // [])->@*), $path;
|
||||
} else {
|
||||
dir_glob_foreach(dirname($path), $BTRFS_VOL_REGEX, sub {
|
||||
push @$cmd, "$path\@$_[2]" if !(defined($snapshot) && $_[2] eq $snapshot);
|
||||
});
|
||||
}
|
||||
$path .= "\@$snapshot" if defined($snapshot);
|
||||
push @$cmd, $path;
|
||||
|
||||
run_command($cmd, output => '>&'.fileno($fh));
|
||||
return;
|
||||
}
|
||||
|
||||
sub volume_import {
|
||||
my (
|
||||
$class,
|
||||
$scfg,
|
||||
$storeid,
|
||||
$fh,
|
||||
$volname,
|
||||
$format,
|
||||
$snapshot,
|
||||
$base_snapshot,
|
||||
$with_snapshots,
|
||||
$allow_rename,
|
||||
) = @_;
|
||||
|
||||
if ($format ne 'btrfs') {
|
||||
return PVE::Storage::Plugin::volume_import(@_);
|
||||
}
|
||||
|
||||
die "format 'btrfs' only works on snapshots\n"
|
||||
if !defined $snapshot;
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $volume_format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "btrfs-receiving volumes of type $volume_format ('$volname') is not supported\n"
|
||||
if $volume_format ne 'raw' && $volume_format ne 'subvol';
|
||||
|
||||
if (defined($base_snapshot)) {
|
||||
my $path = $class->path($scfg, $volname, $storeid, $base_snapshot);
|
||||
die "base snapshot '$base_snapshot' not found - no such directory '$path'\n"
|
||||
if !path_is_subvolume($path);
|
||||
}
|
||||
|
||||
my $destination = $class->filesystem_path($scfg, $volname);
|
||||
if ($volume_format eq 'raw') {
|
||||
$destination = raw_file_to_subvol($destination);
|
||||
}
|
||||
|
||||
if (!defined($base_snapshot) && -e $destination) {
|
||||
die "volume $volname already exists\n" if !$allow_rename;
|
||||
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format, 1);
|
||||
}
|
||||
|
||||
my $imagedir = $class->get_subdir($scfg, $vtype);
|
||||
$imagedir .= "/$vmid" if $vtype eq 'images';
|
||||
|
||||
my $tmppath = "$imagedir/recv.$vmid.tmp";
|
||||
mkdir($imagedir); # FIXME: if $scfg->{mkdir};
|
||||
if (!mkdir($tmppath)) {
|
||||
die "temp receive directory already exists at '$tmppath', incomplete concurrent import?\n"
|
||||
if $! == EEXIST;
|
||||
die "failed to create temporary receive directory at '$tmppath' - $!\n";
|
||||
}
|
||||
|
||||
my $dh = IO::Dir->new($tmppath)
|
||||
or die "failed to open temporary receive directory '$tmppath' - $!\n";
|
||||
eval {
|
||||
run_command(['btrfs', '-q', 'receive', '-e', '--', $tmppath], input => '<&'.fileno($fh));
|
||||
|
||||
# Analyze the received subvolumes:
|
||||
my ($diskname, $found_snapshot, @snapshots);
|
||||
$dh->rewind;
|
||||
while (defined(my $entry = $dh->read)) {
|
||||
next if $entry eq '.' || $entry eq '..';
|
||||
next if $entry !~ /^$BTRFS_VOL_REGEX$/;
|
||||
my ($cur_diskname, $cur_snapshot) = ($1, $2);
|
||||
|
||||
die "send stream included a non-snapshot subvolume\n"
|
||||
if !defined($cur_snapshot);
|
||||
|
||||
if (!defined($diskname)) {
|
||||
$diskname = $cur_diskname;
|
||||
} else {
|
||||
die "multiple disks contained in stream ('$diskname' vs '$cur_diskname')\n"
|
||||
if $diskname ne $cur_diskname;
|
||||
}
|
||||
|
||||
if ($cur_snapshot eq $snapshot) {
|
||||
$found_snapshot = 1;
|
||||
} else {
|
||||
push @snapshots, $cur_snapshot;
|
||||
}
|
||||
}
|
||||
|
||||
die "send stream did not contain the expected current snapshot '$snapshot'\n"
|
||||
if !$found_snapshot;
|
||||
|
||||
# Rotate the disk into place, first the current state:
|
||||
# Note that read-only subvolumes cannot be moved into different directories, but for the
|
||||
# "current" state we also want a writable copy, so start with that:
|
||||
$class->btrfs_cmd(['property', 'set', "$tmppath/$diskname\@$snapshot", 'ro', 'false']);
|
||||
PVE::Tools::renameat2(
|
||||
-1,
|
||||
"$tmppath/$diskname\@$snapshot",
|
||||
-1,
|
||||
$destination,
|
||||
&PVE::Tools::RENAME_NOREPLACE,
|
||||
) or die "failed to move received snapshot '$tmppath/$diskname\@$snapshot'"
|
||||
. " into place at '$destination' - $!\n";
|
||||
|
||||
# Now recreate the actual snapshot:
|
||||
$class->btrfs_cmd([
|
||||
'subvolume',
|
||||
'snapshot',
|
||||
'-r',
|
||||
'--',
|
||||
$destination,
|
||||
"$destination\@$snapshot",
|
||||
]);
|
||||
|
||||
# Now go through the remaining snapshots (if any)
|
||||
foreach my $snap (@snapshots) {
|
||||
$class->btrfs_cmd(['property', 'set', "$tmppath/$diskname\@$snap", 'ro', 'false']);
|
||||
PVE::Tools::renameat2(
|
||||
-1,
|
||||
"$tmppath/$diskname\@$snap",
|
||||
-1,
|
||||
"$destination\@$snap",
|
||||
&PVE::Tools::RENAME_NOREPLACE,
|
||||
) or die "failed to move received snapshot '$tmppath/$diskname\@$snap'"
|
||||
. " into place at '$destination\@$snap' - $!\n";
|
||||
eval { $class->btrfs_cmd(['property', 'set', "$destination\@$snap", 'ro', 'true']) };
|
||||
warn "failed to make $destination\@$snap read-only - $!\n" if $@;
|
||||
}
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
eval {
|
||||
# Cleanup all the received snapshots we did not move into place, so we can remove the temp
|
||||
# directory.
|
||||
if ($dh) {
|
||||
$dh->rewind;
|
||||
while (defined(my $entry = $dh->read)) {
|
||||
next if $entry eq '.' || $entry eq '..';
|
||||
eval { $class->btrfs_cmd(['subvolume', 'delete', '--', "$tmppath/$entry"]) };
|
||||
warn $@ if $@;
|
||||
}
|
||||
$dh->close; undef $dh;
|
||||
}
|
||||
if (!rmdir($tmppath)) {
|
||||
warn "failed to remove temporary directory '$tmppath' - $!\n"
|
||||
}
|
||||
};
|
||||
warn $@ if $@;
|
||||
if ($err) {
|
||||
# clean up if the directory ended up being empty after an error
|
||||
rmdir($tmppath);
|
||||
die $err;
|
||||
}
|
||||
|
||||
return "$storeid:$volname";
|
||||
}
|
||||
|
||||
1
|
||||
313
src/PVE/Storage/CIFSPlugin.pm
Normal file
@ -0,0 +1,313 @@
|
||||
package PVE::Storage::CIFSPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Net::IP;
|
||||
use PVE::Tools qw(run_command);
|
||||
use PVE::ProcFSTools;
|
||||
use File::Path;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# CIFS helper functions
|
||||
|
||||
sub cifs_is_mounted : prototype($$) {
|
||||
my ($scfg, $mountdata) = @_;
|
||||
|
||||
my ($mountpoint, $server, $share) = $scfg->@{'path', 'server', 'share'};
|
||||
my $subdir = $scfg->{subdir} // '';
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
my $source = "//${server}/$share$subdir";
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ /^cifs/ &&
|
||||
$_->[0] =~ m|^\Q$source\E/?$| &&
|
||||
$_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub cifs_cred_file_name {
|
||||
my ($storeid) = @_;
|
||||
return "/etc/pve/priv/storage/${storeid}.pw";
|
||||
}
|
||||
|
||||
sub cifs_delete_credentials {
|
||||
my ($storeid) = @_;
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
unlink($cred_file) or warn "removing cifs credentials '$cred_file' failed: $!\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub cifs_set_credentials {
|
||||
my ($password, $storeid) = @_;
|
||||
|
||||
my $cred_file = cifs_cred_file_name($storeid);
|
||||
mkdir "/etc/pve/priv/storage";
|
||||
|
||||
PVE::Tools::file_set_contents($cred_file, "password=$password\n");
|
||||
|
||||
return $cred_file;
|
||||
}
|
||||
|
||||
sub get_cred_file {
|
||||
my ($storeid) = @_;
|
||||
|
||||
my $cred_file = cifs_cred_file_name($storeid);
|
||||
|
||||
if (-e $cred_file) {
|
||||
return $cred_file;
|
||||
}
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub cifs_mount : prototype($$$$$) {
|
||||
my ($scfg, $storeid, $smbver, $user, $domain) = @_;
|
||||
|
||||
my ($mountpoint, $server, $share) = $scfg->@{'path', 'server', 'share'};
|
||||
my $subdir = $scfg->{subdir} // '';
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
my $source = "//${server}/$share$subdir";
|
||||
|
||||
my $cmd = ['/bin/mount', '-t', 'cifs', $source, $mountpoint, '-o', 'soft', '-o'];
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
push @$cmd, "username=$user", '-o', "credentials=$cred_file";
|
||||
push @$cmd, '-o', "domain=$domain" if defined($domain);
|
||||
} else {
|
||||
push @$cmd, 'guest,username=guest';
|
||||
}
|
||||
|
||||
push @$cmd, '-o', defined($smbver) ? "vers=$smbver" : "vers=default";
|
||||
|
||||
run_command($cmd, errmsg => "mount error");
|
||||
}
|
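# A minimal sketch (not part of the plugin) of the mount command cifs_mount()
# assembles for a hypothetical storage 'backup01' with server 10.0.0.5, share
# 'dump', user 'backup' and smbversion '3.0', assuming a credentials file
# exists; all names and paths are made up for illustration.
use strict;
use warnings;

my ($user, $cred_file, $smbver) = ('backup', '/etc/pve/priv/storage/backup01.pw', '3.0');
my $cmd = ['/bin/mount', '-t', 'cifs', '//10.0.0.5/dump', '/mnt/pve/backup01', '-o', 'soft', '-o'];
push @$cmd, "username=$user", '-o', "credentials=$cred_file";
push @$cmd, '-o', "vers=$smbver";
print join(' ', @$cmd), "\n";
# /bin/mount -t cifs //10.0.0.5/dump /mnt/pve/backup01 -o soft -o username=backup
#   -o credentials=/etc/pve/priv/storage/backup01.pw -o vers=3.0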
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'cifs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1,
|
||||
backup => 1, snippets => 1}, { images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
share => {
|
||||
description => "CIFS share.",
|
||||
type => 'string',
|
||||
},
|
||||
password => {
|
||||
description => "Password for accessing the share/datastore.",
|
||||
type => 'string',
|
||||
maxLength => 256,
|
||||
},
|
||||
domain => {
|
||||
description => "CIFS domain.",
|
||||
type => 'string',
|
||||
optional => 1,
|
||||
maxLength => 256,
|
||||
},
|
||||
smbversion => {
|
||||
description => "SMB protocol version. 'default' if not set, negotiates the highest SMB2+"
|
||||
." version supported by both the client and server.",
|
||||
type => 'string',
|
||||
default => 'default',
|
||||
enum => ['default', '2.0', '2.1', '3', '3.0', '3.11'],
|
||||
optional => 1,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
share => { fixed => 1 },
|
||||
subdir => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1},
|
||||
domain => { optional => 1},
|
||||
smbversion => { optional => 1},
|
||||
mkdir => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
$config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
|
||||
|
||||
return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %sensitive) = @_;
|
||||
|
||||
if (defined($sensitive{password})) {
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
} else {
|
||||
cifs_delete_credentials($storeid);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_update_hook {
|
||||
my ($class, $storeid, $scfg, %sensitive) = @_;
|
||||
|
||||
return if !exists($sensitive{password});
|
||||
|
||||
if (defined($sensitive{password})) {
|
||||
cifs_set_credentials($sensitive{password}, $storeid);
|
||||
if (!exists($scfg->{username})) {
|
||||
warn "storage $storeid: ignoring password parameter, no user set\n";
|
||||
}
|
||||
} else {
|
||||
cifs_delete_credentials($storeid);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_delete_hook {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
cifs_delete_credentials($storeid);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
return undef
|
||||
if !cifs_is_mounted($scfg, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
if (!cifs_is_mounted($scfg, $cache->{mountdata})) {
|
||||
|
||||
mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
|
||||
cifs_mount($scfg, $storeid, $scfg->{smbversion},
|
||||
$scfg->{username}, $scfg->{domain});
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
if (cifs_is_mounted($scfg, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
sub check_connection {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
my $servicename = '//'.$scfg->{server}.'/'.$scfg->{share};
|
||||
|
||||
my $cmd = ['/usr/bin/smbclient', $servicename, '-d', '0'];
|
||||
|
||||
if (defined($scfg->{smbversion}) && $scfg->{smbversion} ne 'default') {
|
||||
# max-protocol version, so basically only relevant for smb2 vs smb3
|
||||
push @$cmd, '-m', "smb" . int($scfg->{smbversion});
|
||||
}
|
||||
|
||||
if (my $cred_file = get_cred_file($storeid)) {
|
||||
push @$cmd, '-U', $scfg->{username}, '-A', $cred_file;
|
||||
push @$cmd, '-W', $scfg->{domain} if defined($scfg->{domain});
|
||||
} else {
|
||||
push @$cmd, '-U', 'Guest','-N';
|
||||
}
|
||||
push @$cmd, '-c', 'echo 1 0';
|
||||
|
||||
my $out_str;
|
||||
my $out = sub { $out_str .= shift };
|
||||
|
||||
eval { run_command($cmd, timeout => 10, outfunc => $out, errfunc => sub {}) };
|
||||
|
||||
if (my $err = $@) {
|
||||
die "$out_str\n" if defined($out_str) &&
|
||||
($out_str =~ m/NT_STATUS_(ACCESS_DENIED|LOGON_FAILURE)/);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
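# A minimal sketch (not part of the plugin) of the probe command that
# check_connection() builds for a hypothetical storage with user 'backup',
# domain 'WORKGROUP' and smbversion '3.0'; int('3.0') yields 3, hence
# '-m smb3'. All names and paths are made up.
use strict;
use warnings;

my $cmd = ['/usr/bin/smbclient', '//10.0.0.5/dump', '-d', '0'];
push @$cmd, '-m', 'smb' . int('3.0');                                  # -m smb3
push @$cmd, '-U', 'backup', '-A', '/etc/pve/priv/storage/backup01.pw';
push @$cmd, '-W', 'WORKGROUP';
push @$cmd, '-c', 'echo 1 0';
print join(' ', @$cmd), "\n";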
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use get_volume_attribute instead.
|
||||
sub get_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::get_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use update_volume_attribute instead.
|
||||
sub update_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::update_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::get_volume_attribute(@_);
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::update_volume_attribute(@_);
|
||||
}
|
||||
|
||||
1;
|
||||
262
src/PVE/Storage/CephFSPlugin.pm
Normal file
@ -0,0 +1,262 @@
|
||||
package PVE::Storage::CephFSPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use IO::File;
|
||||
use Net::IP;
|
||||
use File::Path;
|
||||
|
||||
use PVE::CephConfig;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::ProcFSTools;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Systemd;
|
||||
use PVE::Tools qw(run_command file_set_contents);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
sub cephfs_is_mounted {
|
||||
my ($scfg, $storeid, $mountdata) = @_;
|
||||
|
||||
my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
|
||||
my $configfile = $cmd_option->{ceph_conf};
|
||||
|
||||
my $subdir = $scfg->{subdir} // '/';
|
||||
my $mountpoint = $scfg->{path};
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ m#^ceph|fuse\.ceph-fuse# &&
|
||||
$_->[0] =~ m#\Q:$subdir\E$|^ceph-fuse$# &&
|
||||
$_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
|
||||
warn "A filesystem is already mounted on $mountpoint\n"
|
||||
if grep { $_->[1] eq $mountpoint } @$mountdata;
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
# FIXME: remove once it's possible to specify _netdev for fuse.ceph mounts
|
||||
sub systemd_netmount {
|
||||
my ($where, $type, $what, $opts) = @_;
|
||||
|
||||
# don't do default deps, systemd v241 generator produces ordering deps on both
|
||||
# local-fs(-pre) and remote-fs(-pre) targets if we use the required _netdev
|
||||
# option. Over three corners this gets us an ordering cycle on shutdown, which
|
||||
# may make shutdown hang if the random cycle breaking hits the "wrong" unit to
|
||||
# delete.
|
||||
my $unit = <<"EOF";
|
||||
[Unit]
|
||||
Description=${where}
|
||||
DefaultDependencies=no
|
||||
Requires=system.slice
|
||||
Wants=network-online.target
|
||||
Before=umount.target remote-fs.target
|
||||
After=systemd-journald.socket system.slice network.target -.mount remote-fs-pre.target network-online.target
|
||||
Conflicts=umount.target
|
||||
|
||||
[Mount]
|
||||
Where=${where}
|
||||
What=${what}
|
||||
Type=${type}
|
||||
Options=${opts}
|
||||
EOF
|
||||
|
||||
my $unit_fn = PVE::Systemd::escape_unit($where, 1) . ".mount";
|
||||
my $unit_path = "/run/systemd/system/$unit_fn";
|
||||
my $daemon_needs_reload = -e $unit_path;
|
||||
|
||||
file_set_contents($unit_path, $unit);
|
||||
|
||||
run_command(['systemctl', 'daemon-reload'], errmsg => "daemon-reload error")
|
||||
if $daemon_needs_reload;
|
||||
run_command(['systemctl', 'start', $unit_fn], errmsg => "mount error");
|
||||
|
||||
}
|
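# A minimal sketch of how the transient unit gets its name: the mountpoint is
# escaped with systemd's path rules (PVE::Systemd::escape_unit with the path
# flag). systemd-escape follows the same rules, so it can be used to preview
# the name; the mountpoint below is made up.
use strict;
use warnings;

my $where = '/mnt/pve/cephfs';                         # hypothetical mountpoint
chomp(my $unit_fn = `systemd-escape --path --suffix=mount $where`);
print "$unit_fn\n";                                    # mnt-pve-cephfs.mount
print "/run/systemd/system/$unit_fn\n";                # where systemd_netmount() writes the unit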
||||
|
||||
sub cephfs_mount {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $mountpoint = $scfg->{path};
|
||||
my $subdir = $scfg->{subdir} // '/';
|
||||
|
||||
my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
|
||||
my $configfile = $cmd_option->{ceph_conf};
|
||||
my $secretfile = $cmd_option->{keyring};
|
||||
my $server = $cmd_option->{mon_host} // PVE::CephConfig::get_monaddr_list($configfile);
|
||||
my $type = 'ceph';
|
||||
my $fs_name = $scfg->{'fs-name'};
|
||||
|
||||
my @opts = ();
|
||||
if ($scfg->{fuse}) {
|
||||
$type = 'fuse.ceph';
|
||||
push @opts, "ceph.id=$cmd_option->{userid}";
|
||||
push @opts, "ceph.keyfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "ceph.conf=$configfile" if defined($configfile);
|
||||
push @opts, "ceph.client_fs=$fs_name" if defined($fs_name);
|
||||
} else {
|
||||
push @opts, "name=$cmd_option->{userid}";
|
||||
push @opts, "secretfile=$secretfile" if defined($secretfile);
|
||||
push @opts, "conf=$configfile" if defined($configfile);
|
||||
push @opts, "fs=$fs_name" if defined($fs_name);
|
||||
}
|
||||
|
||||
push @opts, $scfg->{options} if $scfg->{options};
|
||||
|
||||
systemd_netmount($mountpoint, $type, "$server:$subdir", join(',', @opts));
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'cephfs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { vztmpl => 1, iso => 1, backup => 1, snippets => 1},
|
||||
{ backup => 1 }],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
fuse => {
|
||||
description => "Mount CephFS through FUSE.",
|
||||
type => 'boolean',
|
||||
},
|
||||
'fs-name' => {
|
||||
description => "The Ceph filesystem name.",
|
||||
type => 'string', format => 'pve-configid',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
monhost => { optional => 1},
|
||||
nodes => { optional => 1 },
|
||||
subdir => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
fuse => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
keyring => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
'fs-name' => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
$config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
|
||||
|
||||
return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_update_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
if (exists($param{keyring})) {
|
||||
if (defined($param{keyring})) {
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
} else {
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_delete_hook {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
return;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} //= PVE::ProcFSTools::parse_proc_mounts();
|
||||
|
||||
return undef if !cephfs_is_mounted($scfg, $storeid, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} //= PVE::ProcFSTools::parse_proc_mounts();
|
||||
|
||||
# NOTE: mkpath may hang if storage is mounted but not reachable
|
||||
if (!cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
|
||||
my $path = $scfg->{path};
|
||||
|
||||
mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
|
||||
cephfs_mount($scfg, $storeid);
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} //= PVE::ProcFSTools::parse_proc_mounts();
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
if (cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
|
||||
run_command(['/bin/umount', $path], errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use get_volume_attribute instead.
|
||||
sub get_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::get_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use update_volume_attribute instead.
|
||||
sub update_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::update_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::get_volume_attribute(@_);
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::update_volume_attribute(@_);
|
||||
}
|
||||
|
||||
1;
|
||||
239
src/PVE/Storage/DirPlugin.pm
Normal file
@ -0,0 +1,239 @@
|
||||
package PVE::Storage::DirPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Cwd;
|
||||
use Encode qw(decode encode);
|
||||
use File::Path;
|
||||
use IO::File;
|
||||
use POSIX;
|
||||
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'dir';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, none => 1 },
|
||||
{ images => 1, rootdir => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1, subvol => 1 } , 'raw' ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
path => {
|
||||
description => "File system path.",
|
||||
type => 'string', format => 'pve-storage-path',
|
||||
},
|
||||
mkdir => {
|
||||
description => "Create the directory if it doesn't exist.",
|
||||
type => 'boolean',
|
||||
default => 'yes',
|
||||
},
|
||||
is_mountpoint => {
|
||||
description =>
|
||||
"Assume the given path is an externally managed mountpoint " .
|
||||
"and consider the storage offline if it is not mounted. ".
|
||||
"Using a boolean (yes/no) value serves as a shortcut to using the target path in this field.",
|
||||
type => 'string',
|
||||
default => 'no',
|
||||
},
|
||||
bwlimit => get_standard_option('bwlimit'),
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
shared => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
is_mountpoint => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
#
|
||||
|
||||
# NOTE: should ProcFSTools::is_mounted accept an optional cache like this?
|
||||
sub path_is_mounted {
|
||||
my ($mountpoint, $mountdata) = @_;
|
||||
|
||||
$mountpoint = Cwd::realpath($mountpoint); # symlinks
|
||||
return 0 if !defined($mountpoint); # path does not exist
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
return 1 if grep { $_->[1] eq $mountpoint } @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub parse_is_mountpoint {
|
||||
my ($scfg) = @_;
|
||||
my $is_mp = $scfg->{is_mountpoint};
|
||||
return undef if !defined $is_mp;
|
||||
if (defined(my $bool = PVE::JSONSchema::parse_boolean($is_mp))) {
|
||||
return $bool ? $scfg->{path} : undef;
|
||||
}
|
||||
return $is_mp; # contains a path
|
||||
}
|
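# A minimal sketch of the three is_mountpoint forms handled above; the small
# closure only mimics PVE::JSONSchema::parse_boolean for this illustration and
# the storage paths are made up.
use strict;
use warnings;

my $parse_boolean = sub {    # stand-in, not the real PVE::JSONSchema helper
    my ($v) = @_;
    return 1 if $v =~ /^(?:1|yes|on|true)$/i;
    return 0 if $v =~ /^(?:0|no|off|false)$/i;
    return undef;
};

for my $scfg (
    { path => '/mnt/pve/a', is_mountpoint => 'yes' },        # boolean: check the storage path itself
    { path => '/mnt/pve/b', is_mountpoint => 'no' },         # boolean: no mountpoint check at all
    { path => '/mnt/pve/c', is_mountpoint => '/mnt/data' },  # explicit mountpoint path
) {
    my $is_mp = $scfg->{is_mountpoint};
    my $mp;
    if (defined(my $bool = $parse_boolean->($is_mp))) {
        $mp = $bool ? $scfg->{path} : undef;
    } else {
        $mp = $is_mp;    # contains a path
    }
    printf "%-9s => %s\n", $is_mp, defined($mp) ? $mp : '(none)';
}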
||||
|
||||
# FIXME move into 'get_volume_attribute' when removing 'get_volume_notes'
|
||||
my $get_volume_notes_impl = sub {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
return if $vtype ne 'backup';
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
$path .= $class->SUPER::NOTES_EXT;
|
||||
|
||||
if (-f $path) {
|
||||
my $data = PVE::Tools::file_get_contents($path);
|
||||
return eval { decode('UTF-8', $data, 1) } // $data;
|
||||
}
|
||||
|
||||
return '';
|
||||
};
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use get_volume_attribute instead.
|
||||
sub get_volume_notes {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
return $get_volume_notes_impl->($class, $scfg, $storeid, $volname, $timeout);
|
||||
}
|
||||
|
||||
# FIXME move into 'update_volume_attribute' when removing 'update_volume_notes'
|
||||
my $update_volume_notes_impl = sub {
|
||||
my ($class, $scfg, $storeid, $volname, $notes, $timeout) = @_;
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
die "only backups can have notes\n" if $vtype ne 'backup';
|
||||
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
$path .= $class->SUPER::NOTES_EXT;
|
||||
|
||||
if (defined($notes) && $notes ne '') {
|
||||
my $encoded = encode('UTF-8', $notes);
|
||||
PVE::Tools::file_set_contents($path, $encoded);
|
||||
} else {
|
||||
unlink $path or $! == ENOENT or die "could not delete notes - $!\n";
|
||||
}
|
||||
return;
|
||||
};
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use update_volume_attribute instead.
|
||||
sub update_volume_notes {
|
||||
my ($class, $scfg, $storeid, $volname, $notes, $timeout) = @_;
|
||||
return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $notes, $timeout);
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $get_volume_notes_impl->($class, $scfg, $storeid, $volname);
|
||||
}
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
return if $vtype ne 'backup';
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
return -e PVE::Storage::protection_file_path($path) ? 1 : 0;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $update_volume_notes_impl->($class, $scfg, $storeid, $volname, $value);
|
||||
}
|
||||
|
||||
my ($vtype) = $class->parse_volname($volname);
|
||||
die "only backups support attribute '$attribute'\n" if $vtype ne 'backup';
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
my $protection_path = PVE::Storage::protection_file_path($path);
|
||||
|
||||
return if !((-e $protection_path) xor $value); # protection status already correct
|
||||
|
||||
if ($value) {
|
||||
my $fh = IO::File->new($protection_path, O_CREAT, 0644)
|
||||
or die "unable to create protection file '$protection_path' - $!\n";
|
||||
close($fh);
|
||||
} else {
|
||||
unlink $protection_path or $! == ENOENT
|
||||
or die "could not delete protection file '$protection_path' - $!\n";
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n";
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
if (defined(my $mp = parse_is_mountpoint($scfg))) {
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
return undef if !path_is_mounted($mp, $cache->{mountdata});
|
||||
}
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $path = $scfg->{path};
|
||||
if (!defined($scfg->{mkdir}) || $scfg->{mkdir}) {
|
||||
mkpath $path;
|
||||
}
|
||||
|
||||
my $mp = parse_is_mountpoint($scfg);
|
||||
if (defined($mp) && !path_is_mounted($mp, $cache->{mountdata})) {
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory is expected to be a mount point but is not mounted: '$mp'\n";
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub check_config {
|
||||
my ($self, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
my $opts = PVE::SectionConfig::check_config($self, $sectionId, $config, $create, $skipSchemaCheck);
|
||||
return $opts if !$create;
|
||||
if ($opts->{path} !~ m@^/[-/a-zA-Z0-9_.]+$@) {
|
||||
die "illegal path for directory storage: $opts->{path}\n";
|
||||
}
|
||||
return $opts;
|
||||
}
|
||||
|
||||
1;
|
||||
354
src/PVE/Storage/GlusterfsPlugin.pm
Normal file
@ -0,0 +1,354 @@
|
||||
package PVE::Storage::GlusterfsPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use IO::File;
|
||||
use File::Path;
|
||||
use PVE::Tools qw(run_command);
|
||||
use PVE::ProcFSTools;
|
||||
use PVE::Network;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# Glusterfs helper functions
|
||||
|
||||
my $server_test_results = {};
|
||||
|
||||
my $get_active_server = sub {
|
||||
my ($scfg, $return_default_if_offline) = @_;
|
||||
|
||||
my $defaultserver = $scfg->{server} ? $scfg->{server} : 'localhost';
|
||||
|
||||
if ($return_default_if_offline && !defined($scfg->{server2})) {
|
||||
# avoid delays (there is no backup server anyways)
|
||||
return $defaultserver;
|
||||
}
|
||||
|
||||
my $serverlist = [ $defaultserver ];
|
||||
push @$serverlist, $scfg->{server2} if $scfg->{server2};
|
||||
|
||||
my $ctime = time();
|
||||
foreach my $server (@$serverlist) {
|
||||
my $stat = $server_test_results->{$server};
|
||||
return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2);
|
||||
}
|
||||
|
||||
foreach my $server (@$serverlist) {
|
||||
my $status = 0;
|
||||
|
||||
if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') {
|
||||
# ping the gluster daemon default port (24007) as heuristic
|
||||
$status = PVE::Network::tcp_ping($server, 24007, 2);
|
||||
|
||||
} else {
|
||||
|
||||
my $parser = sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ m/Status: Started$/) {
|
||||
$status = 1;
|
||||
}
|
||||
};
|
||||
|
||||
my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];
|
||||
|
||||
run_command($cmd, errmsg => "glusterfs error", errfunc => sub {}, outfunc => $parser);
|
||||
}
|
||||
|
||||
$server_test_results->{$server} = { time => time(), active => $status };
|
||||
return $server if $status;
|
||||
}
|
||||
|
||||
return $defaultserver if $return_default_if_offline;
|
||||
|
||||
return undef;
|
||||
};
|
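# A minimal sketch of the reachability heuristic used above: try a short TCP
# connect to the gluster daemon port 24007. IO::Socket::IP stands in for
# PVE::Network::tcp_ping and the server address is made up.
use strict;
use warnings;
use IO::Socket::IP;

my $server = '192.0.2.10';    # hypothetical volfile server
my $sock = IO::Socket::IP->new(PeerHost => $server, PeerPort => 24007, Timeout => 2);
print $sock ? "$server: glusterd reachable\n" : "$server: offline\n";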
||||
|
||||
sub glusterfs_is_mounted {
|
||||
my ($volume, $mountpoint, $mountdata) = @_;
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
|
||||
return $mountpoint if grep {
|
||||
$_->[2] eq 'fuse.glusterfs' &&
|
||||
$_->[0] =~ /^\S+:\Q$volume\E$/ &&
|
||||
$_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub glusterfs_mount {
|
||||
my ($server, $volume, $mountpoint) = @_;
|
||||
|
||||
my $source = "$server:$volume";
|
||||
|
||||
my $cmd = ['/bin/mount', '-t', 'glusterfs', $source, $mountpoint];
|
||||
|
||||
run_command($cmd, errmsg => "mount error");
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'glusterfs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1},
|
||||
{ images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
volume => {
|
||||
description => "Glusterfs Volume.",
|
||||
type => 'string',
|
||||
},
|
||||
server2 => {
|
||||
description => "Backup volfile server IP or DNS name.",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
requires => 'server',
|
||||
},
|
||||
transport => {
|
||||
description => "Gluster transport: tcp or rdma",
|
||||
type => 'string',
|
||||
enum => ['tcp', 'rdma', 'unix'],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
server => { optional => 1 },
|
||||
server2 => { optional => 1 },
|
||||
volume => { fixed => 1 },
|
||||
transport => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
$config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
|
||||
|
||||
return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub parse_name_dir {
|
||||
my $name = shift;
|
||||
|
||||
if ($name =~ m!^((base-)?[^/\s]+\.(raw|qcow2|vmdk))$!) {
|
||||
return ($1, $3, $2);
|
||||
}
|
||||
|
||||
die "unable to parse volume filename '$name'\n";
|
||||
}
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
# Note: qcow2/qed has internal snapshot, so path is always
|
||||
# the same (with or without snapshot => same file).
|
||||
die "can't snapshot this image format\n"
|
||||
if defined($snapname) && $format !~ m/^(qcow2|qed)$/;
|
||||
|
||||
my $path = undef;
|
||||
if ($vtype eq 'images') {
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $transport = $scfg->{transport};
|
||||
my $protocol = "gluster";
|
||||
|
||||
if ($transport) {
|
||||
$protocol = "gluster+$transport";
|
||||
}
|
||||
|
||||
$path = "$protocol://$server/$glustervolume/images/$vmid/$name";
|
||||
|
||||
} else {
|
||||
my $dir = $class->get_subdir($scfg, $vtype);
|
||||
$path = "$dir/$name";
|
||||
}
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
die "storage definition has no path\n" if !$scfg->{path};
|
||||
|
||||
my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images';
|
||||
|
||||
die "this storage type does not support clone_image on snapshot\n" if $snap;
|
||||
|
||||
die "this storage type does not support clone_image on subvolumes\n" if $format eq 'subvol';
|
||||
|
||||
die "clone_image only works on base images\n" if !$isBase;
|
||||
|
||||
my $imagedir = $class->get_subdir($scfg, 'images');
|
||||
$imagedir .= "/$vmid";
|
||||
|
||||
mkpath $imagedir;
|
||||
|
||||
my $name = $class->find_free_diskname($storeid, $scfg, $vmid, "qcow2", 1);
|
||||
|
||||
warn "clone $volname: $vtype, $name, $vmid to $name (base=../$basevmid/$basename)\n";
|
||||
|
||||
my $path = "$imagedir/$name";
|
||||
|
||||
die "disk image '$path' already exists\n" if -e $path;
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";
|
||||
|
||||
my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename",
|
||||
'-F', $format, '-f', 'qcow2', $volumepath];
|
||||
|
||||
run_command($cmd, errmsg => "unable to create image");
|
||||
|
||||
return "$basevmid/$basename/$vmid/$name";
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
my $imagedir = $class->get_subdir($scfg, 'images');
|
||||
$imagedir .= "/$vmid";
|
||||
|
||||
mkpath $imagedir;
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt, 1) if !$name;
|
||||
|
||||
my (undef, $tmpfmt) = parse_name_dir($name);
|
||||
|
||||
die "illegal name '$name' - wrong extension for format ('$tmpfmt != '$fmt')\n"
|
||||
if $tmpfmt ne $fmt;
|
||||
|
||||
my $path = "$imagedir/$name";
|
||||
|
||||
die "disk image '$path' already exists\n" if -e $path;
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
my $glustervolume = $scfg->{volume};
|
||||
my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";
|
||||
|
||||
my $cmd = ['/usr/bin/qemu-img', 'create'];
|
||||
|
||||
my $prealloc_opt = PVE::Storage::Plugin::preallocation_cmd_option($scfg, $fmt);
|
||||
push @$cmd, '-o', $prealloc_opt if defined($prealloc_opt);
|
||||
|
||||
push @$cmd, '-f', $fmt, $volumepath, "${size}K";
|
||||
|
||||
eval { run_command($cmd, errmsg => "unable to create image"); };
|
||||
if ($@) {
|
||||
unlink $path;
|
||||
rmdir $imagedir;
|
||||
die "$@";
|
||||
}
|
||||
|
||||
return "$vmid/$name";
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
|
||||
my $volume = $scfg->{volume};
|
||||
|
||||
return undef if !glusterfs_is_mounted($volume, $path, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $volume = $scfg->{volume};
|
||||
|
||||
if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
|
||||
|
||||
mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
|
||||
my $server = &$get_active_server($scfg, 1);
|
||||
|
||||
glusterfs_mount($server, $volume, $path);
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $volume = $scfg->{volume};
|
||||
|
||||
if (glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
# do nothing by default
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
# do nothing by default
|
||||
}
|
||||
|
||||
sub check_connection {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $server = &$get_active_server($scfg);
|
||||
|
||||
return defined($server) ? 1 : 0;
|
||||
}
|
||||
|
||||
1;
|
||||
255
src/PVE/Storage/ISCSIDirectPlugin.pm
Normal file
@ -0,0 +1,255 @@
|
||||
package PVE::Storage::ISCSIDirectPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use IO::File;
|
||||
use HTTP::Request;
|
||||
use LWP::UserAgent;
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach);
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
sub iscsi_ls {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $portal = $scfg->{portal};
|
||||
my $cmd = ['/usr/bin/iscsi-ls', '-s', 'iscsi://'.$portal ];
|
||||
my $list = {};
|
||||
my %unittobytes = (
|
||||
"k" => 1024,
|
||||
"M" => 1024*1024,
|
||||
"G" => 1024*1024*1024,
|
||||
"T" => 1024*1024*1024*1024
|
||||
);
|
||||
eval {
|
||||
|
||||
run_command($cmd, errmsg => "iscsi error", errfunc => sub {}, outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
if( $line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/ ) {
|
||||
my $image = "lun".$1;
|
||||
my $size = $3;
|
||||
my $unit = $4;
|
||||
|
||||
$list->{$storeid}->{$image} = {
|
||||
name => $image,
|
||||
size => $size * $unittobytes{$unit},
|
||||
format => 'raw',
|
||||
};
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
my $err = $@;
|
||||
die $err if $err && $err !~ m/TESTUNITREADY failed with SENSE KEY/ ;
|
||||
|
||||
return $list;
|
||||
|
||||
}
|
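# A minimal sketch of the size parsing done in iscsi_ls() above, run against a
# made-up line in the format printed by 'iscsi-ls -s'.
use strict;
use warnings;

my %unittobytes = (k => 1024, M => 1024**2, G => 1024**3, T => 1024**4);
my $line = 'Lun:3 SAMSUNG_SSD (Size:32G)';    # hypothetical example line

if ($line =~ /Lun:(\d+)\s+([A-Za-z0-9\-\_\.\:]*)\s+\(Size:([0-9\.]*)(k|M|G|T)\)/) {
    my ($lun, $size, $unit) = ($1, $3, $4);
    printf "lun%d => %d bytes\n", $lun, $size * $unittobytes{$unit};    # lun3 => 34359738368 bytes
}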
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'iscsidirect';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, none => 1}, { images => 1 }],
|
||||
select_existing => 1,
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
portal => { fixed => 1 },
|
||||
target => { fixed => 1 },
|
||||
nodes => { optional => 1},
|
||||
disable => { optional => 1},
|
||||
content => { optional => 1},
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
|
||||
if ($volname =~ m/^lun(\d+)$/) {
|
||||
return ('images', $1, undef, undef, undef, undef, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse iscsi volume name '$volname'\n";
|
||||
|
||||
}
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
die "volume snapshot is not possible on iscsi device"
|
||||
if defined($snapname);
|
||||
|
||||
my ($vtype, $lun, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $target = $scfg->{target};
|
||||
my $portal = $scfg->{portal};
|
||||
|
||||
my $path = "iscsi://$portal/$target/$lun";
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
die "can't create base images in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
die "can't clone images in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "can't allocate space in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
die "can't free space in iscsi storage\n";
|
||||
}
|
||||
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
my $res = [];
|
||||
|
||||
$cache->{directiscsi} = iscsi_ls($scfg,$storeid) if !$cache->{directiscsi};
|
||||
|
||||
# we have no owner for iscsi devices
|
||||
|
||||
my $target = $scfg->{target};
|
||||
|
||||
if (my $dat = $cache->{directiscsi}->{$storeid}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $total = 0;
|
||||
my $free = 0;
|
||||
my $used = 0;
|
||||
my $active = 1;
|
||||
return ($total,$free,$used,$active);
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "volume snapshot is not possible on iscsi device" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "volume snapshot is not possible on iscsi device" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my $vollist = iscsi_ls($scfg,$storeid);
|
||||
my $info = $vollist->{$storeid}->{$volname};
|
||||
|
||||
return wantarray ? ($info->{size}, 'raw', 0, undef) : $info->{size};
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
die "volume resize is not possible on iscsi device";
|
||||
}
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot is not possible on iscsi device";
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot rollback is not possible on iscsi device";
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot delete is not possible on iscsi device";
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
1;
|
||||
437
src/PVE/Storage/ISCSIPlugin.pm
Normal file
@ -0,0 +1,437 @@
|
||||
package PVE::Storage::ISCSIPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use File::stat;
|
||||
use IO::Dir;
|
||||
use IO::File;
|
||||
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV4RE $IPV6RE);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# iscsi helper function
|
||||
|
||||
my $ISCSIADM = '/usr/bin/iscsiadm';
|
||||
$ISCSIADM = undef if ! -X $ISCSIADM;
|
||||
|
||||
sub check_iscsi_support {
|
||||
my $noerr = shift;
|
||||
|
||||
if (!$ISCSIADM) {
|
||||
my $msg = "no iscsi support - please install open-iscsi";
|
||||
if ($noerr) {
|
||||
warn "warning: $msg\n";
|
||||
return 0;
|
||||
}
|
||||
|
||||
die "error: $msg\n";
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub iscsi_session_list {
|
||||
|
||||
check_iscsi_support ();
|
||||
|
||||
my $cmd = [$ISCSIADM, '--mode', 'session'];
|
||||
|
||||
my $res = {};
|
||||
|
||||
eval {
|
||||
run_command($cmd, errmsg => 'iscsi session scan failed', outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ m/^tcp:\s+\[(\S+)\]\s+\S+\s+(\S+)(\s+\S+)?\s*$/) {
|
||||
my ($session, $target) = ($1, $2);
|
||||
# there can be several sessions per target (multipath)
|
||||
push @{$res->{$target}}, $session;
|
||||
}
|
||||
});
|
||||
};
|
||||
if (my $err = $@) {
|
||||
die $err if $err !~ m/: No active sessions.$/i;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub iscsi_test_portal {
|
||||
my ($portal) = @_;
|
||||
|
||||
my ($server, $port) = PVE::Tools::parse_host_and_port($portal);
|
||||
return 0 if !$server;
|
||||
return PVE::Network::tcp_ping($server, $port || 3260, 2);
|
||||
}
|
||||
|
||||
sub iscsi_discovery {
|
||||
my ($portal) = @_;
|
||||
|
||||
check_iscsi_support ();
|
||||
|
||||
my $res = {};
|
||||
return $res if !iscsi_test_portal($portal); # fixme: raise exception here?
|
||||
|
||||
my $cmd = [$ISCSIADM, '--mode', 'discovery', '--type', 'sendtargets', '--portal', $portal];
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ m/^((?:$IPV4RE|\[$IPV6RE\]):\d+)\,\S+\s+(\S+)\s*$/) {
|
||||
my $portal = $1;
|
||||
my $target = $2;
|
||||
# one target can have more than one portal (multipath).
|
||||
push @{$res->{$target}}, $portal;
|
||||
}
|
||||
});
|
||||
|
||||
return $res;
|
||||
}
|
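# A minimal sketch of the 'sendtargets' record format parsed above, with a
# made-up portal and target; a plain IPv4 pattern stands in for the $IPV4RE
# (and bracketed-IPv6) regexes imported from PVE::Tools.
use strict;
use warnings;

my $ip_re = qr/\d{1,3}(?:\.\d{1,3}){3}/;    # simplified stand-in
my $line = '192.0.2.20:3260,1 iqn.2001-05.com.example:storage.target01';

if ($line =~ m/^((?:$ip_re|\[.*\]):\d+)\,\S+\s+(\S+)\s*$/) {
    print "portal=$1 target=$2\n";
    # portal=192.0.2.20:3260 target=iqn.2001-05.com.example:storage.target01
}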
||||
|
||||
sub iscsi_login {
|
||||
my ($target, $portal_in) = @_;
|
||||
|
||||
check_iscsi_support();
|
||||
|
||||
eval { iscsi_discovery($portal_in); };
|
||||
warn $@ if $@;
|
||||
|
||||
run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--login']);
|
||||
}
|
||||
|
||||
sub iscsi_logout {
|
||||
my ($target, $portal) = @_;
|
||||
|
||||
check_iscsi_support();
|
||||
|
||||
run_command([$ISCSIADM, '--mode', 'node', '--targetname', $target, '--logout']);
|
||||
}
|
||||
|
||||
my $rescan_filename = "/var/run/pve-iscsi-rescan.lock";
|
||||
|
||||
sub iscsi_session_rescan {
|
||||
my $session_list = shift;
|
||||
|
||||
check_iscsi_support();
|
||||
|
||||
my $rstat = stat($rescan_filename);
|
||||
|
||||
if (!$rstat) {
|
||||
if (my $fh = IO::File->new($rescan_filename, "a")) {
|
||||
utime undef, undef, $fh;
|
||||
close($fh);
|
||||
}
|
||||
} else {
|
||||
my $atime = $rstat->atime;
|
||||
my $tdiff = time() - $atime;
|
||||
# avoid frequent rescans
|
||||
return if !($tdiff < 0 || $tdiff > 10);
|
||||
utime undef, undef, $rescan_filename;
|
||||
}
|
||||
|
||||
foreach my $session (@$session_list) {
|
||||
my $cmd = [$ISCSIADM, '--mode', 'session', '--sid', $session, '--rescan'];
|
||||
eval { run_command($cmd, outfunc => sub {}); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
}
|
||||
|
||||
sub load_stable_scsi_paths {
|
||||
|
||||
my $stable_paths = {};
|
||||
|
||||
my $stabledir = "/dev/disk/by-id";
|
||||
|
||||
if (my $dh = IO::Dir->new($stabledir)) {
|
||||
foreach my $tmp (sort $dh->read) {
|
||||
# exclude filenames with -part in the name (same disk, just its partitions)
|
||||
# use only filenames with scsi (with multipath the same device shows up
|
||||
# with dm-uuid-mpath, dm-name and scsi in the name)
|
||||
if($tmp !~ m/-part\d+$/ && ($tmp =~ m/^scsi-/ || $tmp =~ m/^dm-uuid-mpath-/)) {
|
||||
my $path = "$stabledir/$tmp";
|
||||
my $bdevdest = readlink($path);
|
||||
if ($bdevdest && $bdevdest =~ m|^../../([^/]+)|) {
|
||||
$stable_paths->{$1}=$tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
$dh->close;
|
||||
}
|
||||
return $stable_paths;
|
||||
}
|
||||
|
||||
sub iscsi_device_list {
|
||||
|
||||
my $res = {};
|
||||
|
||||
my $dirname = '/sys/class/iscsi_session';
|
||||
|
||||
my $stable_paths = load_stable_scsi_paths();
|
||||
|
||||
dir_glob_foreach($dirname, 'session(\d+)', sub {
|
||||
my ($ent, $session) = @_;
|
||||
|
||||
my $target = file_read_firstline("$dirname/$ent/targetname");
|
||||
return if !$target;
|
||||
|
||||
my (undef, $host) = dir_glob_regex("$dirname/$ent/device", 'target(\d+):.*');
|
||||
return if !defined($host);
|
||||
|
||||
dir_glob_foreach("/sys/bus/scsi/devices", "$host:" . '(\d+):(\d+):(\d+)', sub {
|
||||
my ($tmp, $channel, $id, $lun) = @_;
|
||||
|
||||
my $type = file_read_firstline("/sys/bus/scsi/devices/$tmp/type");
|
||||
return if !defined($type) || $type ne '0'; # list disks only
|
||||
|
||||
my $bdev;
|
||||
if (-d "/sys/bus/scsi/devices/$tmp/block") { # newer kernels
|
||||
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp/block/", '([A-Za-z]\S*)');
|
||||
} else {
|
||||
(undef, $bdev) = dir_glob_regex("/sys/bus/scsi/devices/$tmp", 'block:(\S+)');
|
||||
}
|
||||
return if !$bdev;
|
||||
|
||||
#check multipath
|
||||
if (-d "/sys/block/$bdev/holders") {
|
||||
my $multipathdev = dir_glob_regex("/sys/block/$bdev/holders", '[A-Za-z]\S*');
|
||||
$bdev = $multipathdev if $multipathdev;
|
||||
}
|
||||
|
||||
my $blockdev = $stable_paths->{$bdev};
|
||||
return if !$blockdev;
|
||||
|
||||
my $size = file_read_firstline("/sys/block/$bdev/size");
|
||||
return if !$size;
|
||||
|
||||
my $volid = "$channel.$id.$lun.$blockdev";
|
||||
|
||||
$res->{$target}->{$volid} = {
|
||||
'format' => 'raw',
|
||||
'size' => int($size * 512),
|
||||
'vmid' => 0, # not assigned to any vm
|
||||
'channel' => int($channel),
|
||||
'id' => int($id),
|
||||
'lun' => int($lun),
|
||||
};
|
||||
|
||||
#print "TEST: $target $session $host,$bus,$tg,$lun $blockdev\n";
|
||||
});
|
||||
|
||||
});
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'iscsi';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, none => 1}, { images => 1 }],
|
||||
select_existing => 1,
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
target => {
|
||||
description => "iSCSI target.",
|
||||
type => 'string',
|
||||
},
|
||||
portal => {
|
||||
description => "iSCSI portal (IP or DNS name with optional port).",
|
||||
type => 'string', format => 'pve-storage-portal-dns',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
portal => { fixed => 1 },
|
||||
target => { fixed => 1 },
|
||||
nodes => { optional => 1},
|
||||
disable => { optional => 1},
|
||||
content => { optional => 1},
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m!^\d+\.\d+\.\d+\.(\S+)$!) {
|
||||
return ('images', $1, undef, undef, undef, undef, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse iscsi volume name '$volname'\n";
|
||||
}
|
||||
|
||||
sub filesystem_path {
|
||||
my ($class, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
die "snapshot is not possible on iscsi storage\n" if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $path = "/dev/disk/by-id/$name";
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
die "can't create base images in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
die "can't clone images in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "can't allocate space in iscsi storage\n";
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
die "can't free space in iscsi storage\n";
|
||||
}
|
||||
|
||||
# list all luns regardless of set content_types, since we need it for
|
||||
# listing in the gui and we can only have images anyway
|
||||
sub list_volumes {
|
||||
my ($class, $storeid, $scfg, $vmid, $content_types) = @_;
|
||||
|
||||
my $res = $class->list_images($storeid, $scfg, $vmid);
|
||||
|
||||
for my $item (@$res) {
|
||||
$item->{content} = 'images'; # we only have images
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
my $res = [];
|
||||
|
||||
$cache->{iscsi_devices} = iscsi_device_list() if !$cache->{iscsi_devices};
|
||||
|
||||
# we have no owner for iscsi devices
|
||||
|
||||
my $target = $scfg->{target};
|
||||
|
||||
if (my $dat = $cache->{iscsi_devices}->{$target}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
# we have no owner for iscsi devices
|
||||
next if defined($vmid);
|
||||
}
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
$info->{volid} = $volid;
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub iscsi_session {
|
||||
my ($cache, $target) = @_;
|
||||
$cache->{iscsi_sessions} = iscsi_session_list() if !$cache->{iscsi_sessions};
|
||||
return $cache->{iscsi_sessions}->{$target};
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $session = iscsi_session($cache, $scfg->{target});
|
||||
my $active = defined($session) ? 1 : 0;
|
||||
|
||||
return (0, 0, 0, $active);
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
return if !check_iscsi_support(1);
|
||||
|
||||
my $session = iscsi_session($cache, $scfg->{target});
|
||||
|
||||
if (!defined ($session)) {
|
||||
eval { iscsi_login($scfg->{target}, $scfg->{portal}); };
|
||||
warn $@ if $@;
|
||||
} else {
|
||||
# make sure we get all devices
|
||||
iscsi_session_rescan($session);
|
||||
}
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
return if !check_iscsi_support(1);
|
||||
|
||||
if (defined(iscsi_session($cache, $scfg->{target}))) {
|
||||
iscsi_logout($scfg->{target}, $scfg->{portal});
|
||||
}
|
||||
}
|
||||
|
||||
sub check_connection {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
my $portal = $scfg->{portal};
|
||||
return iscsi_test_portal($portal);
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
die "volume resize is not possible on iscsi device";
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
741
src/PVE/Storage/LVMPlugin.pm
Normal file
@ -0,0 +1,741 @@
|
||||
package PVE::Storage::LVMPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use IO::File;
|
||||
|
||||
use PVE::Tools qw(run_command trim);
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# lvm helper functions
|
||||
|
||||
my $ignore_no_medium_warnings = sub {
|
||||
my $line = shift;
|
||||
# ignore those, most of the time they're from (virtual) IPMI/iKVM devices
|
||||
# and just spam the log..
|
||||
if ($line !~ /open failed: No medium found/) {
|
||||
print STDERR "$line\n";
|
||||
}
|
||||
};
|
||||
|
||||
sub lvm_pv_info {
|
||||
my ($device) = @_;
|
||||
|
||||
die "no device specified" if !$device;
|
||||
|
||||
my $has_label = 0;
|
||||
|
||||
my $cmd = ['/usr/bin/file', '-L', '-s', $device];
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
$has_label = 1 if $line =~ m/LVM2/;
|
||||
});
|
||||
|
||||
return undef if !$has_label;
|
||||
|
||||
$cmd = ['/sbin/pvs', '--separator', ':', '--noheadings', '--units', 'k',
|
||||
'--unbuffered', '--nosuffix', '--options',
|
||||
'pv_name,pv_size,vg_name,pv_uuid', $device];
|
||||
|
||||
my $pvinfo;
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
$line = trim($line);
|
||||
|
||||
my ($pvname, $size, $vgname, $uuid) = split(':', $line);
|
||||
|
||||
die "found multiple pvs entries for device '$device'\n"
|
||||
if $pvinfo;
|
||||
|
||||
$pvinfo = {
|
||||
pvname => $pvname,
|
||||
size => int($size),
|
||||
vgname => $vgname,
|
||||
uuid => $uuid,
|
||||
};
|
||||
});
|
||||
|
||||
return $pvinfo;
|
||||
}
|
||||
|
||||
sub clear_first_sector {
|
||||
my ($dev) = shift;
|
||||
|
||||
if (my $fh = IO::File->new($dev, "w")) {
|
||||
my $buf = 0 x 512;
|
||||
syswrite $fh, $buf;
|
||||
$fh->close();
|
||||
}
|
||||
}
|
||||
|
||||
sub lvm_create_volume_group {
|
||||
my ($device, $vgname, $shared) = @_;
|
||||
|
||||
my $res = lvm_pv_info($device);
|
||||
|
||||
if ($res->{vgname}) {
|
||||
return if $res->{vgname} eq $vgname; # already created
|
||||
die "device '$device' is already used by volume group '$res->{vgname}'\n";
|
||||
}
|
||||
|
||||
clear_first_sector($device); # else pvcreate fails
|
||||
|
||||
# we use --metadatasize 250k, which results in "pe_start = 512"
|
||||
# so pe_start is aligned on a 128k boundary (advantage for SSDs)
|
||||
my $cmd = ['/sbin/pvcreate', '--metadatasize', '250k', $device];
|
||||
|
||||
run_command($cmd, errmsg => "pvcreate '$device' error");
|
||||
|
||||
$cmd = ['/sbin/vgcreate', $vgname, $device];
|
||||
# push @$cmd, '-c', 'y' if $shared; # we do not use this yet
|
||||
|
||||
run_command($cmd, errmsg => "vgcreate $vgname $device error", errfunc => $ignore_no_medium_warnings, outfunc => $ignore_no_medium_warnings);
|
||||
}
|
||||
|
||||
sub lvm_destroy_volume_group {
|
||||
my ($vgname) = @_;
|
||||
|
||||
run_command(
|
||||
['vgremove', '-y', $vgname],
|
||||
errmsg => "unable to remove volume group $vgname",
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
outfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
}
|
||||
|
||||
sub lvm_vgs {
|
||||
my ($includepvs) = @_;
|
||||
|
||||
my $cmd = ['/sbin/vgs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix', '--options'];
|
||||
|
||||
my $cols = [qw(vg_name vg_size vg_free lv_count)];
|
||||
|
||||
if ($includepvs) {
|
||||
push @$cols, qw(pv_name pv_size pv_free);
|
||||
}
|
||||
|
||||
push @$cmd, join(',', @$cols);
|
||||
|
||||
my $vgs = {};
|
||||
eval {
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
$line = trim($line);
|
||||
|
||||
my ($name, $size, $free, $lvcount, $pvname, $pvsize, $pvfree) = split (':', $line);
|
||||
|
||||
$vgs->{$name} //= {
|
||||
size => int ($size),
|
||||
free => int ($free),
|
||||
lvcount => int($lvcount)
|
||||
};
|
||||
|
||||
if (defined($pvname) && defined($pvsize) && defined($pvfree)) {
|
||||
push @{$vgs->{$name}->{pvs}}, {
|
||||
name => $pvname,
|
||||
size => int($pvsize),
|
||||
free => int($pvfree),
|
||||
};
|
||||
}
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
# just warn (vgs returns error code 5 if clvmd does not run)
|
||||
# but output is still OK (list without clustered VGs)
|
||||
warn $err if $err;
|
||||
|
||||
return $vgs;
|
||||
}
|
||||
|
||||
sub lvm_list_volumes {
|
||||
my ($vgname) = @_;
|
||||
|
||||
my $option_list = 'vg_name,lv_name,lv_size,lv_attr,pool_lv,data_percent,metadata_percent,snap_percent,uuid,tags,metadata_size,time';
|
||||
|
||||
my $cmd = [
|
||||
'/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix',
|
||||
'--config', 'report/time_format="%s"',
|
||||
'--options', $option_list,
|
||||
];
|
||||
|
||||
push @$cmd, $vgname if $vgname;
|
||||
|
||||
my $lvs = {};
|
||||
run_command($cmd, outfunc => sub {
|
||||
my $line = shift;
|
||||
|
||||
$line = trim($line);
|
||||
|
||||
my ($vg_name, $lv_name, $lv_size, $lv_attr, $pool_lv, $data_percent, $meta_percent, $snap_percent, $uuid, $tags, $meta_size, $ctime) = split(':', $line);
|
||||
return if !$vg_name;
|
||||
return if !$lv_name;
|
||||
|
||||
my $lv_type = substr($lv_attr, 0, 1);
|
||||
|
||||
my $d = {
|
||||
lv_size => int($lv_size),
|
||||
lv_state => substr($lv_attr, 4, 1),
|
||||
lv_type => $lv_type,
|
||||
};
|
||||
$d->{pool_lv} = $pool_lv if $pool_lv;
|
||||
$d->{tags} = $tags if $tags;
|
||||
$d->{ctime} = $ctime;
|
||||
|
||||
if ($lv_type eq 't') {
|
||||
$data_percent ||= 0;
|
||||
$meta_percent ||= 0;
|
||||
$snap_percent ||= 0;
|
||||
$d->{metadata_size} = int($meta_size);
|
||||
$d->{metadata_used} = int(($meta_percent * $meta_size)/100);
|
||||
$d->{used} = int(($data_percent * $lv_size)/100);
|
||||
}
|
||||
$lvs->{$vg_name}->{$lv_name} = $d;
|
||||
},
|
||||
errfunc => $ignore_no_medium_warnings,
|
||||
);
|
||||
|
||||
return $lvs;
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'lvm';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, { images => 1 }],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
vgname => {
|
||||
description => "Volume group name.",
|
||||
type => 'string', format => 'pve-storage-vgname',
|
||||
},
|
||||
base => {
|
||||
description => "Base volume. This volume is automatically activated.",
|
||||
type => 'string', format => 'pve-volume-id',
|
||||
},
|
||||
saferemove => {
|
||||
description => "Zero-out data when removing LVs.",
|
||||
type => 'boolean',
|
||||
},
|
||||
saferemove_throughput => {
|
||||
description => "Wipe throughput (cstream -t parameter value).",
|
||||
type => 'string',
|
||||
},
|
||||
tagged_only => {
|
||||
description => "Only use logical volumes tagged with 'pve-vm-ID'.",
|
||||
type => 'boolean',
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
vgname => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
shared => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
saferemove => { optional => 1 },
|
||||
saferemove_throughput => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
base => { fixed => 1, optional => 1 },
|
||||
tagged_only => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
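# Illustrative only: with the properties/options above, a matching
# /etc/pve/storage.cfg entry could look roughly like this (storage ID and
# volume group name are hypothetical):
#
#   lvm: my-lvm-store
#       vgname pve-data
#       content images,rootdir
#       saferemove 1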
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
if (my $base = $scfg->{base}) {
|
||||
my ($baseid, $volname) = PVE::Storage::parse_volume_id($base);
|
||||
|
||||
my $cfg = PVE::Storage::config();
|
||||
my $basecfg = PVE::Storage::storage_config ($cfg, $baseid, 1);
|
||||
die "base storage ID '$baseid' does not exist\n" if !$basecfg;
|
||||
|
||||
# we only support iscsi for now
|
||||
die "unsupported base type '$basecfg->{type}'"
|
||||
if $basecfg->{type} ne 'iscsi';
|
||||
|
||||
my $path = PVE::Storage::path($cfg, $base);
|
||||
|
||||
PVE::Storage::activate_storage($cfg, $baseid);
|
||||
|
||||
lvm_create_volume_group($path, $scfg->{vgname}, $scfg->{shared});
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
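# e.g. a (hypothetical) volume name 'vm-100-disk-0' is returned as
# ('images', 'vm-100-disk-0', '100', undef, undef, undef, 'raw')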
|
||||
|
||||
PVE::Storage::Plugin::parse_lvm_name($volname);
|
||||
|
||||
if ($volname =~ m/^(vm-(\d+)-\S+)$/) {
|
||||
return ('images', $1, $2, undef, undef, undef, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse lvm volume name '$volname'\n";
|
||||
}
|
||||
|
||||
sub filesystem_path {
|
||||
my ($class, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
die "lvm snapshot is not implemented"if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $path = "/dev/$vg/$name";
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
die "can't create base images in lvm storage\n";
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
die "can't clone images in lvm storage\n";
|
||||
}
|
||||
|
||||
sub find_free_diskname {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $add_fmt_suffix) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
|
||||
my $disk_list = [ keys %{$lvs->{$vg}} ];
|
||||
|
||||
return PVE::Storage::Plugin::get_next_vm_diskname($disk_list, $storeid, $vmid, undef, $scfg);
|
||||
}
|
||||
|
||||
sub lvcreate {
|
||||
my ($vg, $name, $size, $tags) = @_;
|
||||
|
||||
if ($size =~ m/\d$/) { # no unit is given
|
||||
$size .= "k"; # default to kilobytes
|
||||
}
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-aly', '-Wy', '--yes', '--size', $size, '--name', $name];
|
||||
for my $tag (@$tags) {
|
||||
push @$cmd, '--addtag', $tag;
|
||||
}
|
||||
push @$cmd, $vg;
|
||||
|
||||
run_command($cmd, errmsg => "lvcreate '$vg/$name' error");
|
||||
}
|
||||
|
||||
sub lvrename {
|
||||
my ($vg, $oldname, $newname) = @_;
|
||||
|
||||
run_command(
|
||||
['/sbin/lvrename', $vg, $oldname, $newname],
|
||||
errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error",
|
||||
);
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "unsupported format '$fmt'" if $fmt ne 'raw';
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
my $vgs = lvm_vgs();
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
|
||||
|
||||
my $free = int($vgs->{$vg}->{free});
|
||||
|
||||
die "not enough free space ($free < $size)\n" if $free < $size;
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid)
|
||||
if !$name;
|
||||
|
||||
lvcreate($vg, $name, $size, ["pve-vm-$vmid"]);
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
# we need to zero out LVM data for security reasons
|
||||
# and to allow thin provisioning
|
||||
|
||||
my $zero_out_worker = sub {
|
||||
print "zero-out data on image $volname (/dev/$vg/del-$volname)\n";
|
||||
|
||||
# wipe throughput up to 10MB/s by default; may be overridden with saferemove_throughput
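# (the value below is passed verbatim as cstream's -t argument; 10485760 bytes/s = 10 MB/s)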
|
||||
my $throughput = '-10485760';
|
||||
if ($scfg->{saferemove_throughput}) {
|
||||
$throughput = $scfg->{saferemove_throughput};
|
||||
}
|
||||
|
||||
my $cmd = [
|
||||
'/usr/bin/cstream',
|
||||
'-i', '/dev/zero',
|
||||
'-o', "/dev/$vg/del-$volname",
|
||||
'-T', '10',
|
||||
'-v', '1',
|
||||
'-b', '1048576',
|
||||
'-t', "$throughput"
|
||||
];
|
||||
eval { run_command($cmd, errmsg => "zero out finished (note: 'No space left on device' is ok here)"); };
|
||||
warn $@ if $@;
|
||||
|
||||
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/del-$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/del-$volname' error");
|
||||
});
|
||||
print "successfully removed volume $volname ($vg/del-$volname)\n";
|
||||
};
|
||||
|
||||
my $cmd = ['/sbin/lvchange', '-aly', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "can't activate LV '$vg/$volname' to zero-out its data");
|
||||
$cmd = ['/sbin/lvchange', '--refresh', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "can't refresh LV '$vg/$volname' to zero-out its data");
|
||||
|
||||
if ($scfg->{saferemove}) {
|
||||
# avoid long running task, so we only rename here
|
||||
$cmd = ['/sbin/lvrename', $vg, $volname, "del-$volname"];
|
||||
run_command($cmd, errmsg => "lvrename '$vg/$volname' error");
|
||||
return $zero_out_worker;
|
||||
} else {
|
||||
my $tmpvg = $scfg->{vgname};
|
||||
$cmd = ['/sbin/lvremove', '-f', "$tmpvg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$tmpvg/$volname' error");
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
my $check_tags = sub {
|
||||
my ($tags) = @_;
|
||||
|
||||
return defined($tags) && $tags =~ /(^|,)pve-vm-\d+(,|$)/;
|
||||
};
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
my $vgname = $scfg->{vgname};
|
||||
|
||||
$cache->{lvs} = lvm_list_volumes() if !$cache->{lvs};
|
||||
|
||||
my $res = [];
|
||||
|
||||
if (my $dat = $cache->{lvs}->{$vgname}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
next if $volname !~ m/^vm-(\d+)-/;
|
||||
my $owner = $1;
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
|
||||
next if $scfg->{tagged_only} && !&$check_tags($info->{tags});
|
||||
|
||||
# Allow mirrored and RAID LVs
|
||||
next if $info->{lv_type} !~ m/^[-mMrR]$/;
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
|
||||
push @$res, {
|
||||
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{vgs} = lvm_vgs() if !$cache->{vgs};
|
||||
|
||||
my $vgname = $scfg->{vgname};
|
||||
|
||||
if (my $info = $cache->{vgs}->{$vgname}) {
|
||||
return ($info->{size}, $info->{free}, $info->{size} - $info->{free}, 1);
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{vgs} = lvm_vgs() if !$cache->{vgs};
|
||||
|
||||
# In LVM2, vgscans take place automatically;
|
||||
# this is just to be sure
|
||||
if ($cache->{vgs} && !$cache->{vgscaned} &&
|
||||
!$cache->{vgs}->{$scfg->{vgname}}) {
|
||||
$cache->{vgscaned} = 1;
|
||||
my $cmd = ['/sbin/vgscan', '--ignorelockingfailure', '--mknodes'];
|
||||
eval { run_command($cmd, outfunc => sub {}); };
|
||||
warn $@ if $@;
|
||||
}
|
||||
|
||||
# we do not activate any volumes here ('vgchange -aly')
# instead, volumes are activated individually later
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $cmd = ['/sbin/vgchange', '-aln', $scfg->{vgname}];
|
||||
run_command($cmd, errmsg => "can't deactivate VG '$scfg->{vgname}'");
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
# FIXME: lvmchange is not provided on Debian
|
||||
my $path = $class->path($scfg, $volname, $snapname);
|
||||
|
||||
my $lvm_activate_mode = 'ey';
|
||||
|
||||
my $cmd = ['/sbin/lvchange', "-a$lvm_activate_mode", $path];
|
||||
run_command($cmd, errmsg => "can't activate LV '$path'");
|
||||
$cmd = ['/sbin/lvchange', '--refresh', $path];
|
||||
run_command($cmd, errmsg => "can't refresh LV '$path' for activation");
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
my $path = $class->path($scfg, $volname, $snapname);
|
||||
return if ! -b $path;
|
||||
|
||||
my $cmd = ['/sbin/lvchange', '-aln', $path];
|
||||
run_command($cmd, errmsg => "can't deactivate LV '$path'");
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
|
||||
$size = ($size/1024/1024) . "M";
|
||||
|
||||
my $path = $class->path($scfg, $volname);
|
||||
my $cmd = ['/sbin/lvextend', '-L', $size, $path];
|
||||
|
||||
$class->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
|
||||
run_command($cmd, errmsg => "error resizing volume '$path'");
|
||||
});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
my $path = $class->filesystem_path($scfg, $volname);
|
||||
|
||||
my $cmd = ['/sbin/lvs', '--separator', ':', '--noheadings', '--units', 'b',
|
||||
'--unbuffered', '--nosuffix', '--options', 'lv_size', $path];
|
||||
|
||||
my $size;
|
||||
run_command($cmd, timeout => $timeout, errmsg => "can't get size of '$path'",
|
||||
outfunc => sub {
|
||||
$size = int(shift);
|
||||
});
|
||||
return wantarray ? ($size, 'raw', 0, undef) : $size;
|
||||
}
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
die "lvm snapshot is not implemented";
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
die "lvm snapshot rollback is not implemented";
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
die "lvm snapshot delete is not implemented";
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
copy => { base => 1, current => 1},
|
||||
rename => {current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname) {
$key = 'snap';
} else {
$key = $isBase ? 'base' : 'current';
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_export_formats {
|
||||
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
return () if defined($snapshot); # lvm-thin only
|
||||
return volume_import_formats($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
|
||||
}
|
||||
|
||||
sub volume_export {
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
die "volume export format $format not available for $class\n"
|
||||
if $format ne 'raw+size';
|
||||
die "cannot export volumes together with their snapshots in $class\n"
|
||||
if $with_snapshots;
|
||||
die "cannot export a snapshot in $class\n" if defined($snapshot);
|
||||
die "cannot export an incremental stream in $class\n" if defined($base_snapshot);
|
||||
my $file = $class->path($scfg, $volname, $storeid);
|
||||
my $size;
|
||||
# should be faster than querying LVM, also checks for the device file's availability
|
||||
run_command(['/sbin/blockdev', '--getsize64', $file], outfunc => sub {
|
||||
my ($line) = @_;
|
||||
die "unexpected output from /sbin/blockdev: $line\n" if $line !~ /^(\d+)$/;
|
||||
$size = int($1);
|
||||
});
|
||||
PVE::Storage::Plugin::write_common_header($fh, $size);
|
||||
run_command(['dd', "if=$file", "bs=64k"], output => '>&'.fileno($fh));
|
||||
}
|
||||
|
||||
sub volume_import_formats {
|
||||
my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;
|
||||
return () if $with_snapshots; # not supported
|
||||
return () if defined($base_snapshot); # not supported
|
||||
return ('raw+size');
|
||||
}
|
||||
|
||||
sub volume_import {
|
||||
my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;
|
||||
die "volume import format $format not available for $class\n"
|
||||
if $format ne 'raw+size';
|
||||
die "cannot import volumes together with their snapshots in $class\n"
|
||||
if $with_snapshots;
|
||||
die "cannot import an incremental stream in $class\n" if defined($base_snapshot);
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $file_format) =
|
||||
$class->parse_volname($volname);
|
||||
die "cannot import format $format into a file of format $file_format\n"
|
||||
if $file_format ne 'raw';
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
if ($lvs->{$vg}->{$volname}) {
|
||||
die "volume $vg/$volname already exists\n" if !$allow_rename;
|
||||
warn "volume $vg/$volname already exists - importing with a different name\n";
|
||||
$name = undef;
|
||||
}
|
||||
|
||||
my ($size) = PVE::Storage::Plugin::read_common_header($fh);
|
||||
$size = int($size/1024);
|
||||
|
||||
eval {
|
||||
my $allocname = $class->alloc_image($storeid, $scfg, $vmid, 'raw', $name, $size);
|
||||
my $oldname = $volname;
|
||||
$volname = $allocname;
|
||||
if (defined($name) && $allocname ne $oldname) {
|
||||
die "internal error: unexpected allocated name: '$allocname' != '$oldname'\n";
|
||||
}
|
||||
my $file = $class->path($scfg, $volname, $storeid)
|
||||
or die "internal error: failed to get path to newly allocated volume $volname\n";
|
||||
|
||||
$class->volume_import_write($fh, $file);
|
||||
};
|
||||
if (my $err = $@) {
|
||||
my $cleanup_worker = eval { $class->free_image($storeid, $scfg, $volname, 0) };
|
||||
warn $@ if $@;
|
||||
|
||||
if ($cleanup_worker) {
|
||||
my $rpcenv = PVE::RPCEnvironment::get();
|
||||
my $authuser = $rpcenv->get_user();
|
||||
|
||||
$rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker);
|
||||
}
|
||||
|
||||
die $err;
|
||||
}
|
||||
|
||||
return "$storeid:$volname";
|
||||
}
|
||||
|
||||
sub volume_import_write {
|
||||
my ($class, $input_fh, $output_file) = @_;
|
||||
run_command(['dd', "of=$output_file", 'bs=64k'],
|
||||
input => '<&'.fileno($input_fh));
|
||||
}
|
||||
|
||||
sub rename_volume {
|
||||
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
|
||||
|
||||
my (
|
||||
undef,
|
||||
$source_image,
|
||||
$source_vmid,
|
||||
$base_name,
|
||||
$base_vmid,
|
||||
undef,
|
||||
$format
|
||||
) = $class->parse_volname($source_volname);
|
||||
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
|
||||
if !$target_volname;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = lvm_list_volumes($vg);
|
||||
die "target volume '${target_volname}' already exists\n"
|
||||
if ($lvs->{$vg}->{$target_volname});
|
||||
|
||||
lvrename($vg, $source_volname, $target_volname);
|
||||
return "${storeid}:${target_volname}";
|
||||
}
|
||||
|
||||
1;
115
src/PVE/Storage/LunCmd/Comstar.pm
Normal file
@ -0,0 +1,115 @@
package PVE::Storage::LunCmd::Comstar;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Digest::MD5 qw(md5_hex);
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach);
|
||||
|
||||
my @ssh_opts = ('-o', 'BatchMode=yes');
|
||||
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
|
||||
my $id_rsa_path = '/etc/pve/priv/zfs';
|
||||
|
||||
my $get_lun_cmd_map = sub {
|
||||
my ($method) = @_;
|
||||
|
||||
my $stmfadmcmd = "/usr/sbin/stmfadm";
|
||||
my $sbdadmcmd = "/usr/sbin/sbdadm";
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $stmfadmcmd, method => 'create-lu' },
|
||||
delete_lu => { cmd => $stmfadmcmd, method => 'delete-lu' },
|
||||
import_lu => { cmd => $stmfadmcmd, method => 'import-lu' },
|
||||
modify_lu => { cmd => $stmfadmcmd, method => 'modify-lu' },
|
||||
add_view => { cmd => $stmfadmcmd, method => 'add-view' },
|
||||
list_view => { cmd => $stmfadmcmd, method => 'list-view' },
|
||||
list_lu => { cmd => $sbdadmcmd, method => 'list-lu' },
|
||||
};
|
||||
|
||||
die "unknown command '$method'" unless exists $cmdmap->{$method};
|
||||
|
||||
return $cmdmap->{$method};
|
||||
};
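# For illustration (hypothetical portal and zvol names), a 'create_lu' call
# ends up executing something like:
#   ssh -o BatchMode=yes -i /etc/pve/priv/zfs/<portal>_id_rsa root@<portal> \
#       /usr/sbin/stmfadm create-lu -p wcd=false -p guid=600144f... /dev/zvol/rdsk/tank/vm-100-disk-1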
|
||||
|
||||
sub get_base {
|
||||
return '/dev/zvol/rdsk';
|
||||
}
|
||||
|
||||
sub run_lun_command {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $luncmd;
|
||||
my $target;
|
||||
my $guid;
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
if ($method eq 'create_lu') {
|
||||
my $wcd = 'false';
|
||||
if ($scfg->{nowritecache}) {
|
||||
$wcd = 'true';
|
||||
}
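# build a 32 hex digit GUID for the new LU: the fixed '600144f' prefix
# followed by the trailing 25 digits of an md5 over the first argument ($params[0])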
|
||||
my $prefix = '600144f';
|
||||
my $digest = md5_hex($params[0]);
|
||||
$digest =~ /(\w{7}(.*))/;
|
||||
$guid = "$prefix$2";
|
||||
@params = ('-p', "wcd=$wcd", '-p', "guid=$guid", @params);
|
||||
} elsif ($method eq 'modify_lu') {
|
||||
@params = ('-s', @params);
|
||||
} elsif ($method eq 'list_view') {
|
||||
@params = ('-l', @params);
|
||||
} elsif ($method eq 'list_lu') {
|
||||
$guid = $params[0];
|
||||
@params = undef;
|
||||
} elsif ($method eq 'add_view') {
|
||||
if ($scfg->{comstar_tg}) {
|
||||
unshift @params, $scfg->{comstar_tg};
|
||||
unshift @params, '--target-group';
|
||||
}
|
||||
if ($scfg->{comstar_hg}) {
|
||||
unshift @params, $scfg->{comstar_hg};
|
||||
unshift @params, '--host-group';
|
||||
}
|
||||
}
|
||||
|
||||
my $cmdmap = $get_lun_cmd_map->($method);
|
||||
$luncmd = $cmdmap->{cmd};
|
||||
my $lunmethod = $cmdmap->{method};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $lunmethod, @params];
|
||||
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
|
||||
if ($method eq 'list_view') {
|
||||
my @lines = split /\n/, $msg;
|
||||
$msg = undef;
|
||||
foreach my $line (@lines) {
|
||||
if ($line =~ /^\s*LUN\s*:\s*(\d+)$/) {
|
||||
$msg = $1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
} elsif ($method eq 'list_lu') {
|
||||
my $object = $guid;
|
||||
my @lines = split /\n/, $msg;
|
||||
$msg = undef;
|
||||
foreach my $line (@lines) {
|
||||
if ($line =~ /(\w+)\s+\d+\s+$object$/) {
|
||||
$msg = $1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
} elsif ($method eq 'create_lu') {
|
||||
$msg = $guid;
|
||||
}
|
||||
|
||||
return $msg;
|
||||
}

1;
478
src/PVE/Storage/LunCmd/Iet.pm
Normal file
@ -0,0 +1,478 @@
package PVE::Storage::LunCmd::Iet;
|
||||
|
||||
# iscsi storage running Debian
|
||||
# 1) apt-get install iscsitarget iscsitarget-dkms
|
||||
# 2) Create target like (/etc/iet/ietd.conf):
|
||||
# Target iqn.2001-04.com.example:tank
|
||||
# Alias tank
|
||||
# 3) Activate daemon (/etc/default/iscsitarget)
|
||||
# ISCSITARGET_ENABLE=true
|
||||
# 4) service iscsitarget start
|
||||
#
|
||||
# On one of the proxmox nodes:
|
||||
# 1) Login as root
|
||||
# 2) ssh-copy-id <ip_of_iscsi_storage>
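#
# A Lun entry managed by this module then ends up looking roughly like
# (hypothetical pool/volume names):
# Lun 0 Path=/dev/tank/vm-100-disk-1,Type=blockio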
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach);
|
||||
|
||||
sub get_base;
|
||||
|
||||
# A target can have at most 16864 LUNs
|
||||
# http://manpages.ubuntu.com/manpages/precise/man5/ietd.conf.5.html
|
||||
my $MAX_LUNS = 16864;
|
||||
|
||||
my $CONFIG_FILE = '/etc/iet/ietd.conf';
|
||||
my $DAEMON = '/usr/sbin/ietadm';
|
||||
my $SETTINGS = undef;
|
||||
my $CONFIG = undef;
|
||||
my $OLD_CONFIG = undef;
|
||||
|
||||
my @ssh_opts = ('-o', 'BatchMode=yes');
|
||||
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
|
||||
my @scp_cmd = ('/usr/bin/scp', @ssh_opts);
|
||||
my $id_rsa_path = '/etc/pve/priv/zfs';
|
||||
my $ietadm = '/usr/sbin/ietadm';
|
||||
|
||||
my $execute_command = sub {
|
||||
my ($scfg, $exec, $timeout, $method, @params) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $err = undef;
|
||||
my $target;
|
||||
my $cmd;
|
||||
my $res = ();
|
||||
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
my $errfunc = sub {
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
};
|
||||
|
||||
if ($exec eq 'scp') {
|
||||
$target = 'root@[' . $scfg->{portal} . ']';
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", '--', $method, "$target:$params[0]"];
|
||||
} else {
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $method, @params];
|
||||
}
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
$res = {
|
||||
result => 0,
|
||||
msg => $err,
|
||||
}
|
||||
} else {
|
||||
$res = {
|
||||
result => 1,
|
||||
msg => $msg,
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
};
|
||||
|
||||
my $read_config = sub {
|
||||
my ($scfg, $timeout) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $err = undef;
|
||||
my $luncmd = 'cat';
|
||||
my $target;
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
my $errfunc = sub {
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $CONFIG_FILE];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
die $err if ($err !~ /No such file or directory/);
|
||||
die "No configuration found. Install iet on $scfg->{portal}" if $msg eq '';
|
||||
}
|
||||
|
||||
return $msg;
|
||||
};
|
||||
|
||||
my $get_config = sub {
|
||||
my ($scfg) = @_;
|
||||
my @conf = undef;
|
||||
|
||||
my $config = $read_config->($scfg, undef);
|
||||
die "Missing config file" unless $config;
|
||||
|
||||
$OLD_CONFIG = $config;
|
||||
|
||||
return $config;
|
||||
};
|
||||
|
||||
my $parser = sub {
|
||||
my ($scfg) = @_;
|
||||
|
||||
my $line = 0;
|
||||
|
||||
my $base = get_base;
|
||||
my $config = $get_config->($scfg);
|
||||
my @cfgfile = split "\n", $config;
|
||||
|
||||
my $cfg_target = 0;
|
||||
foreach (@cfgfile) {
|
||||
$line++;
|
||||
if ($_ =~ /^\s*Target\s*([\w\-\:\.]+)\s*$/) {
|
||||
if ($1 eq $scfg->{target} && ! $cfg_target) {
|
||||
# start collecting info
|
||||
die "$line: Parse error [$_]" if $SETTINGS;
|
||||
$SETTINGS->{target} = $1;
|
||||
$cfg_target = 1;
|
||||
} elsif ($1 eq $scfg->{target} && $cfg_target) {
|
||||
die "$line: Parse error [$_]";
|
||||
} elsif ($cfg_target) {
|
||||
$cfg_target = 0;
|
||||
$CONFIG .= "$_\n";
|
||||
} else {
|
||||
$CONFIG .= "$_\n";
|
||||
}
|
||||
} else {
|
||||
if ($cfg_target) {
|
||||
$SETTINGS->{text} .= "$_\n";
|
||||
next if ($_ =~ /^\s*#/ || ! $_);
|
||||
my $option = $_;
|
||||
if ($_ =~ /^(\w+)\s*#/) {
|
||||
$option = $1;
|
||||
}
|
||||
if ($option =~ /^\s*(\w+)\s+(\w+)\s*$/) {
|
||||
if ($1 eq 'Lun') {
|
||||
die "$line: Parse error [$_]";
|
||||
}
|
||||
$SETTINGS->{$1} = $2;
|
||||
} elsif ($option =~ /^\s*(\w+)\s+(\d+)\s+([\w\-\/=,]+)\s*$/) {
|
||||
die "$line: Parse error [$option]" unless ($1 eq 'Lun');
|
||||
my $conf = undef;
|
||||
my $num = $2;
|
||||
my @lun = split ',', $3;
|
||||
die "$line: Parse error [$option]" unless (scalar(@lun) > 1);
|
||||
foreach (@lun) {
|
||||
my @lun_opt = split '=', $_;
|
||||
die "$line: Parse error [$option]" unless (scalar(@lun_opt) == 2);
|
||||
$conf->{$lun_opt[0]} = $lun_opt[1];
|
||||
}
|
||||
if ($conf->{Path} && $conf->{Path} =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$conf->{include} = 1;
|
||||
} else {
|
||||
$conf->{include} = 0;
|
||||
}
|
||||
$conf->{lun} = $num;
|
||||
push @{$SETTINGS->{luns}}, $conf;
|
||||
} else {
|
||||
die "$line: Parse error [$option]";
|
||||
}
|
||||
} else {
|
||||
$CONFIG .= "$_\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
$CONFIG =~ s/^\s+|\s+$|"\s*//g;
|
||||
};
|
||||
|
||||
my $update_config = sub {
|
||||
my ($scfg) = @_;
|
||||
my $file = "/tmp/config$$";
|
||||
my $config = '';
|
||||
|
||||
while ((my $option, my $value) = each(%$SETTINGS)) {
|
||||
next if ($option eq 'include' || $option eq 'luns' || $option eq 'Path' || $option eq 'text' || $option eq 'used');
|
||||
if ($option eq 'target') {
|
||||
$config = "\n\nTarget " . $SETTINGS->{target} . "\n" . $config;
|
||||
} else {
|
||||
$config .= "\t$option\t\t\t$value\n";
|
||||
}
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
my $lun_opt = '';
|
||||
while ((my $option, my $value) = each(%$lun)) {
|
||||
next if ($option eq 'include' || $option eq 'lun' || $option eq 'Path');
|
||||
if ($lun_opt eq '') {
|
||||
$lun_opt = $option . '=' . $value;
|
||||
} else {
|
||||
$lun_opt .= ',' . $option . '=' . $value;
|
||||
}
|
||||
}
|
||||
$config .= "\tLun $lun->{lun} Path=$lun->{Path},$lun_opt\n";
|
||||
}
|
||||
open(my $fh, '>', $file) or die "Could not open file '$file' $!";
|
||||
|
||||
print $fh $CONFIG;
|
||||
print $fh $config;
|
||||
close $fh;
|
||||
|
||||
my @params = ($CONFIG_FILE);
|
||||
my $res = $execute_command->($scfg, 'scp', undef, $file, @params);
|
||||
unlink $file;
|
||||
|
||||
die $res->{msg} unless $res->{result};
|
||||
};
|
||||
|
||||
my $get_target_tid = sub {
|
||||
my ($scfg) = @_;
|
||||
my $proc = '/proc/net/iet/volume';
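# each target shows up there as e.g. "tid:1 name:iqn.2001-04.com.example:tank" (example name)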
|
||||
my $tid = undef;
|
||||
|
||||
my @params = ($proc);
|
||||
my $res = $execute_command->($scfg, 'ssh', undef, 'cat', @params);
|
||||
die $res->{msg} unless $res->{result};
|
||||
my @cfg = split "\n", $res->{msg};
|
||||
|
||||
foreach (@cfg) {
|
||||
if ($_ =~ /^\s*tid:(\d+)\s+name:([\w\-\:\.]+)\s*$/) {
|
||||
if ($2 && $2 eq $scfg->{target}) {
|
||||
$tid = $1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $tid;
|
||||
};
|
||||
|
||||
my $get_lu_name = sub {
|
||||
my $used = ();
|
||||
my $i;
|
||||
|
||||
if (! exists $SETTINGS->{used}) {
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
$used->{$lun->{lun}} = 1;
|
||||
}
|
||||
$SETTINGS->{used} = $used;
|
||||
}
|
||||
|
||||
$used = $SETTINGS->{used};
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
last unless $used->{$i};
|
||||
}
|
||||
$SETTINGS->{used}->{$i} = 1;
|
||||
|
||||
return $i;
|
||||
};
|
||||
|
||||
my $init_lu_name = sub {
|
||||
my $used = ();
|
||||
|
||||
if (! exists($SETTINGS->{used})) {
|
||||
for (my $i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
$SETTINGS->{used} = $used;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
$SETTINGS->{used}->{$lun->{lun}} = 1;
|
||||
}
|
||||
};
|
||||
|
||||
my $free_lu_name = sub {
|
||||
my ($lu_name) = @_;
|
||||
my $new;
|
||||
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
if ($lun->{lun} != $lu_name) {
|
||||
push @$new, $lun;
|
||||
}
|
||||
}
|
||||
|
||||
$SETTINGS->{luns} = $new;
|
||||
$SETTINGS->{used}->{$lu_name} = 0;
|
||||
};
|
||||
|
||||
my $make_lun = sub {
|
||||
my ($scfg, $path) = @_;
|
||||
|
||||
die "Maximum number of LUNs per target is $MAX_LUNS" if scalar @{$SETTINGS->{luns}} >= $MAX_LUNS;
|
||||
|
||||
my $lun = $get_lu_name->();
|
||||
my $conf = {
|
||||
lun => $lun,
|
||||
Path => $path,
|
||||
Type => 'blockio',
|
||||
include => 1,
|
||||
};
|
||||
push @{$SETTINGS->{luns}}, $conf;
|
||||
|
||||
return $conf;
|
||||
};
|
||||
|
||||
my $list_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $lun = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
next unless $lun->{include} == 1;
|
||||
if ($lun->{Path} =~ /^$object$/) {
|
||||
return $lun->{lun} if (defined($lun->{lun}));
|
||||
die "$lun->{Path}: Missing LUN";
|
||||
}
|
||||
}
|
||||
|
||||
return $lun;
|
||||
};
|
||||
|
||||
my $list_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $name = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
next unless $lun->{include} == 1;
|
||||
if ($lun->{Path} =~ /^$object$/) {
|
||||
return $lun->{Path};
|
||||
}
|
||||
}
|
||||
|
||||
return $name;
|
||||
};
|
||||
|
||||
my $create_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
if ($list_lun->($scfg, $timeout, $method, @params)) {
|
||||
die "$params[0]: LUN exists";
|
||||
}
|
||||
my $lun = $params[0];
|
||||
$lun = $make_lun->($scfg, $lun);
|
||||
my $tid = $get_target_tid->($scfg);
|
||||
$update_config->($scfg);
|
||||
|
||||
my $path = "Path=$lun->{Path},Type=$lun->{Type}";
|
||||
|
||||
@params = ('--op', 'new', "--tid=$tid", "--lun=$lun->{lun}", '--params', $path);
|
||||
my $res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
|
||||
do {
|
||||
$free_lu_name->($lun->{lun});
|
||||
$update_config->($scfg);
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
|
||||
return $res->{msg};
|
||||
};
|
||||
|
||||
my $delete_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = {msg => undef};
|
||||
|
||||
my $path = $params[0];
|
||||
my $tid = $get_target_tid->($scfg);
|
||||
|
||||
foreach my $lun (@{$SETTINGS->{luns}}) {
|
||||
if ($lun->{Path} eq $path) {
|
||||
@params = ('--op', 'delete', "--tid=$tid", "--lun=$lun->{lun}");
|
||||
$res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
|
||||
if ($res->{result}) {
|
||||
$free_lu_name->($lun->{lun});
|
||||
$update_config->($scfg);
|
||||
last;
|
||||
} else {
|
||||
die $res->{msg};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $res->{msg};
|
||||
};
|
||||
|
||||
my $import_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
return $create_lun->($scfg, $timeout, $method, @params);
|
||||
};
|
||||
|
||||
my $modify_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $lun;
|
||||
my $res;
|
||||
|
||||
my $path = $params[1];
|
||||
my $tid = $get_target_tid->($scfg);
|
||||
|
||||
foreach my $cfg (@{$SETTINGS->{luns}}) {
|
||||
if ($cfg->{Path} eq $path) {
|
||||
$lun = $cfg;
|
||||
last;
|
||||
}
|
||||
}
|
||||
|
||||
@params = ('--op', 'delete', "--tid=$tid", "--lun=$lun->{lun}");
|
||||
$res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
|
||||
die $res->{msg} unless $res->{result};
|
||||
|
||||
$path = "Path=$lun->{Path},Type=$lun->{Type}";
|
||||
@params = ('--op', 'new', "--tid=$tid", "--lun=$lun->{lun}", '--params', $path);
|
||||
$res = $execute_command->($scfg, 'ssh', $timeout, $ietadm, @params);
|
||||
die $res->{msg} unless $res->{result};
|
||||
|
||||
return $res->{msg};
|
||||
};
|
||||
|
||||
my $add_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
return '';
|
||||
};
|
||||
|
||||
my $get_lun_cmd_map = sub {
|
||||
my ($method) = @_;
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
};
|
||||
|
||||
die "unknown command '$method'" unless exists $cmdmap->{$method};
|
||||
|
||||
return $cmdmap->{$method};
|
||||
};
|
||||
|
||||
sub run_lun_command {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
$parser->($scfg) unless $SETTINGS;
|
||||
my $cmdmap = $get_lun_cmd_map->($method);
|
||||
my $msg = $cmdmap->{cmd}->($scfg, $timeout, $method, @params);
|
||||
|
||||
return $msg;
|
||||
}
|
||||
|
||||
sub get_base {
|
||||
return '/dev';
|
||||
}
|
||||
|
||||
1;
601
src/PVE/Storage/LunCmd/Istgt.pm
Normal file
@ -0,0 +1,601 @@
package PVE::Storage::LunCmd::Istgt;
|
||||
|
||||
# TODO
|
||||
# Create initial target and LUN if target is missing ?
|
||||
# Create and use list of free LUNs
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use POSIX qw(ceil); # $parse_size below uses ceil()
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach);
|
||||
|
||||
my @CONFIG_FILES = (
|
||||
'/usr/local/etc/istgt/istgt.conf', # FreeBSD, FreeNAS
|
||||
'/var/etc/iscsi/istgt.conf' # NAS4Free
|
||||
);
|
||||
my @DAEMONS = (
|
||||
'/usr/local/etc/rc.d/istgt', # FreeBSD, FreeNAS
|
||||
'/var/etc/rc.d/istgt' # NAS4Free
|
||||
);
|
||||
|
||||
# A logical unit can have at most 63 LUNs
|
||||
# https://code.google.com/p/istgt/source/browse/src/istgt_lu.h#39
|
||||
my $MAX_LUNS = 64;
|
||||
|
||||
my $CONFIG_FILE = undef;
|
||||
my $DAEMON = undef;
|
||||
my $SETTINGS = undef;
|
||||
my $CONFIG = undef;
|
||||
my $OLD_CONFIG = undef;
|
||||
|
||||
my @ssh_opts = ('-o', 'BatchMode=yes');
|
||||
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
|
||||
my @scp_cmd = ('/usr/bin/scp', @ssh_opts);
|
||||
my $id_rsa_path = '/etc/pve/priv/zfs';
|
||||
|
||||
#Current SIGHUP reload limitations (http://www.peach.ne.jp/archives/istgt/):
|
||||
#
|
||||
# The parameters other than PG, IG, and LU are not reloaded by SIGHUP.
|
||||
# LU connected by the initiator can't be reloaded by SIGHUP.
|
||||
# PG and IG mapped to LU can't be deleted by SIGHUP.
|
||||
# If you delete an active LU, all connections of the LU are closed by SIGHUP.
|
||||
# Updating IG is not affected until the next login.
|
||||
#
|
||||
# FreeBSD
|
||||
# 1. Alt-F2 to change to native shell (zfsguru)
|
||||
# 2. pw mod user root -w yes (change password for root to root)
|
||||
# 3. vi /etc/ssh/sshd_config
|
||||
# 4. uncomment PermitRootLogin yes
|
||||
# 5. change PasswordAuthentication no to PasswordAuthentication yes
|
||||
# 6. /etc/rc.d/sshd restart
# 7. On one of the proxmox nodes login as root and run: ssh-copy-id ip_freebsd_host
# 8. vi /etc/ssh/sshd_config
# 9. comment PermitRootLogin yes
# 10. change PasswordAuthentication yes to PasswordAuthentication no
# 11. /etc/rc.d/sshd restart
# 12. Reset passwd -> pw mod user root -w no
# 13. Alt-Ctrl-F1 to return to zfsguru shell (zfsguru)
|
||||
|
||||
sub get_base;
|
||||
sub run_lun_command;
|
||||
|
||||
my $read_config = sub {
|
||||
my ($scfg, $timeout, $method) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $err = undef;
|
||||
my $luncmd = 'cat';
|
||||
my $target;
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
my $errfunc = sub {
|
||||
my $line = shift;
|
||||
$err .= "$line";
|
||||
};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $daemon = 0;
|
||||
foreach my $config (@CONFIG_FILES) {
|
||||
$err = undef;
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $config];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
do {
|
||||
$err = undef;
|
||||
$DAEMON = $DAEMONS[$daemon];
|
||||
$CONFIG_FILE = $config;
|
||||
last;
|
||||
} unless $@;
|
||||
$daemon++;
|
||||
}
|
||||
die $err if ($err && $err !~ /No such file or directory/);
|
||||
die "No configuration found. Install istgt on $scfg->{portal}" if $msg eq '';
|
||||
|
||||
return $msg;
|
||||
};
|
||||
|
||||
my $get_config = sub {
|
||||
my ($scfg) = @_;
|
||||
my @conf = undef;
|
||||
|
||||
my $config = $read_config->($scfg, undef, 'get_config');
|
||||
die "Missing config file" unless $config;
|
||||
|
||||
$OLD_CONFIG = $config;
|
||||
|
||||
return $config;
|
||||
};
|
||||
|
||||
my $parse_size = sub {
|
||||
my ($text) = @_;
|
||||
|
||||
return 0 if !$text;
|
||||
|
||||
if ($text =~ m/^(\d+(\.\d+)?)([TGMK]B)?$/) {
|
||||
my ($size, $reminder, $unit) = ($1, $2, $3);
|
||||
return $size if !$unit;
|
||||
if ($unit eq 'KB') {
|
||||
$size *= 1024;
|
||||
} elsif ($unit eq 'MB') {
|
||||
$size *= 1024*1024;
|
||||
} elsif ($unit eq 'GB') {
|
||||
$size *= 1024*1024*1024;
|
||||
} elsif ($unit eq 'TB') {
|
||||
$size *= 1024*1024*1024*1024;
|
||||
}
|
||||
if ($reminder) {
|
||||
$size = ceil($size);
|
||||
}
|
||||
return $size;
|
||||
} elsif ($text =~ /^auto$/i) {
|
||||
return 'AUTO';
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
};
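# for example (illustrative inputs only): '4GB' parses to 4294967296,
# 'auto' to the string 'AUTO', and anything unrecognized to 0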
|
||||
|
||||
my $size_with_unit = sub {
|
||||
my ($size, $n) = (shift, 0);
|
||||
|
||||
return '0KB' if !$size;
|
||||
|
||||
return $size if $size eq 'AUTO';
|
||||
|
||||
if ($size =~ m/^\d+$/) {
|
||||
++$n and $size /= 1024 until $size < 1024;
|
||||
if ($size =~ /\./) {
|
||||
return sprintf "%.2f%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
|
||||
} else {
|
||||
return sprintf "%d%s", $size, ( qw[bytes KB MB GB TB] )[ $n ];
|
||||
}
|
||||
}
|
||||
die "$size: Not a number";
|
||||
};
|
||||
|
||||
my $lun_dumper = sub {
|
||||
my ($lun) = @_;
|
||||
my $config = '';
|
||||
|
||||
$config .= "\n[$lun]\n";
|
||||
$config .= 'TargetName ' . $SETTINGS->{$lun}->{TargetName} . "\n";
|
||||
$config .= 'Mapping ' . $SETTINGS->{$lun}->{Mapping} . "\n";
|
||||
$config .= 'AuthGroup ' . $SETTINGS->{$lun}->{AuthGroup} . "\n";
|
||||
$config .= 'UnitType ' . $SETTINGS->{$lun}->{UnitType} . "\n";
|
||||
$config .= 'QueueDepth ' . $SETTINGS->{$lun}->{QueueDepth} . "\n";
|
||||
|
||||
foreach my $conf (@{$SETTINGS->{$lun}->{luns}}) {
|
||||
$config .= "$conf->{lun} Storage " . $conf->{Storage};
|
||||
$config .= ' ' . $size_with_unit->($conf->{Size}) . "\n";
|
||||
foreach ($conf->{options}) {
|
||||
if ($_) {
|
||||
$config .= "$conf->{lun} Option " . $_ . "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
$config .= "\n";
|
||||
|
||||
return $config;
|
||||
};
|
||||
|
||||
my $get_lu_name = sub {
|
||||
my ($target) = @_;
|
||||
my $used = ();
|
||||
my $i;
|
||||
|
||||
if (! exists $SETTINGS->{$target}->{used}) {
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
|
||||
$lun->{lun} =~ /^LUN(\d+)$/;
|
||||
$used->{$1} = 1;
|
||||
}
|
||||
$SETTINGS->{$target}->{used} = $used;
|
||||
}
|
||||
|
||||
$used = $SETTINGS->{$target}->{used};
|
||||
for ($i = 0; $i < $MAX_LUNS; $i++) {
|
||||
last unless $used->{$i};
|
||||
}
|
||||
$SETTINGS->{$target}->{used}->{$i} = 1;
|
||||
|
||||
return "LUN$i";
|
||||
};
|
||||
|
||||
my $init_lu_name = sub {
|
||||
my ($target) = @_;
|
||||
my $used = ();
|
||||
|
||||
if (! exists($SETTINGS->{$target}->{used})) {
|
||||
for (my $i = 0; $i < $MAX_LUNS; $i++) {
|
||||
$used->{$i} = 0;
|
||||
}
|
||||
$SETTINGS->{$target}->{used} = $used;
|
||||
}
|
||||
foreach my $lun (@{$SETTINGS->{$target}->{luns}}) {
|
||||
$lun->{lun} =~ /^LUN(\d+)$/;
|
||||
$SETTINGS->{$target}->{used}->{$1} = 1;
|
||||
}
|
||||
};
|
||||
|
||||
my $free_lu_name = sub {
|
||||
my ($target, $lu_name) = @_;
|
||||
|
||||
$lu_name =~ /^LUN(\d+)$/;
|
||||
$SETTINGS->{$target}->{used}->{$1} = 0;
|
||||
};
|
||||
|
||||
my $make_lun = sub {
|
||||
my ($scfg, $path) = @_;
|
||||
|
||||
my $target = $SETTINGS->{current};
|
||||
die 'Maximum number of LUNs per target is 63' if scalar @{$SETTINGS->{$target}->{luns}} >= $MAX_LUNS;
|
||||
|
||||
my @options = ();
|
||||
my $lun = $get_lu_name->($target);
|
||||
if ($scfg->{nowritecache}) {
|
||||
push @options, "WriteCache Disable";
|
||||
}
|
||||
my $conf = {
|
||||
lun => $lun,
|
||||
Storage => $path,
|
||||
Size => 'AUTO',
|
||||
options => @options,
|
||||
};
|
||||
push @{$SETTINGS->{$target}->{luns}}, $conf;
|
||||
|
||||
return $conf->{lun};
|
||||
};
|
||||
|
||||
my $parser = sub {
|
||||
my ($scfg) = @_;
|
||||
|
||||
my $lun = undef;
|
||||
my $line = 0;
|
||||
|
||||
my $config = $get_config->($scfg);
|
||||
my @cfgfile = split "\n", $config;
|
||||
|
||||
foreach (@cfgfile) {
|
||||
$line++;
|
||||
if ($_ =~ /^\s*\[(PortalGroup\d+)\]\s*/) {
|
||||
$lun = undef;
|
||||
$SETTINGS->{$1} = ();
|
||||
} elsif ($_ =~ /^\s*\[(InitiatorGroup\d+)\]\s*/) {
|
||||
$lun = undef;
|
||||
$SETTINGS->{$1} = ();
|
||||
} elsif ($_ =~ /^\s*PidFile\s+"?([\w\/\.]+)"?\s*/) {
|
||||
$lun = undef;
|
||||
$SETTINGS->{pidfile} = $1;
|
||||
} elsif ($_ =~ /^\s*NodeBase\s+"?([\w\-\.]+)"?\s*/) {
|
||||
$lun = undef;
|
||||
$SETTINGS->{nodebase} = $1;
|
||||
} elsif ($_ =~ /^\s*\[(LogicalUnit\d+)\]\s*/) {
|
||||
$lun = $1;
|
||||
$SETTINGS->{$lun} = ();
|
||||
$SETTINGS->{targets}++;
|
||||
} elsif ($lun) {
|
||||
next if (($_ =~ /^\s*#/) || ($_ =~ /^\s*$/));
|
||||
if ($_ =~ /^\s*(\w+)\s+(.+)\s*/) {
|
||||
my $arg1 = $1;
|
||||
my $arg2 = $2;
|
||||
$arg2 =~ s/^\s+|\s+$|"\s*//g;
|
||||
if ($arg2 =~ /^Storage\s*(.+)/i) {
|
||||
$SETTINGS->{$lun}->{$arg1}->{storage} = $1;
|
||||
} elsif ($arg2 =~ /^Option\s*(.+)/i) {
|
||||
push @{$SETTINGS->{$lun}->{$arg1}->{options}}, $1;
|
||||
} else {
|
||||
$SETTINGS->{$lun}->{$arg1} = $arg2;
|
||||
}
|
||||
} else {
|
||||
die "$line: parse error [$_]";
|
||||
}
|
||||
}
|
||||
$CONFIG .= "$_\n" unless $lun;
|
||||
}
|
||||
|
||||
$CONFIG =~ s/\n$//;
|
||||
die "$scfg->{target}: Target not found" unless $SETTINGS->{targets};
|
||||
my $max = $SETTINGS->{targets};
|
||||
my $base = get_base;
|
||||
|
||||
for (my $i = 1; $i <= $max; $i++) {
|
||||
my $target = $SETTINGS->{nodebase}.':'.$SETTINGS->{"LogicalUnit$i"}->{TargetName};
|
||||
if ($target eq $scfg->{target}) {
|
||||
my $lu = ();
|
||||
while ((my $key, my $val) = each(%{$SETTINGS->{"LogicalUnit$i"}})) {
|
||||
if ($key =~ /^LUN\d+/) {
|
||||
$val->{storage} =~ /^([\w\/\-]+)\s+(\w+)/;
|
||||
my $storage = $1;
|
||||
my $size = $parse_size->($2);
|
||||
my $conf = undef;
|
||||
my @options = ();
|
||||
if ($val->{options}) {
|
||||
@options = @{$val->{options}};
|
||||
}
|
||||
if ($storage =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$conf = {
|
||||
lun => $key,
|
||||
Storage => $storage,
|
||||
Size => $size,
|
||||
options => @options,
|
||||
}
|
||||
}
|
||||
push @$lu, $conf if $conf;
|
||||
delete $SETTINGS->{"LogicalUnit$i"}->{$key};
|
||||
}
|
||||
}
|
||||
$SETTINGS->{"LogicalUnit$i"}->{luns} = $lu;
|
||||
$SETTINGS->{current} = "LogicalUnit$i";
|
||||
$init_lu_name->("LogicalUnit$i");
|
||||
} else {
|
||||
$CONFIG .= $lun_dumper->("LogicalUnit$i");
|
||||
delete $SETTINGS->{"LogicalUnit$i"};
|
||||
$SETTINGS->{targets}--;
|
||||
}
|
||||
}
|
||||
die "$scfg->{target}: Target not found" unless $SETTINGS->{targets} > 0;
|
||||
};
|
||||
|
||||
my $list_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $name = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
for my $key (keys %$SETTINGS) {
|
||||
next unless $key =~ /^LogicalUnit\d+$/;
|
||||
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
|
||||
if ($lun->{Storage} =~ /^$object$/) {
|
||||
return $lun->{Storage};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $name;
|
||||
};
|
||||
|
||||
my $create_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = ();
|
||||
my $file = "/tmp/config$$";
|
||||
|
||||
if ($list_lun->($scfg, $timeout, $method, @params)) {
|
||||
die "$params[0]: LUN exists";
|
||||
}
|
||||
my $lun = $params[0];
|
||||
$lun = $make_lun->($scfg, $lun);
|
||||
my $config = $lun_dumper->($SETTINGS->{current});
|
||||
open(my $fh, '>', $file) or die "Could not open file '$file' $!";
|
||||
|
||||
print $fh $CONFIG;
|
||||
print $fh $config;
|
||||
close $fh;
|
||||
@params = ($CONFIG_FILE);
|
||||
$res = {
|
||||
cmd => 'scp',
|
||||
method => $file,
|
||||
params => \@params,
|
||||
msg => $lun,
|
||||
post_exe => sub {
|
||||
unlink $file;
|
||||
},
|
||||
};
|
||||
|
||||
return $res;
|
||||
};
|
||||
|
||||
my $delete_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = ();
|
||||
my $file = "/tmp/config$$";
|
||||
|
||||
my $target = $SETTINGS->{current};
|
||||
my $luns = ();
|
||||
|
||||
foreach my $conf (@{$SETTINGS->{$target}->{luns}}) {
|
||||
if ($conf->{Storage} =~ /^$params[0]$/) {
|
||||
$free_lu_name->($target, $conf->{lun});
|
||||
} else {
|
||||
push @$luns, $conf;
|
||||
}
|
||||
}
|
||||
$SETTINGS->{$target}->{luns} = $luns;
|
||||
|
||||
my $config = $lun_dumper->($SETTINGS->{current});
|
||||
open(my $fh, '>', $file) or die "Could not open file '$file' $!";
|
||||
|
||||
print $fh $CONFIG;
|
||||
print $fh $config;
|
||||
close $fh;
|
||||
@params = ($CONFIG_FILE);
|
||||
$res = {
|
||||
cmd => 'scp',
|
||||
method => $file,
|
||||
params => \@params,
|
||||
post_exe => sub {
|
||||
unlink $file;
|
||||
run_lun_command($scfg, undef, 'add_view', 'restart');
|
||||
},
|
||||
};
|
||||
|
||||
return $res;
|
||||
};
|
||||
|
||||
my $import_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
my $res = $create_lun->($scfg, $timeout, $method, @params);
|
||||
|
||||
return $res;
|
||||
};
|
||||
|
||||
my $add_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $cmdmap;
|
||||
|
||||
if (@params && $params[0] eq 'restart') {
|
||||
@params = ('onerestart', '>&', '/dev/null');
|
||||
$cmdmap = {
|
||||
cmd => 'ssh',
|
||||
method => $DAEMON,
|
||||
params => \@params,
|
||||
};
|
||||
} else {
|
||||
@params = ('-HUP', '`cat '. "$SETTINGS->{pidfile}`");
|
||||
$cmdmap = {
|
||||
cmd => 'ssh',
|
||||
method => 'kill',
|
||||
params => \@params,
|
||||
};
|
||||
}
|
||||
|
||||
return $cmdmap;
|
||||
};
|
||||
|
||||
my $modify_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
# Current SIGHUP reload limitations
|
||||
# LU connected by the initiator can't be reloaded by SIGHUP.
|
||||
# Until above limitation persists modifying a LUN will require
|
||||
# a restart of the daemon breaking all current connections
|
||||
#die 'Modify a connected LUN is not currently supported by istgt';
|
||||
@params = ('restart', @params);
|
||||
|
||||
return $add_view->($scfg, $timeout, $method, @params);
|
||||
};
|
||||
|
||||
my $list_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $lun = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
for my $key (keys %$SETTINGS) {
|
||||
next unless $key =~ /^LogicalUnit\d+$/;
|
||||
foreach my $lun (@{$SETTINGS->{$key}->{luns}}) {
|
||||
if ($lun->{Storage} =~ /^$object$/) {
|
||||
if ($lun->{lun} =~ /^LUN(\d+)/) {
|
||||
return $1;
|
||||
}
|
||||
die "$lun->{Storage}: Missing LUN";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $lun;
|
||||
};
|
||||
|
||||
my $get_lun_cmd_map = sub {
|
||||
my ($method) = @_;
|
||||
|
||||
my $cmdmap = {
|
||||
create_lu => { cmd => $create_lun },
|
||||
delete_lu => { cmd => $delete_lun },
|
||||
import_lu => { cmd => $import_lun },
|
||||
modify_lu => { cmd => $modify_lun },
|
||||
add_view => { cmd => $add_view },
|
||||
list_view => { cmd => $list_view },
|
||||
list_lu => { cmd => $list_lun },
|
||||
};
|
||||
|
||||
die "unknown command '$method'" unless exists $cmdmap->{$method};
|
||||
|
||||
return $cmdmap->{$method};
|
||||
};
|
||||
|
||||
sub run_lun_command {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $luncmd;
|
||||
my $target;
|
||||
my $cmd;
|
||||
my $res;
|
||||
$timeout = 10 if !$timeout;
|
||||
my $is_add_view = 0;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
$parser->($scfg) unless $SETTINGS;
|
||||
my $cmdmap = $get_lun_cmd_map->($method);
|
||||
if ($method eq 'add_view') {
|
||||
$is_add_view = 1 ;
|
||||
$timeout = 15;
|
||||
}
|
||||
if (ref $cmdmap->{cmd} eq 'CODE') {
|
||||
$res = $cmdmap->{cmd}->($scfg, $timeout, $method, @params);
|
||||
if (ref $res) {
|
||||
$method = $res->{method};
|
||||
@params = @{$res->{params}};
|
||||
if ($res->{cmd} eq 'scp') {
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $method, "$target:$params[0]"];
|
||||
} else {
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $method, @params];
|
||||
}
|
||||
} else {
|
||||
return $res;
|
||||
}
|
||||
} else {
|
||||
$luncmd = $cmdmap->{cmd};
|
||||
$method = $cmdmap->{method};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $method, @params];
|
||||
}
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
};
|
||||
if ($@ && $is_add_view) {
|
||||
my $err = $@;
|
||||
if ($OLD_CONFIG) {
|
||||
my $err1 = undef;
|
||||
my $file = "/tmp/config$$";
|
||||
open(my $fh, '>', $file) or die "Could not open file '$file' $!";
|
||||
print $fh $OLD_CONFIG;
|
||||
close $fh;
|
||||
$cmd = [@scp_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $file, $CONFIG_FILE];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
};
|
||||
$err1 = $@ if $@;
|
||||
unlink $file;
|
||||
die "$err\n$err1" if $err1;
|
||||
eval {
|
||||
run_lun_command($scfg, undef, 'add_view', 'restart');
|
||||
};
|
||||
die "$err\n$@" if ($@);
|
||||
}
|
||||
die $err;
|
||||
} elsif ($@) {
|
||||
die $@;
|
||||
} elsif ($is_add_view) {
|
||||
$OLD_CONFIG = undef;
|
||||
}
|
||||
|
||||
if ($res->{post_exe} && ref $res->{post_exe} eq 'CODE') {
|
||||
$res->{post_exe}->();
|
||||
}
|
||||
|
||||
if ($res->{msg}) {
|
||||
$msg = $res->{msg};
|
||||
}
|
||||
|
||||
return $msg;
|
||||
}
|
||||
|
||||
sub get_base {
|
||||
return '/dev/zvol';
|
||||
}
|
||||
|
||||
1;
|
||||
420
src/PVE/Storage/LunCmd/LIO.pm
Normal file
@ -0,0 +1,420 @@
|
||||
package PVE::Storage::LunCmd::LIO;
|
||||
|
||||
# lightly based on code from Iet.pm
|
||||
#
|
||||
# additional changes:
|
||||
# -----------------------------------------------------------------
|
||||
# Copyright (c) 2018 BestSolution.at EDV Systemhaus GmbH
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This software is released under the terms of the
|
||||
#
|
||||
# "GNU Affero General Public License"
|
||||
#
|
||||
# and may only be distributed and used under the terms of the
|
||||
# mentioned license. You should have received a copy of the license
|
||||
# along with this software product, if not you can download it from
|
||||
# https://www.gnu.org/licenses/agpl-3.0.en.html
|
||||
#
|
||||
# Author: udo.rader@bestsolution.at
|
||||
# -----------------------------------------------------------------
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use PVE::Tools qw(run_command);
|
||||
use JSON;
|
||||
|
||||
sub get_base;
|
||||
|
||||
# targetcli constants
|
||||
# config file location differs from distro to distro
|
||||
my @CONFIG_FILES = (
|
||||
'/etc/rtslib-fb-target/saveconfig.json', # Debian 9.x et al
|
||||
'/etc/target/saveconfig.json', # ArchLinux, CentOS
|
||||
);
|
||||
my $BACKSTORE = '/backstores/block';
|
||||
|
||||
my $SETTINGS = undef;
|
||||
my $SETTINGS_TIMESTAMP = 0;
|
||||
my $SETTINGS_MAXAGE = 15; # in seconds
|
||||
|
||||
my @ssh_opts = ('-o', 'BatchMode=yes');
|
||||
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
|
||||
my $id_rsa_path = '/etc/pve/priv/zfs';
|
||||
my $targetcli = '/usr/bin/targetcli';
|
||||
|
||||
my $execute_remote_command = sub {
|
||||
my ($scfg, $timeout, $remote_command, @params) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $err = undef;
|
||||
my $target;
|
||||
my $cmd;
|
||||
my $res;
|
||||
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub { $msg .= "$_[0]\n" };
|
||||
my $errfunc = sub { $err .= "$_[0]\n" };
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
$cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, '--', $remote_command, @params];
|
||||
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
$res = {
|
||||
result => 0,
|
||||
msg => $err,
|
||||
}
|
||||
} else {
|
||||
$res = {
|
||||
result => 1,
|
||||
msg => $msg,
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
};
|
||||
|
||||
# fetch targetcli configuration from the portal
|
||||
my $read_config = sub {
|
||||
my ($scfg, $timeout) = @_;
|
||||
|
||||
my $msg = '';
|
||||
my $err = undef;
|
||||
my $luncmd = 'cat';
|
||||
my $target;
|
||||
my $retry = 1;
|
||||
|
||||
$timeout = 10 if !$timeout;
|
||||
|
||||
my $output = sub { $msg .= "$_[0]\n" };
|
||||
my $errfunc = sub { $err .= "$_[0]\n" };
|
||||
|
||||
$target = 'root@' . $scfg->{portal};
|
||||
|
||||
foreach my $oneFile (@CONFIG_FILES) {
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target, $luncmd, $oneFile];
|
||||
eval {
|
||||
run_command($cmd, outfunc => $output, errfunc => $errfunc, timeout => $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
die $err if ($err !~ /No such file or directory/);
|
||||
}
|
||||
return $msg if $msg ne '';
|
||||
}
|
||||
|
||||
die "No configuration found. Install targetcli on $scfg->{portal}\n" if $msg eq '';
|
||||
|
||||
return $msg;
|
||||
};
|
||||
|
||||
my $get_config = sub {
|
||||
my ($scfg) = @_;
|
||||
my @conf = undef;
|
||||
|
||||
my $config = $read_config->($scfg, undef);
|
||||
die "Missing config file" unless $config;
|
||||
|
||||
return $config;
|
||||
};
|
||||
|
||||
# Return settings of a specific target
|
||||
my $get_target_settings = sub {
|
||||
my ($scfg) = @_;
|
||||
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
return undef if !$SETTINGS;
|
||||
return $SETTINGS->{$id};
|
||||
};
|
||||
|
||||
# fetches and parses targetcli config from the portal
|
||||
my $parser = sub {
|
||||
my ($scfg) = @_;
|
||||
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
|
||||
my $tpg_tag;
|
||||
|
||||
if ($tpg =~ /^tpg(\d+)$/) {
|
||||
$tpg_tag = $1;
|
||||
} else {
|
||||
die "Target Portal Group has invalid value, must contain string 'tpg' and a suffix number, eg 'tpg17'\n";
|
||||
}
|
||||
|
||||
my $config = $get_config->($scfg);
|
||||
my $jsonconfig = JSON->new->utf8->decode($config);
|
||||
|
||||
my $haveTarget = 0;
|
||||
foreach my $target (@{$jsonconfig->{targets}}) {
|
||||
# only interested in iSCSI targets
|
||||
next if !($target->{fabric} eq 'iscsi' && $target->{wwn} eq $scfg->{target});
|
||||
# find correct TPG
|
||||
foreach my $tpg (@{$target->{tpgs}}) {
|
||||
if ($tpg->{tag} == $tpg_tag) {
|
||||
my $res = [];
|
||||
foreach my $lun (@{$tpg->{luns}}) {
|
||||
my ($idx, $storage_object);
|
||||
if ($lun->{index} =~ /^(\d+)$/) {
|
||||
$idx = $1;
|
||||
}
|
||||
if ($lun->{storage_object} =~ m|^($BACKSTORE/.*)$|) {
|
||||
$storage_object = $1;
|
||||
}
|
||||
die "Invalid lun definition in config!\n"
|
||||
if !(defined($idx) && defined($storage_object));
|
||||
push @$res, { index => $idx, storage_object => $storage_object };
|
||||
}
|
||||
|
||||
my $id = "$scfg->{portal}.$scfg->{target}";
|
||||
$SETTINGS->{$id}->{luns} = $res;
|
||||
$haveTarget = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# seriously unhappy if the target server lacks iSCSI target configuration ...
|
||||
if (!$haveTarget) {
|
||||
die "target portal group tpg$tpg_tag not found!\n";
|
||||
}
|
||||
};
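# Illustrative sketch only (not part of the upstream module): the parser above
# expects a saveconfig.json shaped roughly like the following abridged structure;
# the target WWN and backstore name used here are made-up placeholders.
#
#   {
#     "targets": [
#       { "fabric": "iscsi",
#         "wwn": "iqn.2018-04.example:sometarget",
#         "tpgs": [
#           { "tag": 1,
#             "luns": [
#               { "index": 0, "storage_object": "/backstores/block/vm-100-disk-1" }
#             ] }
#         ] }
#     ]
#   }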
|
||||
|
||||
# Get prefix for backstores
|
||||
my $get_backstore_prefix = sub {
|
||||
my ($scfg) = @_;
|
||||
my $pool = $scfg->{pool};
|
||||
$pool =~ s/\//-/g;
|
||||
return $pool . '-';
|
||||
};
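# Hypothetical example, for illustration only: for a storage configured with
# pool 'tank/vmdata', slashes are replaced by dashes and the prefix becomes
# 'tank-vmdata-', so a volume 'vm-100-disk-0' would be registered as the
# backstore 'tank-vmdata-vm-100-disk-0'.
#   $get_backstore_prefix->({ pool => 'tank/vmdata' });  # 'tank-vmdata-'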
|
||||
|
||||
# removes the given lu_name from the local list of luns
|
||||
my $free_lu_name = sub {
|
||||
my ($scfg, $lu_name) = @_;
|
||||
|
||||
my $new = [];
|
||||
my $target = $get_target_settings->($scfg);
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} ne "$BACKSTORE/$lu_name") {
|
||||
push @$new, $lun;
|
||||
}
|
||||
}
|
||||
|
||||
$target->{luns} = $new;
|
||||
};
|
||||
|
||||
# locally registers a new lun
|
||||
my $register_lun = sub {
|
||||
my ($scfg, $idx, $volname) = @_;
|
||||
|
||||
my $conf = {
|
||||
index => $idx,
|
||||
storage_object => "$BACKSTORE/$volname",
|
||||
is_new => 1,
|
||||
};
|
||||
my $target = $get_target_settings->($scfg);
|
||||
push @{$target->{luns}}, $conf;
|
||||
|
||||
return $conf;
|
||||
};
|
||||
|
||||
# extracts the ZFS volume name from a device path
|
||||
my $extract_volname = sub {
|
||||
my ($scfg, $lunpath) = @_;
|
||||
my $volname = undef;
|
||||
|
||||
my $base = get_base;
|
||||
if ($lunpath =~ /^$base\/$scfg->{pool}\/([\w\-]+)$/) {
|
||||
$volname = $1;
|
||||
my $prefix = $get_backstore_prefix->($scfg);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
# If we have a lun with the pool prefix matching this vol, then return this one
|
||||
# like pool-pve-vm-100-disk-0
|
||||
# Else, just fall back to the old naming scheme, which is vm-100-disk-0
|
||||
if ($lun->{storage_object} =~ /^$BACKSTORE\/($prefix$volname)$/) {
|
||||
return $1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return $volname;
|
||||
};
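# Rough example with hypothetical values: with get_base() returning '/dev' and
# pool => 'tank', a LUN path '/dev/tank/vm-100-disk-1' yields 'vm-100-disk-1',
# or 'tank-vm-100-disk-1' if a prefixed backstore for that volume already
# exists in the cached target settings.
#   $extract_volname->($scfg, '/dev/tank/vm-100-disk-1');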
|
||||
|
||||
# retrieves the LUN index for a particular object
|
||||
my $list_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $lun = undef;
|
||||
|
||||
my $object = $params[0];
|
||||
my $volname = $extract_volname->($scfg, $object);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
|
||||
return undef if !defined($volname); # nothing to search for..
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $lun->{index};
|
||||
}
|
||||
}
|
||||
|
||||
return $lun;
|
||||
};
|
||||
|
||||
# determines whether the given object exists on the portal
|
||||
my $list_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
my $object = $params[0];
|
||||
my $volname = $extract_volname->($scfg, $object);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
if ($lun->{storage_object} eq "$BACKSTORE/$volname") {
|
||||
return $object;
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
};
|
||||
|
||||
# adds a new LUN to the target
|
||||
my $create_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
if ($list_lun->($scfg, $timeout, $method, @params)) {
|
||||
die "$params[0]: LUN already exists!";
|
||||
}
|
||||
|
||||
my $device = $params[0];
|
||||
my $volname = $extract_volname->($scfg, $device);
|
||||
# Here we create a new device, so we don't get the volname prefixed with the pool name
|
||||
# as extract_volname couldn't find a matching vol yet
|
||||
$volname = $get_backstore_prefix->($scfg) . $volname;
|
||||
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
|
||||
|
||||
# step 1: create backstore for device
|
||||
my @cliparams = ($BACKSTORE, 'create', "name=$volname", "dev=$device");
|
||||
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
# step 2: enable unmap support on the backstore
|
||||
@cliparams = ($BACKSTORE . '/' . $volname, 'set', 'attribute', 'emulate_tpu=1');
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
# step 3: register lun with target
|
||||
# targetcli /iscsi/iqn.2018-04.at.bestsolution.somehost:target/tpg1/luns/ create /backstores/block/foobar
|
||||
@cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'create', "$BACKSTORE/$volname" );
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
die $res->{msg} if !$res->{result};
|
||||
|
||||
# targetcli responds with "Created LUN 99"
|
||||
# not calculating the index ourselves, because the index at the portal might have
|
||||
# changed without our knowledge, so relying on the number that targetcli returns
|
||||
my $lun_idx;
|
||||
if ($res->{msg} =~ /LUN (\d+)/) {
|
||||
$lun_idx = $1;
|
||||
} else {
|
||||
die "unable to determine new LUN index: $res->{msg}";
|
||||
}
|
||||
|
||||
$register_lun->($scfg, $lun_idx, $volname);
|
||||
|
||||
# step 4: unfortunately, targetcli doesn't always save changes, no matter
|
||||
# if auto_save_on_exit is true or not. So saving to be safe ...
|
||||
$execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig');
|
||||
|
||||
return $res->{msg};
|
||||
};
|
||||
|
||||
my $delete_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
my $res = {msg => undef};
|
||||
|
||||
my $tpg = $scfg->{lio_tpg} || die "Target Portal Group not set, aborting!\n";
|
||||
|
||||
my $path = $params[0];
|
||||
my $volname = $extract_volname->($scfg, $path);
|
||||
my $target = $get_target_settings->($scfg);
|
||||
|
||||
foreach my $lun (@{$target->{luns}}) {
|
||||
next if $lun->{storage_object} ne "$BACKSTORE/$volname";
|
||||
|
||||
# step 1: delete the lun
|
||||
my @cliparams = ("/iscsi/$scfg->{target}/$tpg/luns/", 'delete', "lun$lun->{index}" );
|
||||
my $res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
|
||||
# step 2: delete the backstore
|
||||
@cliparams = ($BACKSTORE, 'delete', $volname);
|
||||
$res = $execute_remote_command->($scfg, $timeout, $targetcli, @cliparams);
|
||||
do {
|
||||
die $res->{msg};
|
||||
} unless $res->{result};
|
||||
|
||||
# step 3: save to be safe ...
|
||||
$execute_remote_command->($scfg, $timeout, $targetcli, 'saveconfig');
|
||||
|
||||
# update internal cache
|
||||
$free_lu_name->($scfg, $volname);
|
||||
|
||||
last;
|
||||
}
|
||||
|
||||
return $res->{msg};
|
||||
};
|
||||
|
||||
my $import_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
return $create_lun->($scfg, $timeout, $method, @params);
|
||||
};
|
||||
|
||||
# needed for example when the underlying ZFS volume has been resized
|
||||
my $modify_lun = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
# Nothing to do on volume modification for LIO
|
||||
return undef;
|
||||
};
|
||||
|
||||
my $add_view = sub {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
return '';
|
||||
};
|
||||
|
||||
my %lun_cmd_map = (
|
||||
create_lu => $create_lun,
|
||||
delete_lu => $delete_lun,
|
||||
import_lu => $import_lun,
|
||||
modify_lu => $modify_lun,
|
||||
add_view => $add_view,
|
||||
list_view => $list_view,
|
||||
list_lu => $list_lun,
|
||||
);
|
||||
|
||||
sub run_lun_command {
|
||||
my ($scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
# fetch configuration from target if we haven't yet or if it is stale
|
||||
my $timediff = time - $SETTINGS_TIMESTAMP;
|
||||
my $target = $get_target_settings->($scfg);
|
||||
if (!$target || $timediff > $SETTINGS_MAXAGE) {
|
||||
$SETTINGS_TIMESTAMP = time;
|
||||
$parser->($scfg);
|
||||
}
|
||||
|
||||
die "unknown command '$method'" unless exists $lun_cmd_map{$method};
|
||||
my $msg = $lun_cmd_map{$method}->($scfg, $timeout, $method, @params);
|
||||
|
||||
return $msg;
|
||||
}
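# Hypothetical usage sketch (volume path and config are placeholders):
#   my $idx = PVE::Storage::LunCmd::LIO::run_lun_command(
#       $scfg, undef, 'list_view', '/dev/tank/vm-100-disk-1');
# This refreshes the cached targetcli settings if they are stale and returns
# the LUN index of the matching backstore, or undef if none is registered.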
|
||||
|
||||
sub get_base {
|
||||
return '/dev';
|
||||
}
|
||||
|
||||
1;
|
||||
5
src/PVE/Storage/LunCmd/Makefile
Normal file
@ -0,0 +1,5 @@
|
||||
SOURCES=Comstar.pm Istgt.pm Iet.pm LIO.pm
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/Storage/LunCmd/$$i; done
|
||||
393
src/PVE/Storage/LvmThinPlugin.pm
Normal file
@ -0,0 +1,393 @@
|
||||
package PVE::Storage::LvmThinPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use IO::File;
|
||||
|
||||
use PVE::Tools qw(run_command trim);
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Storage::LVMPlugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
# see: man lvmthin
|
||||
# lvcreate -n ThinDataLV -L LargeSize VG
|
||||
# lvconvert --type thin-pool VG/ThinDataLV
|
||||
# lvcreate -n pvepool -L 20G pve
|
||||
# lvconvert --type thin-pool pve/pvepool
|
||||
|
||||
# NOTE: volumes which were created as linked clones of another base volume
|
||||
# are currently not tracking this relationship in their volume IDs. This is
|
||||
# generally not a problem, as LVM thin allows deletion of such base volumes
|
||||
# without affecting the linked clones. This leads to increased disk usage
|
||||
# when migrating LVM-thin volumes, which is normally prevented for linked clones.
|
||||
|
||||
use base qw(PVE::Storage::LVMPlugin);
|
||||
|
||||
sub type {
|
||||
return 'lvmthin';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, { images => 1, rootdir => 1}],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
thinpool => {
|
||||
description => "LVM thin pool LV name.",
|
||||
type => 'string', format => 'pve-storage-vgname',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
thinpool => { fixed => 1 },
|
||||
vgname => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# NOTE: the fourth and fifth element of the returned array are always
|
||||
# undef, even if the volume is a linked clone of another volume. see note
|
||||
# at beginning of file.
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
PVE::Storage::Plugin::parse_lvm_name($volname);
|
||||
|
||||
if ($volname =~ m/^((vm|base)-(\d+)-\S+)$/) {
|
||||
return ('images', $1, $3, undef, undef, $2 eq 'base', 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse lvm volume name '$volname'\n";
|
||||
}
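# Worked example, illustrative only: parse_volname('vm-100-disk-0') returns
# ('images', 'vm-100-disk-0', '100', undef, undef, '', 'raw'), while
# parse_volname('base-100-disk-0') marks the volume as a base image via the
# sixth element (true only when the name starts with 'base-').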
|
||||
|
||||
sub filesystem_path {
|
||||
my ($class, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $path = defined($snapname) ? "/dev/$vg/snap_${name}_$snapname": "/dev/$vg/$name";
|
||||
|
||||
return wantarray ? ($path, $vmid, $vtype) : $path;
|
||||
}
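# Example with an assumed config, for illustration: with vgname => 'pve', the
# volname 'vm-100-disk-0' maps to '/dev/pve/vm-100-disk-0', and its snapshot
# 'foo' maps to '/dev/pve/snap_vm-100-disk-0_foo'.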
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "unsupported format '$fmt'" if $fmt ne 'raw';
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
my $vgs = PVE::Storage::LVMPlugin::lvm_vgs();
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
die "no such volume group '$vg'\n" if !defined ($vgs->{$vg});
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid)
|
||||
if !$name;
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-aly', '-V', "${size}k", '--name', $name,
|
||||
'--thinpool', "$vg/$scfg->{thinpool}" ];
|
||||
|
||||
run_command($cmd, errmsg => "lvcreate '$vg/$name' error");
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
|
||||
if (my $dat = $lvs->{$scfg->{vgname}}) {
|
||||
|
||||
# remove all volume snapshots first
|
||||
foreach my $lv (keys %$dat) {
|
||||
next if $lv !~ m/^snap_${volname}_${PVE::JSONSchema::CONFIGID_RE}$/;
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$lv"];
|
||||
run_command($cmd, errmsg => "lvremove snapshot '$vg/$lv' error");
|
||||
}
|
||||
|
||||
# finally remove original (if exists)
|
||||
if ($dat->{$volname}) {
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/$volname' error");
|
||||
}
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
my $vgname = $scfg->{vgname};
|
||||
|
||||
$cache->{lvs} = PVE::Storage::LVMPlugin::lvm_list_volumes() if !$cache->{lvs};
|
||||
|
||||
my $res = [];
|
||||
|
||||
if (my $dat = $cache->{lvs}->{$vgname}) {
|
||||
|
||||
foreach my $volname (keys %$dat) {
|
||||
|
||||
next if $volname !~ m/^(vm|base)-(\d+)-/;
|
||||
my $owner = $2;
|
||||
|
||||
my $info = $dat->{$volname};
|
||||
|
||||
next if $info->{lv_type} ne 'V';
|
||||
|
||||
next if $info->{pool_lv} ne $scfg->{thinpool};
|
||||
|
||||
my $volid = "$storeid:$volname";
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $volid } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
|
||||
push @$res, {
|
||||
volid => $volid, format => 'raw', size => $info->{lv_size}, vmid => $owner,
|
||||
ctime => $info->{ctime},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub list_thinpools {
|
||||
my ($vg) = @_;
|
||||
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
my $thinpools = [];
|
||||
|
||||
foreach my $vg (keys %$lvs) {
|
||||
foreach my $lvname (keys %{$lvs->{$vg}}) {
|
||||
next if $lvs->{$vg}->{$lvname}->{lv_type} ne 't';
|
||||
my $lv = $lvs->{$vg}->{$lvname};
|
||||
$lv->{lv} = $lvname;
|
||||
$lv->{vg} = $vg;
|
||||
push @$thinpools, $lv;
|
||||
}
|
||||
}
|
||||
|
||||
return $thinpools;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $lvs = $cache->{lvs} ||= PVE::Storage::LVMPlugin::lvm_list_volumes();
|
||||
|
||||
return if !$lvs->{$scfg->{vgname}};
|
||||
|
||||
my $info = $lvs->{$scfg->{vgname}}->{$scfg->{thinpool}};
|
||||
|
||||
return if !$info || $info->{lv_type} ne 't' || !$info->{lv_size};
|
||||
|
||||
return (
|
||||
$info->{lv_size},
|
||||
$info->{lv_size} - $info->{used},
|
||||
$info->{used},
|
||||
$info->{lv_state} eq 'a' ? 1 : 0,
|
||||
);
|
||||
}
|
||||
|
||||
my $activate_lv = sub {
|
||||
my ($vg, $lv, $cache) = @_;
|
||||
|
||||
my $lvs = $cache->{lvs} ||= PVE::Storage::LVMPlugin::lvm_list_volumes();
|
||||
|
||||
die "no such logical volume $vg/$lv\n" if !$lvs->{$vg} || !$lvs->{$vg}->{$lv};
|
||||
|
||||
return if $lvs->{$vg}->{$lv}->{lv_state} eq 'a';
|
||||
|
||||
run_command(['lvchange', '-ay', '-K', "$vg/$lv"], errmsg => "activating LV '$vg/$lv' failed");
|
||||
|
||||
$lvs->{$vg}->{$lv}->{lv_state} = 'a'; # update cache
|
||||
|
||||
return;
|
||||
};
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
|
||||
$activate_lv->($scfg->{vgname}, $scfg->{thinpool}, $cache);
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lv = $snapname ? "snap_${volname}_$snapname" : $volname;
|
||||
|
||||
$activate_lv->($vg, $lv, $cache);
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
return if !$snapname && $volname !~ /^base-/; # other volumes are kept active
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lv = $snapname ? "snap_${volname}_$snapname" : $volname;
|
||||
|
||||
run_command(['lvchange', '-an', "$vg/$lv"], errmsg => "deactivate_volume '$vg/$lv' error");
|
||||
|
||||
$cache->{lvs}->{$vg}->{$lv}->{lv_state} = '-' # update cache
|
||||
if $cache->{lvs} && $cache->{lvs}->{$vg} && $cache->{lvs}->{$vg}->{$lv};
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
|
||||
my $lv;
|
||||
|
||||
if ($snap) {
|
||||
$lv = "$vg/snap_${volname}_$snap";
|
||||
} else {
|
||||
my ($vtype, undef, undef, undef, undef, $isBase, $format) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "clone_image only works on base images\n" if !$isBase;
|
||||
|
||||
$lv = "$vg/$volname";
|
||||
}
|
||||
|
||||
my $name = $class->find_free_diskname($storeid, $scfg, $vmid);
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-n', $name, '-prw', '-kn', '-s', $lv];
|
||||
run_command($cmd, errmsg => "clone image '$lv' error");
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "create_base not possible with base image\n" if $isBase;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $lvs = PVE::Storage::LVMPlugin::lvm_list_volumes($vg);
|
||||
|
||||
if (my $dat = $lvs->{$vg}) {
|
||||
# to avoid confusion, reject if we find volume snapshots
|
||||
foreach my $lv (keys %$dat) {
|
||||
die "unable to create base volume - found snaphost '$lv'\n"
|
||||
if $lv =~ m/^snap_${volname}_(\w+)$/;
|
||||
}
|
||||
}
|
||||
|
||||
my $newname = $name;
|
||||
$newname =~ s/^vm-/base-/;
|
||||
|
||||
my $cmd = ['/sbin/lvrename', $vg, $volname, $newname];
|
||||
run_command($cmd, errmsg => "lvrename '$vg/$volname' => '$vg/$newname' error");
|
||||
|
||||
# set inactive, read-only and activationskip flags
|
||||
$cmd = ['/sbin/lvchange', '-an', '-pr', '-ky', "$vg/$newname"];
|
||||
eval { run_command($cmd); };
|
||||
warn $@ if $@;
|
||||
|
||||
my $newvolname = $newname;
|
||||
|
||||
return $newvolname;
|
||||
}
|
||||
|
||||
# sub volume_resize {} reuse code from parent class
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $snapvol = "snap_${volname}_$snap";
|
||||
|
||||
my $cmd = ['/sbin/lvcreate', '-n', $snapvol, '-pr', '-s', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "lvcreate snapshot '$vg/$snapvol' error");
|
||||
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $snapvol = "snap_${volname}_$snap";
|
||||
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$volname"];
|
||||
run_command($cmd, errmsg => "lvremove '$vg/$volname' error");
|
||||
|
||||
$cmd = ['/sbin/lvcreate', '-kn', '-n', $volname, '-s', "$vg/$snapvol"];
|
||||
run_command($cmd, errmsg => "lvm rollback '$vg/$snapvol' error");
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my $vg = $scfg->{vgname};
|
||||
my $snapvol = "snap_${volname}_$snap";
|
||||
|
||||
my $cmd = ['/sbin/lvremove', '-f', "$vg/$snapvol"];
|
||||
run_command($cmd, errmsg => "lvremove snapshot '$vg/$snapvol' error");
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
snapshot => { current => 1 },
|
||||
clone => { base => 1, snap => 1},
|
||||
template => { current => 1},
|
||||
copy => { base => 1, current => 1, snap => 1},
|
||||
sparseinit => { base => 1, current => 1},
|
||||
rename => {current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
# used in LVMPlugin->volume_import
|
||||
sub volume_import_write {
|
||||
my ($class, $input_fh, $output_file) = @_;
|
||||
run_command(['dd', "of=$output_file", 'conv=sparse', 'bs=64k'],
|
||||
input => '<&'.fileno($input_fh));
|
||||
}
|
||||
|
||||
1;
|
||||
21
src/PVE/Storage/Makefile
Normal file
@ -0,0 +1,21 @@
|
||||
SOURCES= \
|
||||
Plugin.pm \
|
||||
DirPlugin.pm \
|
||||
LVMPlugin.pm \
|
||||
NFSPlugin.pm \
|
||||
CIFSPlugin.pm \
|
||||
ISCSIPlugin.pm \
|
||||
CephFSPlugin.pm \
|
||||
RBDPlugin.pm \
|
||||
ISCSIDirectPlugin.pm \
|
||||
GlusterfsPlugin.pm \
|
||||
ZFSPoolPlugin.pm \
|
||||
ZFSPlugin.pm \
|
||||
PBSPlugin.pm \
|
||||
BTRFSPlugin.pm \
|
||||
LvmThinPlugin.pm
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/Storage/$$i; done
|
||||
make -C LunCmd install
|
||||
228
src/PVE/Storage/NFSPlugin.pm
Normal file
@ -0,0 +1,228 @@
|
||||
package PVE::Storage::NFSPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use IO::File;
|
||||
use Net::IP;
|
||||
use File::Path;
|
||||
|
||||
use PVE::Network;
|
||||
use PVE::Tools qw(run_command);
|
||||
use PVE::ProcFSTools;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# NFS helper functions
|
||||
|
||||
sub nfs_is_mounted {
|
||||
my ($server, $export, $mountpoint, $mountdata) = @_;
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
my $source = "$server:$export";
|
||||
|
||||
$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
|
||||
return $mountpoint if grep {
|
||||
$_->[2] =~ /^nfs/ &&
|
||||
$_->[0] =~ m|^\Q$source\E/?$| &&
|
||||
$_->[1] eq $mountpoint
|
||||
} @$mountdata;
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub nfs_mount {
|
||||
my ($server, $export, $mountpoint, $options) = @_;
|
||||
|
||||
$server = "[$server]" if Net::IP::ip_is_ipv6($server);
|
||||
my $source = "$server:$export";
|
||||
|
||||
my $cmd = ['/bin/mount', '-t', 'nfs', $source, $mountpoint];
|
||||
if ($options) {
|
||||
push @$cmd, '-o', $options;
|
||||
}
|
||||
|
||||
run_command($cmd, errmsg => "mount error");
|
||||
}
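# Hypothetical usage (server, export and mountpoint are placeholders):
#   nfs_mount('192.0.2.10', '/export/backup', '/mnt/pve/nfs1', 'vers=4.2');
# runs '/bin/mount -t nfs 192.0.2.10:/export/backup /mnt/pve/nfs1 -o vers=4.2';
# an IPv6 server address would be wrapped in brackets first.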
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'nfs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1 },
|
||||
{ images => 1 }],
|
||||
format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
export => {
|
||||
description => "NFS export path.",
|
||||
type => 'string', format => 'pve-storage-path',
|
||||
},
|
||||
server => {
|
||||
description => "Server IP or DNS name.",
|
||||
type => 'string', format => 'pve-storage-server',
|
||||
},
|
||||
options => {
|
||||
description => "NFS mount options (see 'man nfs')",
|
||||
type => 'string', format => 'pve-storage-options',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
path => { fixed => 1 },
|
||||
'content-dirs' => { optional => 1 },
|
||||
server => { fixed => 1 },
|
||||
export => { fixed => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
options => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
format => { optional => 1 },
|
||||
mkdir => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
preallocation => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub check_config {
|
||||
my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
|
||||
|
||||
$config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
|
||||
|
||||
return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
return undef if !nfs_is_mounted($server, $export, $path, $cache->{mountdata});
|
||||
|
||||
return $class->SUPER::status($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
if (!nfs_is_mounted($server, $export, $path, $cache->{mountdata})) {
|
||||
# NOTE: only call mkpath when not mounted (avoid hang when NFS server is offline)
|
||||
mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});
|
||||
|
||||
die "unable to activate storage '$storeid' - " .
|
||||
"directory '$path' does not exist\n" if ! -d $path;
|
||||
|
||||
nfs_mount($server, $export, $path, $scfg->{options});
|
||||
}
|
||||
|
||||
$class->SUPER::activate_storage($storeid, $scfg, $cache);
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
|
||||
if !$cache->{mountdata};
|
||||
|
||||
my $path = $scfg->{path};
|
||||
my $server = $scfg->{server};
|
||||
my $export = $scfg->{export};
|
||||
|
||||
if (nfs_is_mounted($server, $export, $path, $cache->{mountdata})) {
|
||||
my $cmd = ['/bin/umount', $path];
|
||||
run_command($cmd, errmsg => 'umount error');
|
||||
}
|
||||
}
|
||||
|
||||
sub check_connection {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
my $server = $scfg->{server};
|
||||
my $opts = $scfg->{options};
|
||||
|
||||
my $cmd;
|
||||
|
||||
my $is_v4 = defined($opts) && $opts =~ /vers=4.*/;
|
||||
if ($is_v4) {
|
||||
my $ip = PVE::JSONSchema::pve_verify_ip($server, 1);
|
||||
if (!defined($ip)) {
|
||||
$ip = PVE::Network::get_ip_from_hostname($server);
|
||||
}
|
||||
|
||||
my $transport = PVE::JSONSchema::pve_verify_ipv4($ip, 1) ? 'tcp' : 'tcp6';
|
||||
|
||||
# nfsv4 uses a pseudo-filesystem always beginning with /
|
||||
# no exports are listed
|
||||
$cmd = ['/usr/sbin/rpcinfo', '-T', $transport, $ip, 'nfs', '4'];
|
||||
} else {
|
||||
$cmd = ['/sbin/showmount', '--no-headers', '--exports', $server];
|
||||
}
|
||||
|
||||
eval { run_command($cmd, timeout => 10, outfunc => sub {}, errfunc => sub {}) };
|
||||
if (my $err = $@) {
|
||||
if ($is_v4) {
|
||||
my $port = 2049;
|
||||
$port = $1 if defined($opts) && $opts =~ /port=(\d+)/;
|
||||
|
||||
# rpcinfo is expected to work when the port is 0 (see 'man 5 nfs') and tcp_ping()
|
||||
# defaults to port 7 when passing in 0.
|
||||
return 0 if $port == 0;
|
||||
|
||||
return PVE::Network::tcp_ping($server, $port, 2);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
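# Sketch of the probes issued above (addresses are placeholders): for an NFSv4
# mount the check runs roughly
#   /usr/sbin/rpcinfo -T tcp 192.0.2.10 nfs 4
# while otherwise it runs
#   /sbin/showmount --no-headers --exports 192.0.2.10
# falling back to a plain TCP ping of the NFS port when rpcinfo fails.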
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use get_volume_attribute instead.
|
||||
sub get_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::get_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use update_volume_attribute instead.
|
||||
sub update_volume_notes {
|
||||
my $class = shift;
|
||||
PVE::Storage::DirPlugin::update_volume_notes($class, @_);
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::get_volume_attribute(@_);
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
return PVE::Storage::DirPlugin::update_volume_attribute(@_);
|
||||
}
|
||||
|
||||
1;
|
||||
981
src/PVE/Storage/PBSPlugin.pm
Normal file
@ -0,0 +1,981 @@
|
||||
package PVE::Storage::PBSPlugin;
|
||||
|
||||
# Plugin to access Proxmox Backup Server
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Fcntl qw(F_GETFD F_SETFD FD_CLOEXEC);
|
||||
use IO::File;
|
||||
use JSON;
|
||||
use MIME::Base64 qw(decode_base64);
|
||||
use POSIX qw(mktime strftime ENOENT);
|
||||
use POSIX::strptime;
|
||||
|
||||
use PVE::APIClient::LWP;
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::Network;
|
||||
use PVE::PBSClient;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Tools qw(run_command file_read_firstline trim dir_glob_regex dir_glob_foreach $IPV6RE);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'pbs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {backup => 1, none => 1}, { backup => 1 }],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
datastore => {
|
||||
description => "Proxmox Backup Server datastore name.",
|
||||
type => 'string',
|
||||
},
|
||||
# openssl s_client -connect <host>:8007 2>&1 |openssl x509 -fingerprint -sha256
|
||||
fingerprint => get_standard_option('fingerprint-sha256'),
|
||||
'encryption-key' => {
|
||||
description => "Encryption key. Use 'autogen' to generate one automatically without passphrase.",
|
||||
type => 'string',
|
||||
},
|
||||
'master-pubkey' => {
|
||||
description => "Base64-encoded, PEM-formatted public RSA key. Used to encrypt a copy of the encryption-key which will be added to each encrypted backup.",
|
||||
type => 'string',
|
||||
},
|
||||
port => {
|
||||
description => "For non default port.",
|
||||
type => 'integer',
|
||||
minimum => 1,
|
||||
maximum => 65535,
|
||||
default => 8007,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
server => { fixed => 1 },
|
||||
datastore => { fixed => 1 },
|
||||
namespace => { optional => 1 },
|
||||
port => { optional => 1 },
|
||||
nodes => { optional => 1},
|
||||
disable => { optional => 1},
|
||||
content => { optional => 1},
|
||||
username => { optional => 1 },
|
||||
password => { optional => 1 },
|
||||
'encryption-key' => { optional => 1 },
|
||||
'master-pubkey' => { optional => 1 },
|
||||
maxfiles => { optional => 1 },
|
||||
'prune-backups' => { optional => 1 },
|
||||
'max-protected-backups' => { optional => 1 },
|
||||
fingerprint => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Helpers
|
||||
|
||||
sub pbs_password_file_name {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
return "/etc/pve/priv/storage/${storeid}.pw";
|
||||
}
|
||||
|
||||
sub pbs_set_password {
|
||||
my ($scfg, $storeid, $password) = @_;
|
||||
|
||||
my $pwfile = pbs_password_file_name($scfg, $storeid);
|
||||
mkdir "/etc/pve/priv/storage";
|
||||
|
||||
PVE::Tools::file_set_contents($pwfile, "$password\n");
|
||||
}
|
||||
|
||||
sub pbs_delete_password {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_password_file_name($scfg, $storeid);
|
||||
|
||||
unlink $pwfile;
|
||||
}
|
||||
|
||||
sub pbs_get_password {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_password_file_name($scfg, $storeid);
|
||||
|
||||
return PVE::Tools::file_read_firstline($pwfile);
|
||||
}
|
||||
|
||||
sub pbs_encryption_key_file_name {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
return "/etc/pve/priv/storage/${storeid}.enc";
|
||||
}
|
||||
|
||||
sub pbs_set_encryption_key {
|
||||
my ($scfg, $storeid, $key) = @_;
|
||||
|
||||
my $pwfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
mkdir "/etc/pve/priv/storage";
|
||||
|
||||
PVE::Tools::file_set_contents($pwfile, "$key\n");
|
||||
}
|
||||
|
||||
sub pbs_delete_encryption_key {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
|
||||
if (!unlink $pwfile) {
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete encryption key! $!\n";
|
||||
}
|
||||
delete $scfg->{'encryption-key'};
|
||||
}
|
||||
|
||||
sub pbs_get_encryption_key {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
|
||||
return PVE::Tools::file_get_contents($pwfile);
|
||||
}
|
||||
|
||||
# Returns a file handle if there is an encryption key, or `undef` if there is not. Dies on error.
|
||||
sub pbs_open_encryption_key {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $encryption_key_file = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
|
||||
my $keyfd;
|
||||
if (!open($keyfd, '<', $encryption_key_file)) {
|
||||
if ($! == ENOENT) {
|
||||
my $encryption_fp = $scfg->{'encryption-key'};
|
||||
die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
|
||||
if $encryption_fp;
|
||||
return undef;
|
||||
}
|
||||
die "failed to open encryption key: $encryption_key_file: $!\n";
|
||||
}
|
||||
|
||||
return $keyfd;
|
||||
}
|
||||
|
||||
sub pbs_master_pubkey_file_name {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
return "/etc/pve/priv/storage/${storeid}.master.pem";
|
||||
}
|
||||
|
||||
sub pbs_set_master_pubkey {
|
||||
my ($scfg, $storeid, $key) = @_;
|
||||
|
||||
my $pwfile = pbs_master_pubkey_file_name($scfg, $storeid);
|
||||
mkdir "/etc/pve/priv/storage";
|
||||
|
||||
PVE::Tools::file_set_contents($pwfile, "$key\n");
|
||||
}
|
||||
|
||||
sub pbs_delete_master_pubkey {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_master_pubkey_file_name($scfg, $storeid);
|
||||
|
||||
if (!unlink $pwfile) {
|
||||
return if $! == ENOENT;
|
||||
die "failed to delete master public key! $!\n";
|
||||
}
|
||||
delete $scfg->{'master-pubkey'};
|
||||
}
|
||||
|
||||
sub pbs_get_master_pubkey {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pwfile = pbs_master_pubkey_file_name($scfg, $storeid);
|
||||
|
||||
return PVE::Tools::file_get_contents($pwfile);
|
||||
}
|
||||
|
||||
# Returns a file handle if there is a master key, or `undef` if there is not. Dies on error.
|
||||
sub pbs_open_master_pubkey {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $master_pubkey_file = pbs_master_pubkey_file_name($scfg, $storeid);
|
||||
|
||||
my $keyfd;
|
||||
if (!open($keyfd, '<', $master_pubkey_file)) {
|
||||
if ($! == ENOENT) {
|
||||
die "master public key configured but no key file found!\n"
|
||||
if $scfg->{'master-pubkey'};
|
||||
return undef;
|
||||
}
|
||||
die "failed to open master public key: $master_pubkey_file: $!\n";
|
||||
}
|
||||
|
||||
return $keyfd;
|
||||
}
|
||||
|
||||
sub print_volid {
|
||||
my ($storeid, $btype, $bid, $btime) = @_;
|
||||
|
||||
my $time_str = strftime("%FT%TZ", gmtime($btime));
|
||||
my $volname = "backup/${btype}/${bid}/${time_str}";
|
||||
|
||||
return "${storeid}:${volname}";
|
||||
}
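# Worked example with a made-up storage ID: print_volid('pbs1', 'vm', 100, 1704067200)
# formats the epoch in UTC and returns 'pbs1:backup/vm/100/2024-01-01T00:00:00Z'.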
|
||||
|
||||
my sub ns : prototype($$) {
|
||||
my ($scfg, $name) = @_;
|
||||
my $ns = $scfg->{namespace};
|
||||
return defined($ns) ? ($name, $ns) : ();
|
||||
}
|
||||
|
||||
# essentially the inverse of print_volid
|
||||
my sub api_param_from_volname : prototype($$$) {
|
||||
my ($class, $scfg, $volname) = @_;
|
||||
|
||||
my $name = ($class->parse_volname($volname))[1];
|
||||
|
||||
my ($btype, $bid, $timestr) = split('/', $name);
|
||||
|
||||
my @tm = (POSIX::strptime($timestr, "%FT%TZ"));
|
||||
# expect sec, min, hour, mday, mon, year
|
||||
die "error parsing time from '$volname'" if grep { !defined($_) } @tm[0..5];
|
||||
|
||||
my $btime;
|
||||
{
|
||||
local $ENV{TZ} = 'UTC'; # $timestr is UTC
|
||||
|
||||
# Fill in isdst to avoid undef warning. No daylight saving time for UTC.
|
||||
$tm[8] //= 0;
|
||||
|
||||
my $since_epoch = mktime(@tm) or die "error converting time from '$volname'\n";
|
||||
$btime = int($since_epoch);
|
||||
}
|
||||
|
||||
return {
|
||||
(ns($scfg, 'ns')),
|
||||
'backup-type' => $btype,
|
||||
'backup-id' => $bid,
|
||||
'backup-time' => $btime,
|
||||
};
|
||||
}
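# Illustrative result for a hypothetical volname: 'backup/vm/100/2024-01-01T00:00:00Z'
# yields { 'backup-type' => 'vm', 'backup-id' => '100', 'backup-time' => 1704067200 },
# plus an 'ns' entry when the storage has a namespace configured.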
|
||||
|
||||
my $USE_CRYPT_PARAMS = {
|
||||
backup => 1,
|
||||
restore => 1,
|
||||
'upload-log' => 1,
|
||||
};
|
||||
|
||||
my $USE_MASTER_KEY = {
|
||||
backup => 1,
|
||||
};
|
||||
|
||||
my sub do_raw_client_cmd {
|
||||
my ($scfg, $storeid, $client_cmd, $param, %opts) = @_;
|
||||
|
||||
my $use_crypto = $USE_CRYPT_PARAMS->{$client_cmd};
|
||||
my $use_master = $USE_MASTER_KEY->{$client_cmd};
|
||||
|
||||
my $client_exe = '/usr/bin/proxmox-backup-client';
|
||||
die "executable not found '$client_exe'! Proxmox backup client not installed?\n"
|
||||
if ! -x $client_exe;
|
||||
|
||||
my $repo = PVE::PBSClient::get_repository($scfg);
|
||||
|
||||
my $userns_cmd = delete $opts{userns_cmd};
|
||||
|
||||
my $cmd = [];
|
||||
|
||||
push @$cmd, @$userns_cmd if defined($userns_cmd);
|
||||
|
||||
push @$cmd, $client_exe, $client_cmd;
|
||||
|
||||
# This must live in the top scope to not get closed before the `run_command`
|
||||
my ($keyfd, $master_fd);
|
||||
if ($use_crypto) {
|
||||
if (defined($keyfd = pbs_open_encryption_key($scfg, $storeid))) {
|
||||
my $flags = fcntl($keyfd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($keyfd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from encryption key file descriptor\n";
|
||||
push @$cmd, '--crypt-mode=encrypt', '--keyfd='.fileno($keyfd);
|
||||
if ($use_master && defined($master_fd = pbs_open_master_pubkey($scfg, $storeid))) {
|
||||
my $flags = fcntl($master_fd, F_GETFD, 0)
|
||||
// die "failed to get file descriptor flags: $!\n";
|
||||
fcntl($master_fd, F_SETFD, $flags & ~FD_CLOEXEC)
|
||||
or die "failed to remove FD_CLOEXEC from master public key file descriptor\n";
|
||||
push @$cmd, '--master-pubkey-fd='.fileno($master_fd);
|
||||
}
|
||||
} else {
|
||||
push @$cmd, '--crypt-mode=none';
|
||||
}
|
||||
}
|
||||
|
||||
push @$cmd, @$param if defined($param);
|
||||
|
||||
push @$cmd, "--repository", $repo;
|
||||
if ($client_cmd ne 'status' && defined(my $ns = $scfg->{namespace})) {
|
||||
push @$cmd, '--ns', $ns;
|
||||
}
|
||||
|
||||
local $ENV{PBS_PASSWORD} = pbs_get_password($scfg, $storeid);
|
||||
|
||||
local $ENV{PBS_FINGERPRINT} = $scfg->{fingerprint};
|
||||
|
||||
# no ascii-art on task logs
|
||||
local $ENV{PROXMOX_OUTPUT_NO_BORDER} = 1;
|
||||
local $ENV{PROXMOX_OUTPUT_NO_HEADER} = 1;
|
||||
|
||||
if (my $logfunc = $opts{logfunc}) {
|
||||
$logfunc->("run: " . join(' ', @$cmd));
|
||||
}
|
||||
|
||||
run_command($cmd, %opts);
|
||||
}
|
||||
|
||||
# FIXME: External perl code should NOT have access to this.
|
||||
#
|
||||
# There should be separate functions to
|
||||
# - make backups
|
||||
# - restore backups
|
||||
# - restore files
|
||||
# with a sane API
|
||||
sub run_raw_client_cmd {
|
||||
my ($scfg, $storeid, $client_cmd, $param, %opts) = @_;
|
||||
return do_raw_client_cmd($scfg, $storeid, $client_cmd, $param, %opts);
|
||||
}
|
||||
|
||||
sub run_client_cmd {
|
||||
my ($scfg, $storeid, $client_cmd, $param, $no_output) = @_;
|
||||
|
||||
my $json_str = '';
|
||||
my $outfunc = sub { $json_str .= "$_[0]\n" };
|
||||
|
||||
$param = [] if !defined($param);
|
||||
$param = [ $param ] if !ref($param);
|
||||
|
||||
$param = [@$param, '--output-format=json'] if !$no_output;
|
||||
|
||||
do_raw_client_cmd($scfg, $storeid, $client_cmd, $param,
|
||||
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
|
||||
|
||||
return undef if $no_output;
|
||||
|
||||
my $res = decode_json($json_str);
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub extract_vzdump_config {
|
||||
my ($class, $scfg, $volname, $storeid) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, undef, undef, undef, $format) = $class->parse_volname($volname);
|
||||
|
||||
my $config = '';
|
||||
my $outfunc = sub { $config .= "$_[0]\n" };
|
||||
|
||||
my $config_name;
|
||||
if ($format eq 'pbs-vm') {
|
||||
$config_name = 'qemu-server.conf';
|
||||
} elsif ($format eq 'pbs-ct') {
|
||||
$config_name = 'pct.conf';
|
||||
} else {
|
||||
die "unable to extract configuration for backup format '$format'\n";
|
||||
}
|
||||
|
||||
do_raw_client_cmd($scfg, $storeid, 'restore', [ $name, $config_name, '-' ],
|
||||
outfunc => $outfunc, errmsg => 'proxmox-backup-client failed');
|
||||
|
||||
return $config;
|
||||
}
|
||||
|
||||
sub prune_backups {
|
||||
my ($class, $scfg, $storeid, $keep, $vmid, $type, $dryrun, $logfunc) = @_;
|
||||
|
||||
$logfunc //= sub { print "$_[1]\n" };
|
||||
|
||||
$type = 'vm' if defined($type) && $type eq 'qemu';
|
||||
$type = 'ct' if defined($type) && $type eq 'lxc';
|
||||
|
||||
my $backup_groups = {};
|
||||
|
||||
if (defined($vmid) && defined($type)) {
|
||||
# no need to get the list of volumes, we only got a single backup group anyway
|
||||
$backup_groups->{"$type/$vmid"} = 1;
|
||||
} else {
|
||||
my $backups = eval { $class->list_volumes($storeid, $scfg, $vmid, ['backup']) };
|
||||
die "failed to get list of all backups to prune - $@" if $@;
|
||||
|
||||
foreach my $backup (@{$backups}) {
|
||||
(my $backup_type = $backup->{format}) =~ s/^pbs-//;
|
||||
next if defined($type) && $backup_type ne $type;
|
||||
|
||||
my $backup_group = "$backup_type/$backup->{vmid}";
|
||||
$backup_groups->{$backup_group} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
my @param;
|
||||
|
||||
my $keep_all = delete $keep->{'keep-all'};
|
||||
|
||||
if (!$keep_all) {
|
||||
foreach my $opt (keys %{$keep}) {
|
||||
next if $keep->{$opt} == 0;
|
||||
push @param, "--$opt";
|
||||
push @param, "$keep->{$opt}";
|
||||
}
|
||||
} else { # no need to pass anything to PBS
|
||||
$keep = { 'keep-all' => 1 };
|
||||
}
|
||||
|
||||
push @param, '--dry-run' if $dryrun;
|
||||
|
||||
my $prune_list = [];
|
||||
my $failed;
|
||||
|
||||
foreach my $backup_group (keys %{$backup_groups}) {
|
||||
$logfunc->('info', "running 'proxmox-backup-client prune' for '$backup_group'")
|
||||
if !$dryrun;
|
||||
eval {
|
||||
my $res = run_client_cmd($scfg, $storeid, 'prune', [ $backup_group, @param ]);
|
||||
|
||||
foreach my $backup (@{$res}) {
|
||||
die "result from proxmox-backup-client is not as expected\n"
|
||||
if !defined($backup->{'backup-time'})
|
||||
|| !defined($backup->{'backup-type'})
|
||||
|| !defined($backup->{'backup-id'})
|
||||
|| !defined($backup->{'keep'});
|
||||
|
||||
my $ctime = $backup->{'backup-time'};
|
||||
my $type = $backup->{'backup-type'};
|
||||
my $vmid = $backup->{'backup-id'};
|
||||
my $volid = print_volid($storeid, $type, $vmid, $ctime);
|
||||
|
||||
my $mark = $backup->{keep} ? 'keep' : 'remove';
|
||||
$mark = 'protected' if $backup->{protected};
|
||||
|
||||
push @{$prune_list}, {
|
||||
ctime => $ctime,
|
||||
mark => $mark,
|
||||
type => $type eq 'vm' ? 'qemu' : 'lxc',
|
||||
vmid => $vmid,
|
||||
volid => $volid,
|
||||
};
|
||||
}
|
||||
};
|
||||
if (my $err = $@) {
|
||||
$logfunc->('err', "prune '$backup_group': $err\n");
|
||||
$failed = 1;
|
||||
}
|
||||
}
|
||||
die "error pruning backups - check log\n" if $failed;
|
||||
|
||||
return $prune_list;
|
||||
}
|
||||
|
||||
my $autogen_encryption_key = sub {
|
||||
my ($scfg, $storeid) = @_;
|
||||
my $encfile = pbs_encryption_key_file_name($scfg, $storeid);
|
||||
if (-f $encfile) {
|
||||
rename $encfile, "$encfile.old";
|
||||
}
|
||||
my $cmd = ['proxmox-backup-client', 'key', 'create', '--kdf', 'none', $encfile];
|
||||
run_command($cmd, errmsg => 'failed to create encryption key');
|
||||
return PVE::Tools::file_get_contents($encfile);
|
||||
};
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
my $res = {};
|
||||
|
||||
if (defined(my $password = $param{password})) {
|
||||
pbs_set_password($scfg, $storeid, $password);
|
||||
} else {
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
}
|
||||
|
||||
if (defined(my $encryption_key = $param{'encryption-key'})) {
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
} else {
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
}
|
||||
|
||||
if (defined(my $master_key = delete $param{'master-pubkey'})) {
|
||||
die "'master-pubkey' can only be used together with 'encryption-key'\n"
|
||||
if !defined($scfg->{'encryption-key'});
|
||||
|
||||
my $decoded = decode_base64($master_key);
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
} else {
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub on_update_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
my $res = {};
|
||||
|
||||
if (exists($param{password})) {
|
||||
if (defined($param{password})) {
|
||||
pbs_set_password($scfg, $storeid, $param{password});
|
||||
} else {
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
if (exists($param{'encryption-key'})) {
|
||||
if (defined(my $encryption_key = delete($param{'encryption-key'}))) {
|
||||
my $decoded_key;
|
||||
if ($encryption_key eq 'autogen') {
|
||||
$res->{'encryption-key'} = $autogen_encryption_key->($scfg, $storeid);
|
||||
$decoded_key = decode_json($res->{'encryption-key'});
|
||||
} else {
|
||||
$decoded_key = eval { decode_json($encryption_key) };
|
||||
if ($@ || !exists($decoded_key->{data})) {
|
||||
die "Value does not seems like a valid, JSON formatted encryption key!\n";
|
||||
}
|
||||
pbs_set_encryption_key($scfg, $storeid, $encryption_key);
|
||||
$res->{'encryption-key'} = $encryption_key;
|
||||
}
|
||||
$scfg->{'encryption-key'} = $decoded_key->{fingerprint} || 1;
|
||||
} else {
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
delete $scfg->{'encryption-key'};
|
||||
}
|
||||
}
|
||||
|
||||
if (exists($param{'master-pubkey'})) {
|
||||
if (defined(my $master_key = delete($param{'master-pubkey'}))) {
|
||||
my $decoded = decode_base64($master_key);
|
||||
|
||||
pbs_set_master_pubkey($scfg, $storeid, $decoded);
|
||||
$scfg->{'master-pubkey'} = 1;
|
||||
} else {
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub on_delete_hook {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
|
||||
pbs_delete_password($scfg, $storeid);
|
||||
pbs_delete_encryption_key($scfg, $storeid);
|
||||
pbs_delete_master_pubkey($scfg, $storeid);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m!^backup/([^\s_]+)/([^\s_]+)/([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$!) {
|
||||
my $btype = $1;
|
||||
my $bid = $2;
|
||||
my $btime = $3;
|
||||
my $format = "pbs-$btype";
|
||||
|
||||
my $name = "$btype/$bid/$btime";
|
||||
|
||||
if ($bid =~ m/^\d+$/) {
|
||||
return ('backup', $name, $bid, undef, undef, undef, $format);
|
||||
} else {
|
||||
return ('backup', $name, undef, undef, undef, undef, $format);
|
||||
}
|
||||
}
|
||||
|
||||
die "unable to parse PBS volume name '$volname'\n";
|
||||
}
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
die "volume snapshot is not possible on pbs storage"
|
||||
if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $repo = PVE::PBSClient::get_repository($scfg);
|
||||
|
||||
# artificial url - we currently do not use that anywhere
|
||||
my $path = "pbs://$repo/$name";
|
||||
if (defined(my $ns = $scfg->{namespace})) {
|
||||
$ns =~ s|/|%2f|g; # other characters to escape aren't allowed in the namespace schema
|
||||
$path .= "?ns=$ns";
|
||||
}
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
die "can't create base images in pbs storage\n";
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
die "can't clone images in pbs storage\n";
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "can't allocate space in pbs storage\n";
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
run_client_cmd($scfg, $storeid, "forget", [ $name ], 1);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
my $res = [];
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
my sub snapshot_files_encrypted {
|
||||
my ($files) = @_;
|
||||
return 0 if !$files;
|
||||
|
||||
my $any;
|
||||
my $all = 1;
|
||||
for my $file (@$files) {
|
||||
my $fn = $file->{filename};
|
||||
next if $fn eq 'client.log.blob' || $fn eq 'index.json.blob';
|
||||
|
||||
my $crypt = $file->{'crypt-mode'};
|
||||
|
||||
$all = 0 if !$crypt || $crypt ne 'encrypt';
|
||||
$any ||= defined($crypt) && $crypt eq 'encrypt';
|
||||
}
|
||||
return $any && $all;
|
||||
}
|
||||
|
||||
# TODO: use a client with native rust/proxmox-backup bindings to profit from
|
||||
# API schema checks and types
|
||||
my sub pbs_api_connect {
|
||||
my ($scfg, $password, $timeout) = @_;
|
||||
|
||||
my $params = {};
|
||||
|
||||
my $user = $scfg->{username} // 'root@pam';
|
||||
|
||||
if (my $tokenid = PVE::AccessControl::pve_verify_tokenid($user, 1)) {
|
||||
$params->{apitoken} = "PBSAPIToken=${tokenid}:${password}";
|
||||
} else {
|
||||
$params->{password} = $password;
|
||||
$params->{username} = $user;
|
||||
}
|
||||
|
||||
if (my $fp = $scfg->{fingerprint}) {
|
||||
$params->{cached_fingerprints}->{uc($fp)} = 1;
|
||||
}
|
||||
|
||||
my $conn = PVE::APIClient::LWP->new(
|
||||
%$params,
|
||||
host => $scfg->{server},
|
||||
port => $scfg->{port} // 8007,
|
||||
timeout => ($timeout // 7), # cope with a 401 (3s api delay) and high latency
|
||||
cookie_name => 'PBSAuthCookie',
|
||||
);
|
||||
|
||||
return $conn;
|
||||
}
|
||||
|
||||
sub list_volumes {
|
||||
my ($class, $storeid, $scfg, $vmid, $content_types) = @_;
|
||||
|
||||
my $res = [];
|
||||
|
||||
return $res if !grep { $_ eq 'backup' } @$content_types;
|
||||
|
||||
my $password = pbs_get_password($scfg, $storeid);
|
||||
my $conn = pbs_api_connect($scfg, $password, 120);
|
||||
my $datastore = $scfg->{datastore};
|
||||
|
||||
my $param = {};
|
||||
$param->{'backup-id'} = "$vmid" if defined($vmid);
|
||||
$param->{'ns'} = "$scfg->{namespace}" if defined($scfg->{namespace});
|
||||
my $data = eval { $conn->get("/api2/json/admin/datastore/$datastore/snapshots", $param); };
|
||||
die "error listing snapshots - $@" if $@;
|
||||
|
||||
foreach my $item (@$data) {
|
||||
my $btype = $item->{"backup-type"};
|
||||
my $bid = $item->{"backup-id"};
|
||||
my $epoch = $item->{"backup-time"};
|
||||
my $size = $item->{size} // 1;
|
||||
|
||||
next if !($btype eq 'vm' || $btype eq 'ct');
|
||||
next if $bid !~ m/^\d+$/;
|
||||
next if defined($vmid) && $bid ne $vmid;
|
||||
|
||||
my $volid = print_volid($storeid, $btype, $bid, $epoch);
|
||||
|
||||
my $info = {
|
||||
volid => $volid,
|
||||
format => "pbs-$btype",
|
||||
size => $size,
|
||||
content => 'backup',
|
||||
vmid => int($bid),
|
||||
ctime => $epoch,
|
||||
subtype => $btype eq 'vm' ? 'qemu' : 'lxc', # convert to PVE backup type
|
||||
};
|
||||
|
||||
$info->{verification} = $item->{verification} if defined($item->{verification});
|
||||
$info->{notes} = $item->{comment} if defined($item->{comment});
|
||||
$info->{protected} = 1 if $item->{protected};
|
||||
if (defined($item->{fingerprint})) {
|
||||
$info->{encrypted} = $item->{fingerprint};
|
||||
} elsif (snapshot_files_encrypted($item->{files})) {
|
||||
$info->{encrypted} = '1';
|
||||
}
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $total = 0;
|
||||
my $free = 0;
|
||||
my $used = 0;
|
||||
my $active = 0;
|
||||
|
||||
eval {
|
||||
my $res = run_client_cmd($scfg, $storeid, "status");
|
||||
|
||||
$active = 1;
|
||||
$total = $res->{total};
|
||||
$used = $res->{used};
|
||||
$free = $res->{avail};
|
||||
};
|
||||
if (my $err = $@) {
|
||||
warn $err;
|
||||
}
|
||||
|
||||
return ($total, $free, $used, $active);
|
||||
}
|
||||
|
||||
# can also be used for not (yet) added storages, pass $scfg with
|
||||
# {
|
||||
# server
|
||||
# username
|
||||
# port (optional default to 8007)
|
||||
# fingerprint (optional for trusted certs)
|
||||
# }
|
||||
sub scan_datastores {
|
||||
my ($scfg, $password) = @_;
|
||||
|
||||
my $conn = pbs_api_connect($scfg, $password);
|
||||
|
||||
my $response = eval { $conn->get('/api2/json/admin/datastore', {}) };
|
||||
die "error fetching datastores - $@" if $@;
|
||||
|
||||
return $response;
|
||||
}
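# Illustrative sketch only (hypothetical values): scan_datastores() also works
# with an ad-hoc config hash for a storage that is not added yet, as described
# in the comment above:
#
#   my $datastores = scan_datastores({
#       server => 'pbs.example.com',
#       username => 'backup@pbs',
#       port => 8007,                # optional, defaults to 8007
#       fingerprint => 'AB:CD:...',  # needed unless the certificate is trusted
#   }, $password);
#   my @names = map { $_->{store} } @$datastores;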
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $password = pbs_get_password($scfg, $storeid);
|
||||
|
||||
my $datastores = eval { scan_datastores($scfg, $password) };
|
||||
die "$storeid: $@" if $@;
|
||||
|
||||
my $datastore = $scfg->{datastore};
|
||||
|
||||
for my $ds (@$datastores) {
|
||||
if ($ds->{store} eq $datastore) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
die "$storeid: Cannot find datastore '$datastore', check permissions and existence!\n";
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "volume snapshot is not possible on pbs device" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "volume snapshot is not possible on pbs device" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use get_volume_attribute instead.
|
||||
sub get_volume_notes {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
|
||||
|
||||
my $data = run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "show", $name ]);
|
||||
|
||||
return $data->{notes};
|
||||
}
|
||||
|
||||
# FIXME remove on the next APIAGE reset.
|
||||
# Deprecated, use update_volume_attribute instead.
|
||||
sub update_volume_notes {
|
||||
my ($class, $scfg, $storeid, $volname, $notes, $timeout) = @_;
|
||||
|
||||
my (undef, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
|
||||
|
||||
run_client_cmd($scfg, $storeid, "snapshot", [ "notes", "update", $name, $notes ], 1);
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub get_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $class->get_volume_notes($scfg, $storeid, $volname);
|
||||
}
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $param = api_param_from_volname($class, $scfg, $volname);
|
||||
|
||||
my $password = pbs_get_password($scfg, $storeid);
|
||||
my $conn = pbs_api_connect($scfg, $password);
|
||||
my $datastore = $scfg->{datastore};
|
||||
|
||||
my $res = eval { $conn->get("/api2/json/admin/datastore/$datastore/$attribute", $param); };
|
||||
if (my $err = $@) {
|
||||
return if $err->{code} == 404; # not supported
|
||||
die $err;
|
||||
}
|
||||
return $res;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub update_volume_attribute {
|
||||
my ($class, $scfg, $storeid, $volname, $attribute, $value) = @_;
|
||||
|
||||
if ($attribute eq 'notes') {
|
||||
return $class->update_volume_notes($scfg, $storeid, $volname, $value);
|
||||
}
|
||||
|
||||
if ($attribute eq 'protected') {
|
||||
my $param = api_param_from_volname($class, $scfg, $volname);
|
||||
$param->{$attribute} = $value;
|
||||
|
||||
my $password = pbs_get_password($scfg, $storeid);
|
||||
my $conn = pbs_api_connect($scfg, $password);
|
||||
my $datastore = $scfg->{datastore};
|
||||
|
||||
eval { $conn->put("/api2/json/admin/datastore/$datastore/$attribute", $param); };
|
||||
if (my $err = $@) {
|
||||
die "Server is not recent enough to support feature '$attribute'\n"
|
||||
if $err->{code} == 404;
|
||||
die $err;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
die "attribute '$attribute' is not supported for storage type '$scfg->{type}'\n";
|
||||
}
|
||||
|
||||
sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my ($vtype, $name, undef, undef, undef, undef, $format) = $class->parse_volname($volname);
|
||||
|
||||
my $data = run_client_cmd($scfg, $storeid, "files", [ $name ]);
|
||||
|
||||
my $size = 0;
|
||||
foreach my $info (@$data) {
|
||||
if ($info->{size} && $info->{size} =~ /^(\d+)$/) { # untaints
|
||||
$size += $1;
|
||||
}
|
||||
}
|
||||
|
||||
my $used = $size;
|
||||
|
||||
return wantarray ? ($size, $format, $used, undef) : $size;
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
die "volume resize is not possible on pbs device";
|
||||
}
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot is not possible on pbs device";
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot rollback is not possible on pbs device";
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
die "volume snapshot delete is not possible on pbs device";
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
1;
|
||||
1701
src/PVE/Storage/Plugin.pm
Normal file
File diff suppressed because it is too large
891
src/PVE/Storage/RBDPlugin.pm
Normal file
@ -0,0 +1,891 @@
|
||||
package PVE::Storage::RBDPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Cwd qw(abs_path);
|
||||
use IO::File;
|
||||
use JSON;
|
||||
use Net::IP;
|
||||
|
||||
use PVE::CephConfig;
|
||||
use PVE::Cluster qw(cfs_read_file);
|
||||
use PVE::JSONSchema qw(get_standard_option);
|
||||
use PVE::ProcFSTools;
|
||||
use PVE::RADOS;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Tools qw(run_command trim file_read_firstline);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
my $get_parent_image_name = sub {
|
||||
my ($parent) = @_;
|
||||
return undef if !$parent;
|
||||
return $parent->{image} . "@" . $parent->{snapshot};
|
||||
};
|
||||
|
||||
my $librados_connect = sub {
|
||||
my ($scfg, $storeid, $options) = @_;
|
||||
|
||||
$options->{timeout} = 60
|
||||
if !defined($options->{timeout}) && PVE::RPCEnvironment->is_worker();
|
||||
|
||||
my $librados_config = PVE::CephConfig::ceph_connect_option($scfg, $storeid, $options->%*);
|
||||
|
||||
my $rados = PVE::RADOS->new(%$librados_config);
|
||||
|
||||
return $rados;
|
||||
};
|
||||
|
||||
my sub get_rbd_path {
|
||||
my ($scfg, $volume) = @_;
|
||||
my $path = $scfg->{pool} ? $scfg->{pool} : 'rbd';
|
||||
$path .= "/$scfg->{namespace}" if defined($scfg->{namespace});
|
||||
$path .= "/$volume" if defined($volume);
|
||||
return $path;
|
||||
};
|
||||
|
||||
my sub get_rbd_dev_path {
|
||||
my ($scfg, $storeid, $volume) = @_;
|
||||
|
||||
my $cluster_id = '';
|
||||
if ($scfg->{fsid}) {
|
||||
# NOTE: the config doesn't support this currently (but it could!), hack for qemu-server tests
|
||||
$cluster_id = $scfg->{fsid};
|
||||
} elsif ($scfg->{monhost}) {
|
||||
my $rados = $librados_connect->($scfg, $storeid);
|
||||
$cluster_id = $rados->mon_command({ prefix => 'fsid', format => 'json' })->{fsid};
|
||||
} else {
|
||||
$cluster_id = cfs_read_file('ceph.conf')->{global}->{fsid};
|
||||
}
|
||||
|
||||
my $uuid_pattern = "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})";
|
||||
if ($cluster_id =~ qr/^${uuid_pattern}$/is) {
|
||||
$cluster_id = $1; # use untainted value
|
||||
} else {
|
||||
die "cluster fsid has invalid format\n";
|
||||
}
|
||||
|
||||
my $rbd_path = get_rbd_path($scfg, $volume);
|
||||
my $pve_path = "/dev/rbd-pve/${cluster_id}/${rbd_path}";
|
||||
my $path = "/dev/rbd/${rbd_path}";
|
||||
|
||||
if (!-e $pve_path && -e $path) {
|
||||
# possibly mapped before rbd-pve rule existed
|
||||
my $real_dev = abs_path($path);
|
||||
my ($rbd_id) = ($real_dev =~ m|/dev/rbd([0-9]+)$|);
|
||||
my $dev_cluster_id = file_read_firstline("/sys/devices/rbd/${rbd_id}/cluster_fsid");
|
||||
return $path if $cluster_id eq $dev_cluster_id;
|
||||
}
|
||||
return $pve_path;
|
||||
}
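# Illustrative only (hypothetical fsid/pool/image names): the helper above
# prefers the per-cluster udev link and only falls back to the legacy path for
# devices that were mapped before the rbd-pve udev rule existed, e.g.:
#
#   /dev/rbd-pve/1234abcd-.../mypool/myns/vm-100-disk-0   # preferred
#   /dev/rbd/mypool/myns/vm-100-disk-0                    # legacy fallback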
|
||||
|
||||
my $build_cmd = sub {
|
||||
my ($binary, $scfg, $storeid, $op, @options) = @_;
|
||||
|
||||
my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
|
||||
my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd';
|
||||
|
||||
my $cmd = [$binary, '-p', $pool];
|
||||
|
||||
if (defined(my $namespace = $scfg->{namespace})) {
|
||||
# some subcommands will fail if the --namespace parameter is present
|
||||
my $no_namespace_parameter = {
|
||||
unmap => 1,
|
||||
};
|
||||
push @$cmd, '--namespace', "$namespace" if !$no_namespace_parameter->{$op};
|
||||
}
|
||||
push @$cmd, '-c', $cmd_option->{ceph_conf} if ($cmd_option->{ceph_conf});
|
||||
push @$cmd, '-m', $cmd_option->{mon_host} if ($cmd_option->{mon_host});
|
||||
push @$cmd, '--auth_supported', $cmd_option->{auth_supported} if ($cmd_option->{auth_supported});
|
||||
push @$cmd, '-n', "client.$cmd_option->{userid}" if ($cmd_option->{userid});
|
||||
push @$cmd, '--keyring', $cmd_option->{keyring} if ($cmd_option->{keyring});
|
||||
|
||||
push @$cmd, $op;
|
||||
|
||||
push @$cmd, @options if scalar(@options);
|
||||
|
||||
return $cmd;
|
||||
};
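# Illustrative only (hypothetical config values): for a storage with pool
# 'cephpool' and namespace 'ns1' on an external cluster, the builder above
# produces a command along the lines of
#
#   /usr/bin/rbd -p cephpool --namespace ns1 \
#       -m <mon_host> -n client.<userid> --keyring <keyring file> ls -l
#
# where the -c/-m/--auth_supported/-n/--keyring parts are only added when the
# corresponding connect options are set.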
|
||||
|
||||
my $rbd_cmd = sub {
|
||||
my ($scfg, $storeid, $op, @options) = @_;
|
||||
|
||||
return $build_cmd->('/usr/bin/rbd', $scfg, $storeid, $op, @options);
|
||||
};
|
||||
|
||||
my $rados_cmd = sub {
|
||||
my ($scfg, $storeid, $op, @options) = @_;
|
||||
|
||||
return $build_cmd->('/usr/bin/rados', $scfg, $storeid, $op, @options);
|
||||
};
|
||||
|
||||
# needed for volumes created using ceph jewel (or higher)
|
||||
my $krbd_feature_update = sub {
|
||||
my ($scfg, $storeid, $name) = @_;
|
||||
|
||||
my (@disable, @enable);
|
||||
my ($kmajor, $kminor) = PVE::ProcFSTools::kernel_version();
|
||||
|
||||
if ($kmajor > 5 || $kmajor == 5 && $kminor >= 3) {
|
||||
# 'deep-flatten' can only be disabled, not enabled after image creation
|
||||
push @enable, 'fast-diff', 'object-map';
|
||||
} else {
|
||||
push @disable, 'fast-diff', 'object-map', 'deep-flatten';
|
||||
}
|
||||
|
||||
if ($kmajor >= 5) {
|
||||
push @enable, 'exclusive-lock';
|
||||
} else {
|
||||
push @disable, 'exclusive-lock';
|
||||
}
|
||||
|
||||
my $active_features_list = (rbd_volume_info($scfg, $storeid, $name))[4];
|
||||
my $active_features = { map { $_ => 1 } @$active_features_list };
|
||||
|
||||
my $to_disable = join(',', grep { $active_features->{$_} } @disable);
|
||||
my $to_enable = join(',', grep { !$active_features->{$_} } @enable );
|
||||
|
||||
if ($to_disable) {
|
||||
print "disable RBD image features this kernel RBD drivers is not compatible with: $to_disable\n";
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'disable', $name, $to_disable);
|
||||
run_rbd_command(
|
||||
$cmd,
|
||||
errmsg => "could not disable krbd-incompatible image features '$to_disable' for rbd image: $name",
|
||||
);
|
||||
}
|
||||
if ($to_enable) {
|
||||
print "enable RBD image features this kernel RBD drivers supports: $to_enable\n";
|
||||
eval {
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'feature', 'enable', $name, $to_enable);
|
||||
run_rbd_command(
|
||||
$cmd,
|
||||
errmsg => "could not enable krbd-compatible image features '$to_enable' for rbd image: $name",
|
||||
);
|
||||
};
|
||||
warn "$@" if $@;
|
||||
}
|
||||
};
|
||||
|
||||
sub run_rbd_command {
|
||||
my ($cmd, %args) = @_;
|
||||
|
||||
my $lasterr;
|
||||
my $errmsg = $args{errmsg} ? "$args{errmsg}: " : "";
|
||||
if (!exists($args{errfunc})) {
|
||||
# ' error: 2014-02-06 11:51:59.839135 7f09f94d0760 -1 librbd: snap_unprotect: can't unprotect;
|
||||
# at least 1 child(ren) in pool cephstor1
|
||||
$args{errfunc} = sub {
|
||||
my $line = shift;
|
||||
if ($line =~ m/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ [0-9a-f]+ [\-\d]+ librbd: (.*)$/) {
|
||||
$lasterr = "$1\n";
|
||||
} else {
|
||||
$lasterr = $line;
|
||||
}
|
||||
print STDERR $lasterr;
|
||||
*STDERR->flush();
|
||||
};
|
||||
}
|
||||
|
||||
eval { run_command($cmd, %args); };
|
||||
if (my $err = $@) {
|
||||
die $errmsg . $lasterr if length($lasterr);
|
||||
die $err;
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub rbd_ls {
|
||||
my ($scfg, $storeid) = @_;
|
||||
|
||||
my $pool = $scfg->{pool} ? $scfg->{pool} : 'rbd';
|
||||
$pool .= "/$scfg->{namespace}" if defined($scfg->{namespace});
|
||||
|
||||
my $raw = '';
|
||||
my $parser = sub { $raw .= shift };
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'ls', '-l', '--format', 'json');
|
||||
eval {
|
||||
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
die $err if $err && $err !~ m/doesn't contain rbd images/;
|
||||
|
||||
my $result;
|
||||
if ($raw eq '') {
|
||||
$result = [];
|
||||
} elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
|
||||
$result = JSON::decode_json($1);
|
||||
} else {
|
||||
die "got unexpected data from rbd ls: '$raw'\n";
|
||||
}
|
||||
|
||||
my $list = {};
|
||||
|
||||
foreach my $el (@$result) {
|
||||
next if defined($el->{snapshot});
|
||||
|
||||
my $image = $el->{image};
|
||||
|
||||
my ($owner) = $image =~ m/^(?:vm|base)-(\d+)-/;
|
||||
next if !defined($owner);
|
||||
|
||||
$list->{$pool}->{$image} = {
|
||||
name => $image,
|
||||
size => $el->{size},
|
||||
parent => $get_parent_image_name->($el->{parent}),
|
||||
vmid => $owner
|
||||
};
|
||||
}
|
||||
|
||||
return $list;
|
||||
}
|
||||
|
||||
sub rbd_ls_snap {
|
||||
my ($scfg, $storeid, $name) = @_;
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'ls', $name, '--format', 'json');
|
||||
|
||||
my $raw = '';
|
||||
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => sub { $raw .= shift; });
|
||||
|
||||
my $list;
|
||||
if ($raw =~ m/^(\[.*\])$/s) { # untaint
|
||||
$list = eval { JSON::decode_json($1) };
|
||||
die "invalid JSON output from 'rbd snap ls $name': $@\n" if $@;
|
||||
} else {
|
||||
die "got unexpected data from 'rbd snap ls $name': '$raw'\n";
|
||||
}
|
||||
|
||||
$list = [] if !defined($list);
|
||||
|
||||
my $res = {};
|
||||
foreach my $el (@$list) {
|
||||
my $snap = $el->{name};
|
||||
my $protected = defined($el->{protected}) && $el->{protected} eq "true" ? 1 : undef;
|
||||
$res->{$snap} = {
|
||||
name => $snap,
|
||||
id => $el->{id} // undef,
|
||||
size => $el->{size} // 0,
|
||||
protected => $protected,
|
||||
};
|
||||
}
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub rbd_volume_info {
|
||||
my ($scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my $cmd = undef;
|
||||
|
||||
my @options = ('info', $volname, '--format', 'json');
|
||||
if ($snap) {
|
||||
push @options, '--snap', $snap;
|
||||
}
|
||||
|
||||
$cmd = $rbd_cmd->($scfg, $storeid, @options);
|
||||
|
||||
my $raw = '';
|
||||
my $parser = sub { $raw .= shift };
|
||||
|
||||
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
|
||||
|
||||
my $volume;
|
||||
if ($raw eq '') {
|
||||
$volume = {};
|
||||
} elsif ($raw =~ m/^(\{.*\})$/s) { # untaint
|
||||
$volume = JSON::decode_json($1);
|
||||
} else {
|
||||
die "got unexpected data from rbd info: '$raw'\n";
|
||||
}
|
||||
|
||||
$volume->{parent} = $get_parent_image_name->($volume->{parent});
|
||||
$volume->{protected} = defined($volume->{protected}) && $volume->{protected} eq "true" ? 1 : undef;
|
||||
|
||||
return $volume->@{qw(size parent format protected features)};
|
||||
}
|
||||
|
||||
sub rbd_volume_du {
|
||||
my ($scfg, $storeid, $volname) = @_;
|
||||
|
||||
my @options = ('du', $volname, '--format', 'json');
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, @options);
|
||||
|
||||
my $raw = '';
|
||||
my $parser = sub { $raw .= shift };
|
||||
|
||||
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
|
||||
|
||||
my $volume;
|
||||
if ($raw eq '') {
|
||||
$volume = {};
|
||||
} elsif ($raw =~ m/^(\{.*\})$/s) { # untaint
|
||||
$volume = JSON::decode_json($1);
|
||||
} else {
|
||||
die "got unexpected data from rbd du: '$raw'\n";
|
||||
}
|
||||
|
||||
if (!defined($volume->{images})) {
|
||||
die "got no images from rbd du\n";
|
||||
}
|
||||
|
||||
# `rbd du` returns array of images for name matching `volname`,
|
||||
# including snapshots.
|
||||
my $images = $volume->{images};
|
||||
foreach my $image (@$images) {
|
||||
next if defined($image->{snapshot});
|
||||
next if !defined($image->{used_size}) || !defined($image->{name});
|
||||
|
||||
# Return `used_size` of first volume with matching name which
|
||||
# is not a snapshot.
|
||||
return $image->{used_size} if $image->{name} eq $volname;
|
||||
}
|
||||
|
||||
die "got no matching image from rbd du\n";
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'rbd';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, { images => 1 }],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
monhost => {
|
||||
description => "IP addresses of monitors (for external clusters).",
|
||||
type => 'string', format => 'pve-storage-portal-dns-list',
|
||||
},
|
||||
pool => {
|
||||
description => "Pool.",
|
||||
type => 'string',
|
||||
},
|
||||
'data-pool' => {
|
||||
description => "Data Pool (for erasure coding only)",
|
||||
type => 'string',
|
||||
},
|
||||
namespace => {
|
||||
description => "Namespace.",
|
||||
type => 'string',
|
||||
},
|
||||
username => {
|
||||
description => "RBD Id.",
|
||||
type => 'string',
|
||||
},
|
||||
authsupported => {
|
||||
description => "Authsupported.",
|
||||
type => 'string',
|
||||
},
|
||||
krbd => {
|
||||
description => "Always access rbd through krbd kernel module.",
|
||||
type => 'boolean',
|
||||
},
|
||||
keyring => {
|
||||
description => "Client keyring contents (for external clusters).",
|
||||
type => 'string',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
monhost => { optional => 1},
|
||||
pool => { optional => 1 },
|
||||
'data-pool' => { optional => 1 },
|
||||
namespace => { optional => 1 },
|
||||
username => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
krbd => { optional => 1 },
|
||||
keyring => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_update_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
if (exists($param{keyring})) {
|
||||
if (defined($param{keyring})) {
|
||||
PVE::CephConfig::ceph_create_keyfile($scfg->{type}, $storeid, $param{keyring});
|
||||
} else {
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub on_delete_hook {
|
||||
my ($class, $storeid, $scfg) = @_;
|
||||
PVE::CephConfig::ceph_remove_keyfile($scfg->{type}, $storeid);
|
||||
return;
|
||||
}
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m/^((base-(\d+)-\S+)\/)?((base)?(vm)?-(\d+)-\S+)$/) {
|
||||
return ('images', $4, $7, $2, $3, $5, 'raw');
|
||||
}
|
||||
|
||||
die "unable to parse rbd volume name '$volname'\n";
|
||||
}
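# Illustrative only: the pattern above yields, for a linked clone
# 'base-100-disk-0/vm-101-disk-0',
#   ('images', 'vm-101-disk-0', '101', 'base-100-disk-0', '100', undef, 'raw')
# and for a plain 'vm-101-disk-0',
#   ('images', 'vm-101-disk-0', '101', undef, undef, undef, 'raw').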
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
my $cmd_option = PVE::CephConfig::ceph_connect_option($scfg, $storeid);
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
$name .= '@'.$snapname if $snapname;
|
||||
|
||||
if ($scfg->{krbd}) {
|
||||
my $rbd_dev_path = get_rbd_dev_path($scfg, $storeid, $name);
|
||||
return ($rbd_dev_path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
my $rbd_path = get_rbd_path($scfg, $name);
|
||||
my $path = "rbd:${rbd_path}";
|
||||
|
||||
$path .= ":conf=$cmd_option->{ceph_conf}" if $cmd_option->{ceph_conf};
|
||||
if (defined($scfg->{monhost})) {
|
||||
my $monhost = PVE::CephConfig::hostlist($scfg->{monhost}, ';');
|
||||
$monhost =~ s/:/\\:/g;
|
||||
$path .= ":mon_host=$monhost";
|
||||
$path .= ":auth_supported=$cmd_option->{auth_supported}";
|
||||
}
|
||||
|
||||
$path .= ":id=$cmd_option->{userid}:keyring=$cmd_option->{keyring}" if ($cmd_option->{keyring});
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
sub find_free_diskname {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $add_fmt_suffix) = @_;
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'ls');
|
||||
|
||||
my $disk_list = [];
|
||||
|
||||
my $parser = sub {
|
||||
my $line = shift;
|
||||
if ($line =~ m/^(.*)$/) { # untaint
|
||||
push @$disk_list, $1;
|
||||
}
|
||||
};
|
||||
|
||||
eval {
|
||||
run_rbd_command($cmd, errmsg => "rbd error", errfunc => sub {}, outfunc => $parser);
|
||||
};
|
||||
my $err = $@;
|
||||
|
||||
die $err if $err && $err !~ m/doesn't contain rbd images/;
|
||||
|
||||
return PVE::Storage::Plugin::get_next_vm_diskname($disk_list, $storeid, $vmid, undef, $scfg);
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
my $snap = '__base__';
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "create_base not possible with base image\n" if $isBase;
|
||||
|
||||
my ($size, $parent, $format, undef) = rbd_volume_info($scfg, $storeid, $name);
|
||||
die "rbd volume info on '$name' failed\n" if !($size);
|
||||
|
||||
die "rbd image must be at format V2" if $format ne "2";
|
||||
|
||||
die "volname '$volname' contains wrong information about parent $parent $basename\n"
|
||||
if $basename && (!$parent || $parent ne $basename."@".$snap);
|
||||
|
||||
my $newname = $name;
|
||||
$newname =~ s/^vm-/base-/;
|
||||
|
||||
my $newvolname = $basename ? "$basename/$newname" : "$newname";
|
||||
|
||||
my $cmd = $rbd_cmd->(
|
||||
$scfg,
|
||||
$storeid,
|
||||
'rename',
|
||||
get_rbd_path($scfg, $name),
|
||||
get_rbd_path($scfg, $newname),
|
||||
);
|
||||
run_rbd_command($cmd, errmsg => "rbd rename '$name' error");
|
||||
|
||||
eval { $class->unmap_volume($storeid, $scfg, $volname); };
|
||||
warn $@ if $@;
|
||||
|
||||
my $running = undef; # FIXME: is create_base always offline?
|
||||
|
||||
$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);
|
||||
|
||||
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $newname, $snap);
|
||||
|
||||
if (!$protected){
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $newname, '--snap', $snap);
|
||||
run_rbd_command($cmd, errmsg => "rbd protect $newname snap '$snap' error");
|
||||
}
|
||||
|
||||
return $newvolname;
|
||||
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snapname) = @_;
|
||||
|
||||
my $snap = '__base__';
|
||||
$snap = $snapname if length $snapname;
|
||||
|
||||
my ($vtype, $basename, $basevmid, undef, undef, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "$volname is not a base image and snapname is not provided\n"
|
||||
if !$isBase && !length($snapname);
|
||||
|
||||
my $name = $class->find_free_diskname($storeid, $scfg, $vmid);
|
||||
|
||||
warn "clone $volname: $basename snapname $snap to $name\n";
|
||||
|
||||
if (length($snapname)) {
|
||||
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $volname, $snapname);
|
||||
|
||||
if (!$protected) {
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'protect', $volname, '--snap', $snapname);
|
||||
run_rbd_command($cmd, errmsg => "rbd protect $volname snap $snapname error");
|
||||
}
|
||||
}
|
||||
|
||||
my $newvol = "$basename/$name";
|
||||
$newvol = $name if length($snapname);
|
||||
|
||||
my @options = (
|
||||
get_rbd_path($scfg, $basename),
|
||||
'--snap', $snap,
|
||||
);
|
||||
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'clone', @options, get_rbd_path($scfg, $name));
|
||||
run_rbd_command($cmd, errmsg => "rbd clone '$basename' error");
|
||||
|
||||
return $newvol;
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
$name = $class->find_free_diskname($storeid, $scfg, $vmid) if !$name;
|
||||
|
||||
my @options = (
|
||||
'--image-format' , 2,
|
||||
'--size', int(($size + 1023) / 1024),
|
||||
);
|
||||
push @options, ('--data-pool', $scfg->{'data-pool'}) if $scfg->{'data-pool'};
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'create', @options, $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd create '$name' error");
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid, undef, undef, undef) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
|
||||
my $snaps = rbd_ls_snap($scfg, $storeid, $name);
|
||||
foreach my $snap (keys %$snaps) {
|
||||
if ($snaps->{$snap}->{protected}) {
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
|
||||
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
|
||||
}
|
||||
}
|
||||
|
||||
$class->deactivate_volume($storeid, $scfg, $volname);
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'purge', $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd snap purge '$name' error");
|
||||
|
||||
$cmd = $rbd_cmd->($scfg, $storeid, 'rm', $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd rm '$name' error");
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub list_images {
|
||||
my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;
|
||||
|
||||
$cache->{rbd} = rbd_ls($scfg, $storeid) if !$cache->{rbd};
|
||||
|
||||
my $dat = $cache->{rbd}->{get_rbd_path($scfg)};
|
||||
return [] if !$dat; # nothing found
|
||||
|
||||
my $res = [];
|
||||
for my $image (sort keys %$dat) {
|
||||
my $info = $dat->{$image};
|
||||
my ($volname, $parent, $owner) = $info->@{'name', 'parent', 'vmid'};
|
||||
|
||||
if ($parent && $parent =~ m/^(base-\d+-\S+)\@__base__$/) {
|
||||
$info->{volid} = "$storeid:$1/$volname";
|
||||
} else {
|
||||
$info->{volid} = "$storeid:$volname";
|
||||
}
|
||||
|
||||
if ($vollist) {
|
||||
my $found = grep { $_ eq $info->{volid} } @$vollist;
|
||||
next if !$found;
|
||||
} else {
|
||||
next if defined ($vmid) && ($owner ne $vmid);
|
||||
}
|
||||
|
||||
$info->{format} = 'raw';
|
||||
|
||||
push @$res, $info;
|
||||
}
|
||||
|
||||
return $res;
|
||||
}
|
||||
|
||||
sub status {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
my $rados = $librados_connect->($scfg, $storeid);
|
||||
my $df = $rados->mon_command({ prefix => 'df', format => 'json' });
|
||||
|
||||
my $pool = $scfg->{'data-pool'} // $scfg->{pool} // 'rbd';
|
||||
|
||||
my ($d) = grep { $_->{name} eq $pool } @{$df->{pools}};
|
||||
|
||||
if (!defined($d)) {
|
||||
warn "could not get usage stats for pool '$pool'\n";
|
||||
return;
|
||||
}
|
||||
|
||||
# max_avail -> max available space for data w/o replication in the pool
|
||||
# bytes_used -> data w/o replication in the pool
|
||||
my $free = $d->{stats}->{max_avail};
|
||||
my $used = $d->{stats}->{stored} // $d->{stats}->{bytes_used};
|
||||
my $total = $used + $free;
|
||||
my $active = 1;
|
||||
|
||||
return ($total, $free, $used, $active);
|
||||
}
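# Illustrative only (hypothetical pool stats): with max_avail = 300 GiB and
# stored = 100 GiB for the (data-)pool, the storage reports used = 100 GiB,
# free = 300 GiB and total = 400 GiB, i.e. sizes without replication overhead.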
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub map_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
my ($vtype, $img_name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $name = $img_name;
|
||||
$name .= '@'.$snapname if $snapname;
|
||||
|
||||
my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);
|
||||
|
||||
return $kerneldev if -b $kerneldev; # already mapped
|
||||
|
||||
# features can only be enabled/disabled for image, not for snapshot!
|
||||
$krbd_feature_update->($scfg, $storeid, $img_name);
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'map', $name);
|
||||
run_rbd_command($cmd, errmsg => "can't map rbd volume $name");
|
||||
|
||||
return $kerneldev;
|
||||
}
|
||||
|
||||
sub unmap_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
$name .= '@'.$snapname if $snapname;
|
||||
|
||||
my $kerneldev = get_rbd_dev_path($scfg, $storeid, $name);
|
||||
|
||||
if (-b $kerneldev) {
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'unmap', $kerneldev);
|
||||
run_rbd_command($cmd, errmsg => "can't unmap rbd device $kerneldev");
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
$class->map_volume($storeid, $scfg, $volname, $snapname) if $scfg->{krbd};
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
$class->unmap_volume($storeid, $scfg, $volname, $snapname);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub volume_size_info {
|
||||
my ($class, $scfg, $storeid, $volname, $timeout) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
my ($size, $parent) = rbd_volume_info($scfg, $storeid, $name);
|
||||
my $used = wantarray ? rbd_volume_du($scfg, $storeid, $name) : 0;
|
||||
return wantarray ? ($size, 'raw', $used, $parent) : $size;
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
|
||||
return 1 if $running && !$scfg->{krbd}; # FIXME???
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'resize', '--allow-shrink', '--size', ($size/1024/1024), $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd resize '$volname' error");
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_snapshot {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'create', '--snap', $snap, $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd snapshot '$volname' error");
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'rollback', '--snap', $snap, $name);
|
||||
run_rbd_command($cmd, errmsg => "rbd snapshot $volname to '$snap' error");
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
|
||||
|
||||
$class->deactivate_volume($storeid, $scfg, $volname, $snap, {});
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my (undef, undef, undef, $protected) = rbd_volume_info($scfg, $storeid, $name, $snap);
|
||||
if ($protected){
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'unprotect', $name, '--snap', $snap);
|
||||
run_rbd_command($cmd, errmsg => "rbd unprotect $name snap '$snap' error");
|
||||
}
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'snap', 'rm', '--snap', $snap, $name);
|
||||
|
||||
run_rbd_command($cmd, errmsg => "rbd snapshot '$volname' error");
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_snapshot_needs_fsfreeze {
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
snapshot => { current => 1, snap => 1},
|
||||
clone => { base => 1, snap => 1},
|
||||
template => { current => 1},
|
||||
copy => { base => 1, current => 1, snap => 1},
|
||||
sparseinit => { base => 1, current => 1},
|
||||
rename => {current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) = $class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
if ($snapname){
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub rename_volume {
|
||||
my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;
|
||||
|
||||
my (
|
||||
undef,
|
||||
$source_image,
|
||||
$source_vmid,
|
||||
$base_name,
|
||||
$base_vmid,
|
||||
undef,
|
||||
$format
|
||||
) = $class->parse_volname($source_volname);
|
||||
$target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
|
||||
if !$target_volname;
|
||||
|
||||
eval {
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'info', $target_volname);
|
||||
run_rbd_command($cmd, errmsg => "exist check", quiet => 1);
|
||||
};
|
||||
die "target volume '${target_volname}' already exists\n" if !$@;
|
||||
|
||||
my $cmd = $rbd_cmd->($scfg, $storeid, 'rename', $source_image, $target_volname);
|
||||
|
||||
run_rbd_command(
|
||||
$cmd,
|
||||
errmsg => "could not rename image '${source_image}' to '${target_volname}'",
|
||||
);
|
||||
|
||||
eval { $class->unmap_volume($storeid, $scfg, $source_volname); };
|
||||
warn $@ if $@;
|
||||
|
||||
$base_name = $base_name ? "${base_name}/" : '';
|
||||
|
||||
return "${storeid}:${base_name}${target_volname}";
|
||||
}
|
||||
|
||||
1;
|
||||
422
src/PVE/Storage/ZFSPlugin.pm
Normal file
@ -0,0 +1,422 @@
|
||||
package PVE::Storage::ZFSPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use IO::File;
|
||||
use POSIX;
|
||||
use PVE::Tools qw(run_command);
|
||||
use PVE::Storage::ZFSPoolPlugin;
|
||||
use PVE::RPCEnvironment;
|
||||
|
||||
use base qw(PVE::Storage::ZFSPoolPlugin);
|
||||
use PVE::Storage::LunCmd::Comstar;
|
||||
use PVE::Storage::LunCmd::Istgt;
|
||||
use PVE::Storage::LunCmd::Iet;
|
||||
use PVE::Storage::LunCmd::LIO;
|
||||
|
||||
|
||||
my @ssh_opts = ('-o', 'BatchMode=yes');
|
||||
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
|
||||
my $id_rsa_path = '/etc/pve/priv/zfs';
|
||||
|
||||
my $lun_cmds = {
|
||||
create_lu => 1,
|
||||
delete_lu => 1,
|
||||
import_lu => 1,
|
||||
modify_lu => 1,
|
||||
add_view => 1,
|
||||
list_view => 1,
|
||||
list_lu => 1,
|
||||
};
|
||||
|
||||
my $zfs_unknown_scsi_provider = sub {
|
||||
my ($provider) = @_;
|
||||
|
||||
die "$provider: unknown iscsi provider. Available [comstar, istgt, iet, LIO]";
|
||||
};
|
||||
|
||||
my $zfs_get_base = sub {
|
||||
my ($scfg) = @_;
|
||||
|
||||
if ($scfg->{iscsiprovider} eq 'comstar') {
|
||||
return PVE::Storage::LunCmd::Comstar::get_base;
|
||||
} elsif ($scfg->{iscsiprovider} eq 'istgt') {
|
||||
return PVE::Storage::LunCmd::Istgt::get_base;
|
||||
} elsif ($scfg->{iscsiprovider} eq 'iet') {
|
||||
return PVE::Storage::LunCmd::Iet::get_base;
|
||||
} elsif ($scfg->{iscsiprovider} eq 'LIO') {
|
||||
return PVE::Storage::LunCmd::LIO::get_base;
|
||||
} else {
|
||||
$zfs_unknown_scsi_provider->($scfg->{iscsiprovider});
|
||||
}
|
||||
};
|
||||
|
||||
sub zfs_request {
|
||||
my ($class, $scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
$timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 10
|
||||
if !$timeout;
|
||||
|
||||
my $msg = '';
|
||||
|
||||
if ($lun_cmds->{$method}) {
|
||||
if ($scfg->{iscsiprovider} eq 'comstar') {
|
||||
$msg = PVE::Storage::LunCmd::Comstar::run_lun_command($scfg, $timeout, $method, @params);
|
||||
} elsif ($scfg->{iscsiprovider} eq 'istgt') {
|
||||
$msg = PVE::Storage::LunCmd::Istgt::run_lun_command($scfg, $timeout, $method, @params);
|
||||
} elsif ($scfg->{iscsiprovider} eq 'iet') {
|
||||
$msg = PVE::Storage::LunCmd::Iet::run_lun_command($scfg, $timeout, $method, @params);
|
||||
} elsif ($scfg->{iscsiprovider} eq 'LIO') {
|
||||
$msg = PVE::Storage::LunCmd::LIO::run_lun_command($scfg, $timeout, $method, @params);
|
||||
} else {
|
||||
$zfs_unknown_scsi_provider->($scfg->{iscsiprovider});
|
||||
}
|
||||
} else {
|
||||
|
||||
my $target = 'root@' . $scfg->{portal};
|
||||
|
||||
my $cmd = [@ssh_cmd, '-i', "$id_rsa_path/$scfg->{portal}_id_rsa", $target];
|
||||
|
||||
if ($method eq 'zpool_list') {
|
||||
push @$cmd, 'zpool', 'list';
|
||||
} else {
|
||||
push @$cmd, 'zfs', $method;
|
||||
}
|
||||
|
||||
push @$cmd, @params;
|
||||
|
||||
my $output = sub {
|
||||
my $line = shift;
|
||||
$msg .= "$line\n";
|
||||
};
|
||||
|
||||
run_command($cmd, outfunc => $output, timeout => $timeout);
|
||||
}
|
||||
|
||||
return $msg;
|
||||
}
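# Illustrative only (hypothetical portal address): non-LUN methods are proxied
# to the storage appliance over ssh, so a 'list' request against portal
# 192.0.2.10 ends up running roughly
#
#   /usr/bin/ssh -o BatchMode=yes -i /etc/pve/priv/zfs/192.0.2.10_id_rsa \
#       root@192.0.2.10 zfs list <params>
#
# while the lun_cmds above are dispatched to the configured iscsi provider.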
|
||||
|
||||
sub zfs_get_lu_name {
|
||||
my ($class, $scfg, $zvol) = @_;
|
||||
|
||||
my $base = $zfs_get_base->($scfg);
|
||||
|
||||
$zvol = ($class->parse_volname($zvol))[1];
|
||||
|
||||
my $object = ($zvol =~ /^.+\/.+/) ? "$base/$zvol" : "$base/$scfg->{pool}/$zvol";
|
||||
|
||||
my $lu_name = $class->zfs_request($scfg, undef, 'list_lu', $object);
|
||||
|
||||
return $lu_name if $lu_name;
|
||||
|
||||
die "Could not find lu_name for zvol $zvol";
|
||||
}
|
||||
|
||||
sub zfs_add_lun_mapping_entry {
|
||||
my ($class, $scfg, $zvol, $guid) = @_;
|
||||
|
||||
if (!defined($guid)) {
|
||||
$guid = $class->zfs_get_lu_name($scfg, $zvol);
|
||||
}
|
||||
|
||||
$class->zfs_request($scfg, undef, 'add_view', $guid);
|
||||
}
|
||||
|
||||
sub zfs_delete_lu {
|
||||
my ($class, $scfg, $zvol) = @_;
|
||||
|
||||
my $guid = $class->zfs_get_lu_name($scfg, $zvol);
|
||||
|
||||
$class->zfs_request($scfg, undef, 'delete_lu', $guid);
|
||||
}
|
||||
|
||||
sub zfs_create_lu {
|
||||
my ($class, $scfg, $zvol) = @_;
|
||||
|
||||
my $base = $zfs_get_base->($scfg);
|
||||
my $guid = $class->zfs_request($scfg, undef, 'create_lu', "$base/$scfg->{pool}/$zvol");
|
||||
|
||||
return $guid;
|
||||
}
|
||||
|
||||
sub zfs_import_lu {
|
||||
my ($class, $scfg, $zvol) = @_;
|
||||
|
||||
my $base = $zfs_get_base->($scfg);
|
||||
$class->zfs_request($scfg, undef, 'import_lu', "$base/$scfg->{pool}/$zvol");
|
||||
}
|
||||
|
||||
sub zfs_resize_lu {
|
||||
my ($class, $scfg, $zvol, $size) = @_;
|
||||
|
||||
my $guid = $class->zfs_get_lu_name($scfg, $zvol);
|
||||
|
||||
$class->zfs_request($scfg, undef, 'modify_lu', "${size}K", $guid);
|
||||
}
|
||||
|
||||
sub zfs_get_lun_number {
|
||||
my ($class, $scfg, $guid) = @_;
|
||||
|
||||
die "could not find lun_number for guid $guid" if !$guid;
|
||||
|
||||
if ($class->zfs_request($scfg, undef, 'list_view', $guid) =~ /^(\d+)$/) {
|
||||
return $1;
|
||||
}
|
||||
|
||||
die "lun_number for guid $guid is not a number";
|
||||
}
|
||||
|
||||
# Configuration
|
||||
|
||||
sub type {
|
||||
return 'zfs';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1}, { images => 1 }],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
iscsiprovider => {
|
||||
description => "iscsi provider",
|
||||
type => 'string',
|
||||
},
|
||||
# this will disable write caching on comstar and istgt.
|
||||
# it is not implemented for iet. iet blockio always operates with
|
||||
# writethrough caching when not in readonly mode
|
||||
nowritecache => {
|
||||
description => "disable write caching on the target",
|
||||
type => 'boolean',
|
||||
},
|
||||
comstar_tg => {
|
||||
description => "target group for comstar views",
|
||||
type => 'string',
|
||||
},
|
||||
comstar_hg => {
|
||||
description => "host group for comstar views",
|
||||
type => 'string',
|
||||
},
|
||||
lio_tpg => {
|
||||
description => "target portal group for Linux LIO targets",
|
||||
type => 'string',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
portal => { fixed => 1 },
|
||||
target => { fixed => 1 },
|
||||
pool => { fixed => 1 },
|
||||
blocksize => { fixed => 1 },
|
||||
iscsiprovider => { fixed => 1 },
|
||||
nowritecache => { optional => 1 },
|
||||
sparse => { optional => 1 },
|
||||
comstar_hg => { optional => 1 },
|
||||
comstar_tg => { optional => 1 },
|
||||
lio_tpg => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# Storage implementation
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
die "direct access to snapshots not implemented"
|
||||
if defined($snapname);
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $target = $scfg->{target};
|
||||
my $portal = $scfg->{portal};
|
||||
|
||||
my $guid = $class->zfs_get_lu_name($scfg, $name);
|
||||
my $lun = $class->zfs_get_lun_number($scfg, $guid);
|
||||
|
||||
my $path = "iscsi://$portal/$target/$lun";
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
sub create_base {
|
||||
my ($class, $storeid, $scfg, $volname) = @_;
|
||||
|
||||
my $snap = '__base__';
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
die "create_base not possible with base image\n" if $isBase;
|
||||
|
||||
my $newname = $name;
|
||||
$newname =~ s/^vm-/base-/;
|
||||
|
||||
my $newvolname = $basename ? "$basename/$newname" : "$newname";
|
||||
|
||||
$class->zfs_delete_lu($scfg, $name);
|
||||
$class->zfs_request($scfg, undef, 'rename', "$scfg->{pool}/$name", "$scfg->{pool}/$newname");
|
||||
|
||||
my $guid = $class->zfs_create_lu($scfg, $newname);
|
||||
$class->zfs_add_lun_mapping_entry($scfg, $newname, $guid);
|
||||
|
||||
my $running = undef; # FIXME: is create_base always offline?
|
||||
|
||||
$class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);
|
||||
|
||||
return $newvolname;
|
||||
}
|
||||
|
||||
sub clone_image {
|
||||
my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
|
||||
|
||||
my $name = $class->SUPER::clone_image($scfg, $storeid, $volname, $vmid, $snap);
|
||||
|
||||
# get ZFS dataset name from PVE volname
|
||||
my (undef, $clonedname) = $class->parse_volname($name);
|
||||
|
||||
my $guid = $class->zfs_create_lu($scfg, $clonedname);
|
||||
$class->zfs_add_lun_mapping_entry($scfg, $clonedname, $guid);
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
die "unsupported format '$fmt'" if $fmt ne 'raw';
|
||||
|
||||
die "illegal name '$name' - should be 'vm-$vmid-*'\n"
|
||||
if $name && $name !~ m/^vm-$vmid-/;
|
||||
|
||||
my $volname = $name;
|
||||
|
||||
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt) if !$volname;
|
||||
|
||||
$class->zfs_create_zvol($scfg, $volname, $size);
|
||||
|
||||
my $guid = $class->zfs_create_lu($scfg, $volname);
|
||||
$class->zfs_add_lun_mapping_entry($scfg, $volname, $guid);
|
||||
|
||||
return $volname;
|
||||
}
|
||||
|
||||
sub free_image {
|
||||
my ($class, $storeid, $scfg, $volname, $isBase) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
$class->zfs_delete_lu($scfg, $name);
|
||||
|
||||
eval { $class->zfs_delete_zvol($scfg, $name); };
|
||||
if (my $err = $@) {
|
||||
my $guid = $class->zfs_create_lu($scfg, $name);
|
||||
$class->zfs_add_lun_mapping_entry($scfg, $name, $guid);
|
||||
die $err;
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub volume_resize {
|
||||
my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
|
||||
|
||||
$volname = ($class->parse_volname($volname))[1];
|
||||
|
||||
my $new_size = $class->SUPER::volume_resize($scfg, $storeid, $volname, $size, $running);
|
||||
|
||||
$class->zfs_resize_lu($scfg, $volname, $new_size);
|
||||
|
||||
return $new_size;
|
||||
}
|
||||
|
||||
sub volume_snapshot_delete {
|
||||
my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
|
||||
|
||||
$volname = ($class->parse_volname($volname))[1];
|
||||
|
||||
$class->zfs_request($scfg, undef, 'destroy', "$scfg->{pool}/$volname\@$snap");
|
||||
}
|
||||
|
||||
sub volume_snapshot_rollback {
|
||||
my ($class, $scfg, $storeid, $volname, $snap) = @_;
|
||||
|
||||
$volname = ($class->parse_volname($volname))[1];
|
||||
|
||||
$class->zfs_delete_lu($scfg, $volname);
|
||||
|
||||
$class->zfs_request($scfg, undef, 'rollback', "$scfg->{pool}/$volname\@$snap");
|
||||
|
||||
$class->zfs_import_lu($scfg, $volname);
|
||||
|
||||
$class->zfs_add_lun_mapping_entry($scfg, $volname);
|
||||
}
|
||||
|
||||
sub storage_can_replicate {
|
||||
my ($class, $scfg, $storeid, $format) = @_;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub volume_has_feature {
|
||||
my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
|
||||
|
||||
my $features = {
|
||||
snapshot => { current => 1, snap => 1},
|
||||
clone => { base => 1},
|
||||
template => { current => 1},
|
||||
copy => { base => 1, current => 1},
|
||||
};
|
||||
|
||||
my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
|
||||
$class->parse_volname($volname);
|
||||
|
||||
my $key = undef;
|
||||
|
||||
if ($snapname) {
|
||||
$key = 'snap';
|
||||
} else {
|
||||
$key = $isBase ? 'base' : 'current';
|
||||
}
|
||||
|
||||
return 1 if $features->{$feature}->{$key};
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub activate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_storage {
|
||||
my ($class, $storeid, $scfg, $cache) = @_;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub activate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "unable to activate snapshot from remote zfs storage" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub deactivate_volume {
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
|
||||
die "unable to deactivate snapshot from remote zfs storage" if $snapname;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
||||
852
src/PVE/Storage/ZFSPoolPlugin.pm
Normal file
@ -0,0 +1,852 @@
|
||||
package PVE::Storage::ZFSPoolPlugin;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use IO::File;
|
||||
use Net::IP;
|
||||
use POSIX;
|
||||
|
||||
use PVE::ProcFSTools;
|
||||
use PVE::RPCEnvironment;
|
||||
use PVE::Storage::Plugin;
|
||||
use PVE::Tools qw(run_command);
|
||||
|
||||
use base qw(PVE::Storage::Plugin);
|
||||
|
||||
sub type {
|
||||
return 'zfspool';
|
||||
}
|
||||
|
||||
sub plugindata {
|
||||
return {
|
||||
content => [ {images => 1, rootdir => 1}, {images => 1 , rootdir => 1}],
|
||||
format => [ { raw => 1, subvol => 1 } , 'raw' ],
|
||||
};
|
||||
}
|
||||
|
||||
sub properties {
|
||||
return {
|
||||
blocksize => {
|
||||
description => "block size",
|
||||
type => 'string',
|
||||
},
|
||||
sparse => {
|
||||
description => "use sparse volumes",
|
||||
type => 'boolean',
|
||||
},
|
||||
mountpoint => {
|
||||
description => "mount point",
|
||||
type => 'string', format => 'pve-storage-path',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub options {
|
||||
return {
|
||||
pool => { fixed => 1 },
|
||||
blocksize => { optional => 1 },
|
||||
sparse => { optional => 1 },
|
||||
nodes => { optional => 1 },
|
||||
disable => { optional => 1 },
|
||||
content => { optional => 1 },
|
||||
bwlimit => { optional => 1 },
|
||||
mountpoint => { optional => 1 },
|
||||
};
|
||||
}
|
||||
|
||||
# static zfs helper methods
|
||||
|
||||
sub zfs_parse_zvol_list {
|
||||
my ($text, $pool) = @_;
|
||||
|
||||
my $list = ();
|
||||
|
||||
return $list if !$text;
|
||||
|
||||
my @lines = split /\n/, $text;
|
||||
foreach my $line (@lines) {
|
||||
my ($dataset, $size, $origin, $type, $refquota) = split(/\s+/, $line);
|
||||
next if !($type eq 'volume' || $type eq 'filesystem');
|
||||
|
||||
my $zvol = {};
|
||||
my @parts = split /\//, $dataset;
|
||||
next if scalar(@parts) < 2; # we need pool/name
|
||||
my $name = pop @parts;
|
||||
my $parsed_pool = join('/', @parts);
|
||||
next if $parsed_pool ne $pool;
|
||||
|
||||
next unless $name =~ m!^(vm|base|subvol|basevol)-(\d+)-(\S+)$!;
|
||||
$zvol->{owner} = $2;
|
||||
|
||||
$zvol->{name} = $name;
|
||||
if ($type eq 'filesystem') {
|
||||
if ($refquota eq 'none') {
|
||||
$zvol->{size} = 0;
|
||||
} else {
|
||||
$zvol->{size} = $refquota + 0;
|
||||
}
|
||||
$zvol->{format} = 'subvol';
|
||||
} else {
|
||||
$zvol->{size} = $size + 0;
|
||||
$zvol->{format} = 'raw';
|
||||
}
|
||||
if ($origin !~ /^-$/) {
|
||||
$zvol->{origin} = $origin;
|
||||
}
|
||||
push @$list, $zvol;
|
||||
}
|
||||
|
||||
return $list;
|
||||
}
|
||||
|
||||
sub parse_volname {
|
||||
my ($class, $volname) = @_;
|
||||
|
||||
if ($volname =~ m/^(((base|basevol)-(\d+)-\S+)\/)?((base|basevol|vm|subvol)-(\d+)-\S+)$/) {
|
||||
my $format = ($6 eq 'subvol' || $6 eq 'basevol') ? 'subvol' : 'raw';
|
||||
my $isBase = ($6 eq 'base' || $6 eq 'basevol');
|
||||
return ('images', $5, $7, $2, $4, $isBase, $format);
|
||||
}
|
||||
|
||||
die "unable to parse zfs volume name '$volname'\n";
|
||||
}
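# Illustrative only: for a linked clone 'base-100-disk-1/vm-101-disk-0' the
# pattern above returns
#   ('images', 'vm-101-disk-0', '101', 'base-100-disk-1', '100', $isBase, 'raw')
# with a false $isBase, and for a container subvolume 'subvol-102-disk-0'
#   ('images', 'subvol-102-disk-0', '102', undef, undef, $isBase, 'subvol').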
|
||||
|
||||
# virtual zfs methods (subclass can overwrite them)
|
||||
|
||||
sub on_add_hook {
|
||||
my ($class, $storeid, $scfg, %param) = @_;
|
||||
|
||||
my $cfg_mountpoint = $scfg->{mountpoint};
|
||||
|
||||
# ignore failure, pool might currently not be imported
|
||||
my $mountpoint;
|
||||
eval {
|
||||
my $res = $class->zfs_get_properties($scfg, 'mountpoint', $scfg->{pool}, 1);
|
||||
$mountpoint = PVE::Storage::Plugin::verify_path($res, 1) if defined($res);
|
||||
};
|
||||
|
||||
if (defined($cfg_mountpoint)) {
|
||||
if (defined($mountpoint) && !($cfg_mountpoint =~ m|^\Q$mountpoint\E/?$|)) {
|
||||
warn "warning for $storeid - mountpoint: $cfg_mountpoint " .
|
||||
"does not match current mount point: $mountpoint\n";
|
||||
}
|
||||
} else {
|
||||
$scfg->{mountpoint} = $mountpoint;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub path {
|
||||
my ($class, $scfg, $volname, $storeid, $snapname) = @_;
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
|
||||
my $path = '';
|
||||
my $mountpoint = $scfg->{mountpoint} // "/$scfg->{pool}";
|
||||
|
||||
if ($vtype eq "images") {
|
||||
if ($name =~ m/^subvol-/ || $name =~ m/^basevol-/) {
|
||||
$path = "$mountpoint/$name";
|
||||
} else {
|
||||
$path = "/dev/zvol/$scfg->{pool}/$name";
|
||||
}
|
||||
$path .= "\@$snapname" if defined($snapname);
|
||||
} else {
|
||||
die "$vtype is not allowed in ZFSPool!";
|
||||
}
|
||||
|
||||
return ($path, $vmid, $vtype);
|
||||
}
|
||||
|
||||
sub zfs_request {
|
||||
my ($class, $scfg, $timeout, $method, @params) = @_;
|
||||
|
||||
my $cmd = [];
|
||||
|
||||
if ($method eq 'zpool_list') {
|
||||
push @$cmd, 'zpool', 'list';
|
||||
} elsif ($method eq 'zpool_import') {
|
||||
push @$cmd, 'zpool', 'import';
|
||||
$timeout = 15 if !$timeout || $timeout < 15;
|
||||
} else {
|
||||
push @$cmd, 'zfs', $method;
|
||||
}
|
||||
push @$cmd, @params;
|
||||
|
||||
my $msg = '';
|
||||
my $output = sub { $msg .= "$_[0]\n" };
|
||||
|
||||
if (PVE::RPCEnvironment->is_worker()) {
|
||||
$timeout = 60*60 if !$timeout;
|
||||
$timeout = 60*5 if $timeout < 60*5;
|
||||
} else {
|
||||
$timeout = 10 if !$timeout;
|
||||
}
|
||||
|
||||
run_command($cmd, errmsg => "zfs error", outfunc => $output, timeout => $timeout);
|
||||
|
||||
return $msg;
|
||||
}
|
||||
|
||||
sub zfs_wait_for_zvol_link {
|
||||
my ($class, $scfg, $volname, $timeout) = @_;
|
||||
|
||||
my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*5 : 10;
|
||||
$timeout = $default_timeout if !defined($timeout);
|
||||
|
||||
my ($devname, undef, undef) = $class->path($scfg, $volname);
|
||||
|
||||
for (my $i = 1; $i <= $timeout; $i++) {
|
||||
last if -b $devname;
|
||||
die "timeout: no zvol device link for '$volname' found after $timeout sec found.\n"
|
||||
if $i == $timeout;
|
||||
|
||||
sleep(1);
|
||||
}
|
||||
}
|
||||
|
||||
sub alloc_image {
|
||||
my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
|
||||
|
||||
my $volname = $name;
|
||||
|
||||
if ($fmt eq 'raw') {
|
||||
|
||||
die "illegal name '$volname' - should be 'vm-$vmid-*'\n"
|
||||
if $volname && $volname !~ m/^vm-$vmid-/;
|
||||
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
|
||||
if !$volname;
|
||||
|
||||
$class->zfs_create_zvol($scfg, $volname, $size);
|
||||
$class->zfs_wait_for_zvol_link($scfg, $volname);
|
||||
|
||||
} elsif ( $fmt eq 'subvol') {
|
||||
|
||||
die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
|
||||
if $volname && $volname !~ m/^subvol-$vmid-/;
|
||||
$volname = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt)
|
||||
if !$volname;
|
||||
|
||||
die "illegal name '$volname' - should be 'subvol-$vmid-*'\n"
|
||||
if $volname !~ m/^subvol-$vmid-/;
|
||||
|
||||
$class->zfs_create_subvol($scfg, $volname, $size);
|
||||
|
||||
} else {
|
||||
die "unsupported format '$fmt'";
|
||||
}
|
||||
|
||||
return $volname;
|
||||
}
|
||||
|
||||
sub free_image {
    my ($class, $storeid, $scfg, $volname, $isBase) = @_;

    my (undef, $name, undef) = $class->parse_volname($volname);

    $class->zfs_delete_zvol($scfg, $name);

    return undef;
}

sub list_images {
    my ($class, $storeid, $scfg, $vmid, $vollist, $cache) = @_;

    my $zfs_list = $class->zfs_list_zvol($scfg);

    my $res = [];

    for my $info (values $zfs_list->%*) {
        my $volname = $info->{name};
        my $parent = $info->{parent};
        my $owner = $info->{vmid};

        if ($parent && $parent =~ m/^(\S+)\@__base__$/) {
            my ($basename) = ($1);
            $info->{volid} = "$storeid:$basename/$volname";
        } else {
            $info->{volid} = "$storeid:$volname";
        }

        if ($vollist) {
            my $found = grep { $_ eq $info->{volid} } @$vollist;
            next if !$found;
        } else {
            next if defined($vmid) && ($owner ne $vmid);
        }

        push @$res, $info;
    }
    return $res;
}

sub zfs_get_properties {
    my ($class, $scfg, $properties, $dataset, $timeout) = @_;

    my $result = $class->zfs_request($scfg, $timeout, 'get', '-o', 'value',
        '-Hp', $properties, $dataset);
    my @values = split /\n/, $result;
    return wantarray ? @values : $values[0];
}

sub zfs_get_pool_stats {
    my ($class, $scfg) = @_;

    my $available = 0;
    my $used = 0;

    my @lines = $class->zfs_get_properties($scfg, 'available,used', $scfg->{pool});

    if ($lines[0] =~ /^(\d+)$/) {
        $available = $1;
    }

    if ($lines[1] =~ /^(\d+)$/) {
        $used = $1;
    }

    return ($available, $used);
}

sub zfs_create_zvol {
    my ($class, $scfg, $zvol, $size) = @_;

    # always align the size to 1M as a workaround until
    # https://github.com/zfsonlinux/zfs/issues/8541 is solved
    my $padding = (1024 - $size % 1024) % 1024;
    $size = $size + $padding;

    my $cmd = ['create'];

    push @$cmd, '-s' if $scfg->{sparse};

    push @$cmd, '-b', $scfg->{blocksize} if $scfg->{blocksize};

    push @$cmd, '-V', "${size}k", "$scfg->{pool}/$zvol";

    $class->zfs_request($scfg, undef, @$cmd);
}

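# Illustration only (not part of the original file): the 1M alignment above
# rounds a requested size up to the next multiple of 1024 KiB, e.g. a request
# for 1000k becomes 1024k. For a hypothetical config with
# { pool => 'tank', sparse => 1, blocksize => '8k' } the resulting command is
#
#   zfs create -s -b 8k -V 1024k tank/vm-100-disk-0
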
sub zfs_create_subvol {
    my ($class, $scfg, $volname, $size) = @_;

    my $dataset = "$scfg->{pool}/$volname";
    my $quota = $size ? "${size}k" : "none";

    my $cmd = ['create', '-o', 'acltype=posixacl', '-o', 'xattr=sa',
        '-o', "refquota=${quota}", $dataset];

    $class->zfs_request($scfg, undef, @$cmd);
}

sub zfs_delete_zvol {
    my ($class, $scfg, $zvol) = @_;

    my $err;

    for (my $i = 0; $i < 6; $i++) {

        eval { $class->zfs_request($scfg, undef, 'destroy', '-r', "$scfg->{pool}/$zvol"); };
        if ($err = $@) {
            if ($err =~ m/^zfs error:(.*): dataset is busy.*/) {
                sleep(1);
            } elsif ($err =~ m/^zfs error:.*: dataset does not exist.*$/) {
                $err = undef;
                last;
            } else {
                die $err;
            }
        } else {
            last;
        }
    }

    die $err if $err;
}

sub zfs_list_zvol {
    my ($class, $scfg) = @_;

    my $text = $class->zfs_request(
        $scfg,
        10,
        'list',
        '-o',
        'name,volsize,origin,type,refquota',
        '-t',
        'volume,filesystem',
        '-d1',
        '-Hp',
        $scfg->{pool},
    );
    # zfs_parse_zvol_list still needs to filter by pool, because -d1 also lists
    # $scfg->{pool} itself and, while unlikely, its name could be mistaken for a volume.
    my $zvols = zfs_parse_zvol_list($text, $scfg->{pool});
    return {} if !$zvols;

    my $list = {};
    foreach my $zvol (@$zvols) {
        my $name = $zvol->{name};
        my $parent = $zvol->{origin};
        if ($zvol->{origin} && $zvol->{origin} =~ m/^$scfg->{pool}\/(\S+)$/) {
            $parent = $1;
        }

        $list->{$name} = {
            name => $name,
            size => $zvol->{size},
            parent => $parent,
            format => $zvol->{format},
            vmid => $zvol->{owner},
        };
    }

    return $list;
}

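# Illustration only (not part of the original file): with -Hp the `zfs list`
# call above prints one tab-separated line per dataset, for example
#
#   tank/vm-100-disk-0    4294967296    -    volume    -
#
# which zfs_parse_zvol_list (defined earlier in this file) turns into an entry
# along the lines of
#   { name => 'vm-100-disk-0', size => 4294967296, format => 'raw', owner => 100 }
# before zfs_list_zvol re-keys it by name as shown above.
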
sub zfs_get_sorted_snapshot_list {
    my ($class, $scfg, $volname, $sort_params) = @_;

    my @params = ('-H', '-r', '-t', 'snapshot', '-o', 'name', $sort_params->@*);

    my $vname = ($class->parse_volname($volname))[1];
    push @params, "$scfg->{pool}\/$vname";

    my $text = $class->zfs_request($scfg, undef, 'list', @params);
    my @snapshots = split(/\n/, $text);

    my $snap_names = [];
    for my $snapshot (@snapshots) {
        (my $snap_name = $snapshot) =~ s/^.*@//;
        push $snap_names->@*, $snap_name;
    }
    return $snap_names;
}

sub status {
    my ($class, $storeid, $scfg, $cache) = @_;

    my $total = 0;
    my $free = 0;
    my $used = 0;
    my $active = 0;

    eval {
        ($free, $used) = $class->zfs_get_pool_stats($scfg);
        $active = 1;
        $total = $free + $used;
    };
    warn $@ if $@;

    return ($total, $free, $used, $active);
}

sub volume_size_info {
    my ($class, $scfg, $storeid, $volname, $timeout) = @_;

    my (undef, $vname, undef, $parent, undef, undef, $format) =
        $class->parse_volname($volname);

    my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';
    my ($size, $used) = $class->zfs_get_properties($scfg, "$attr,usedbydataset", "$scfg->{pool}/$vname");

    $used = ($used =~ /^(\d+)$/) ? $1 : 0;

    if ($size =~ /^(\d+)$/) {
        return wantarray ? ($1, $format, $used, $parent) : $1;
    }

    die "Could not get zfs volume size\n";
}

sub volume_snapshot {
    my ($class, $scfg, $storeid, $volname, $snap) = @_;

    my $vname = ($class->parse_volname($volname))[1];

    $class->zfs_request($scfg, undef, 'snapshot', "$scfg->{pool}/$vname\@$snap");
}

sub volume_snapshot_delete {
    my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;

    my $vname = ($class->parse_volname($volname))[1];

    $class->deactivate_volume($storeid, $scfg, $vname, $snap, {});
    $class->zfs_request($scfg, undef, 'destroy', "$scfg->{pool}/$vname\@$snap");
}

sub volume_snapshot_rollback {
    my ($class, $scfg, $storeid, $volname, $snap) = @_;

    my (undef, $vname, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

    my $msg = $class->zfs_request($scfg, undef, 'rollback', "$scfg->{pool}/$vname\@$snap");

    # we have to unmount rolled-back subvols to invalidate stale kernel caches;
    # they get mounted again in activate_volume
    # see zfs bug #10931 https://github.com/openzfs/zfs/issues/10931
    if ($format eq 'subvol') {
        eval { $class->zfs_request($scfg, undef, 'unmount', "$scfg->{pool}/$vname"); };
        if (my $err = $@) {
            die $err if $err !~ m/not currently mounted$/;
        }
    }

    return $msg;
}

sub volume_rollback_is_possible {
    my ($class, $scfg, $storeid, $volname, $snap, $blockers) = @_;

    # can't use '-S creation', because zfs list won't reverse the order when the
    # creation time is the same second, breaking at least our tests.
    my $snapshots = $class->zfs_get_sorted_snapshot_list($scfg, $volname, ['-s', 'creation']);

    my $found;
    $blockers //= []; # not guaranteed to be set by caller
    for my $snapshot ($snapshots->@*) {
        if ($snapshot eq $snap) {
            $found = 1;
        } elsif ($found) {
            push $blockers->@*, $snapshot;
        }
    }

    my $volid = "${storeid}:${volname}";

    die "can't rollback, snapshot '$snap' does not exist on '$volid'\n"
        if !$found;

    die "can't rollback, '$snap' is not most recent snapshot on '$volid'\n"
        if scalar($blockers->@*) > 0;

    return 1;
}

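# Illustration only (not part of the original file): given snapshots created in
# the order daily1, daily2, daily3 on a hypothetical volume vm-100-disk-0,
#
#   $class->volume_rollback_is_possible($scfg, 'local-zfs', 'vm-100-disk-0', 'daily3');
#       # returns 1 - daily3 is the most recent snapshot
#   $class->volume_rollback_is_possible($scfg, 'local-zfs', 'vm-100-disk-0', 'daily1', $blockers);
#       # dies with "not most recent snapshot" and leaves ('daily2', 'daily3') in @$blockers
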
sub volume_snapshot_info {
    my ($class, $scfg, $storeid, $volname) = @_;

    my @params = ('-Hp', '-r', '-t', 'snapshot', '-o', 'name,guid,creation');

    my $vname = ($class->parse_volname($volname))[1];
    push @params, "$scfg->{pool}\/$vname";

    my $text = $class->zfs_request($scfg, undef, 'list', @params);
    my @lines = split(/\n/, $text);

    my $info = {};
    for my $line (@lines) {
        my ($snapshot, $guid, $creation) = split(/\s+/, $line);
        (my $snap_name = $snapshot) =~ s/^.*@//;

        $info->{$snap_name} = {
            id => $guid,
            timestamp => $creation,
        };
    }
    return $info;
}

my sub dataset_mounted_heuristic {
    my ($dataset) = @_;

    my $mounts = PVE::ProcFSTools::parse_proc_mounts();
    for my $mp (@$mounts) {
        my ($what, $dir, $fs) = $mp->@*;
        next if $fs ne 'zfs';
        # check for the root dataset or any child dataset (the root dataset could have 'canmount=off');
        # if any child is mounted, heuristically assume that `zfs mount -a` was successful
        next if $what !~ m!^$dataset(?:/|$)!;
        return 1;
    }
    return 0;
}

sub activate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;

    # Note: $scfg->{pool} can include a dataset: <pool>/<dataset>
    my $dataset = $scfg->{pool};
    my $pool = ($dataset =~ s!/.*$!!r);

    return 1 if dataset_mounted_heuristic($dataset); # early return

    my $pool_imported = sub {
        my @param = ('-o', 'name', '-H', $pool);
        my $res = eval { $class->zfs_request($scfg, undef, 'zpool_list', @param) };
        warn "$@\n" if $@;

        return defined($res) && $res =~ m/$pool/;
    };

    if (!$pool_imported->()) {
        # import can only be done if not yet imported!
        my @param = ('-d', '/dev/disk/by-id/', '-o', 'cachefile=none', $pool);
        eval { $class->zfs_request($scfg, undef, 'zpool_import', @param) };
        if (my $err = $@) {
            # just could've raced with another import, so recheck if it is imported
            die "could not activate storage '$storeid', $err\n" if !$pool_imported->();
        }
    }
    eval { $class->zfs_request($scfg, undef, 'mount', '-a') };
    die "could not activate storage '$storeid', $@\n" if $@;
    return 1;
}

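# Illustration only (not part of the original file): for a hypothetical storage
# configured with pool => 'tank/pve-data', activation boils down to
#
#   1. skip everything if 'tank/pve-data' (or any child) already shows up as a
#      zfs mount in /proc/mounts
#   2. zpool list -o name -H tank                                  # is the pool imported?
#   3. zpool import -d /dev/disk/by-id/ -o cachefile=none tank     # only if it is not
#   4. zfs mount -a                                                # make sure datasets are mounted
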
sub deactivate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;
    return 1;
}

sub activate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;

    return 1 if defined($snapname);

    my (undef, $dataset, undef, undef, undef, undef, $format) = $class->parse_volname($volname);

    if ($format eq 'raw') {
        $class->zfs_wait_for_zvol_link($scfg, $volname);
    } elsif ($format eq 'subvol') {
        my $mounted = $class->zfs_get_properties($scfg, 'mounted', "$scfg->{pool}/$dataset");
        if ($mounted !~ m/^yes$/) {
            $class->zfs_request($scfg, undef, 'mount', "$scfg->{pool}/$dataset");
        }
    }

    return 1;
}

sub deactivate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
    return 1;
}

sub clone_image {
    my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;

    $snap ||= '__base__';

    my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
        $class->parse_volname($volname);

    die "clone_image only works on base images\n" if !$isBase;

    my $name = $class->find_free_diskname($storeid, $scfg, $vmid, $format);

    if ($format eq 'subvol') {
        my $size = $class->zfs_request($scfg, undef, 'list', '-Hp', '-o', 'refquota', "$scfg->{pool}/$basename");
        chomp($size);
        $class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name", '-o', "refquota=$size");
    } else {
        $class->zfs_request($scfg, undef, 'clone', "$scfg->{pool}/$basename\@$snap", "$scfg->{pool}/$name");
    }

    return "$basename/$name";
}

sub create_base {
    my ($class, $storeid, $scfg, $volname) = @_;

    my $snap = '__base__';

    my ($vtype, $name, $vmid, $basename, $basevmid, $isBase, $format) =
        $class->parse_volname($volname);

    die "create_base not possible with base image\n" if $isBase;

    my $newname = $name;
    if ($format eq 'subvol') {
        $newname =~ s/^subvol-/basevol-/;
    } else {
        $newname =~ s/^vm-/base-/;
    }
    my $newvolname = $basename ? "$basename/$newname" : "$newname";

    $class->zfs_request($scfg, undef, 'rename', "$scfg->{pool}/$name", "$scfg->{pool}/$newname");

    my $running = undef; # FIXME: is create_base always offline?

    $class->volume_snapshot($scfg, $storeid, $newname, $snap, $running);

    return $newvolname;
}

sub volume_resize {
    my ($class, $scfg, $storeid, $volname, $size, $running) = @_;

    my $new_size = int($size/1024);

    my (undef, $vname, undef, undef, undef, undef, $format) =
        $class->parse_volname($volname);

    my $attr = $format eq 'subvol' ? 'refquota' : 'volsize';

    # align size to 1M so we always have a valid multiple of the volume block size
    if ($format eq 'raw') {
        my $padding = (1024 - $new_size % 1024) % 1024;
        $new_size = $new_size + $padding;
    }

    $class->zfs_request($scfg, undef, 'set', "$attr=${new_size}k", "$scfg->{pool}/$vname");

    return $new_size;
}

sub storage_can_replicate {
    my ($class, $scfg, $storeid, $format) = @_;

    return 1 if $format eq 'raw' || $format eq 'subvol';

    return 0;
}

sub volume_has_feature {
    my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;

    my $features = {
        snapshot => { current => 1, snap => 1 },
        clone => { base => 1 },
        template => { current => 1 },
        copy => { base => 1, current => 1 },
        sparseinit => { base => 1, current => 1 },
        replicate => { base => 1, current => 1 },
        rename => { current => 1 },
    };

    my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
        $class->parse_volname($volname);

    my $key = undef;

    if ($snapname) {
        $key = 'snap';
    } else {
        $key = $isBase ? 'base' : 'current';
    }

    return 1 if $features->{$feature}->{$key};

    return undef;
}

sub volume_export {
    my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;

    die "unsupported export stream format for $class: $format\n"
        if $format ne 'zfs';

    die "$class storage can only export snapshots\n"
        if !defined($snapshot);

    my $dataset = ($class->parse_volname($volname))[1];

    my $fd = fileno($fh);
    die "internal error: invalid file handle for volume_export\n"
        if !defined($fd);
    $fd = ">&$fd";

    # For zfs we always create a replication stream (-R), which means the remote
    # side will always delete non-existing source snapshots. This should work
    # for all our use cases.
    my $cmd = ['zfs', 'send', '-Rpv'];
    if (defined($base_snapshot)) {
        my $arg = $with_snapshots ? '-I' : '-i';
        push @$cmd, $arg, $base_snapshot;
    }
    push @$cmd, '--', "$scfg->{pool}/$dataset\@$snapshot";

    run_command($cmd, output => $fd);

    return;
}

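# Illustration only (not part of the original file): for a hypothetical volume
# vm-100-disk-0 on pool 'tank' and example snapshot names, volume_export spawns
#
#   zfs send -Rpv -- tank/vm-100-disk-0@snap1             # full replication stream
#   zfs send -Rpv -I base1 -- tank/vm-100-disk-0@snap1    # incremental on top of base1,
#                                                         # keeping intermediate snapshots
#
# and writes the stream to the file descriptor handed in by the caller.
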
sub volume_export_formats {
    my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;

    my @formats = ('zfs');
    # TODOs:
    # push @formats, 'fies' if $volname !~ /^(?:basevol|subvol)-/;
    # push @formats, 'raw' if !$base_snapshot && !$with_snapshots;
    return @formats;
}

sub volume_import {
    my ($class, $scfg, $storeid, $fh, $volname, $format, $snapshot, $base_snapshot, $with_snapshots, $allow_rename) = @_;

    die "unsupported import stream format for $class: $format\n"
        if $format ne 'zfs';

    my $fd = fileno($fh);
    die "internal error: invalid file handle for volume_import\n"
        if !defined($fd);

    my (undef, $dataset, $vmid, undef, undef, undef, $volume_format) =
        $class->parse_volname($volname);

    my $zfspath = "$scfg->{pool}/$dataset";
    my $suffix = defined($base_snapshot) ? "\@$base_snapshot" : '';
    my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $zfspath.$suffix],
        noerr => 1, quiet => 1);
    if (defined($base_snapshot)) {
        die "base snapshot '$zfspath\@$base_snapshot' doesn't exist\n" if !$exists;
    } elsif ($exists) {
        die "volume '$zfspath' already exists\n" if !$allow_rename;
        warn "volume '$zfspath' already exists - importing with a different name\n";
        $dataset = $class->find_free_diskname($storeid, $scfg, $vmid, $volume_format);
        $zfspath = "$scfg->{pool}/$dataset";
    }

    eval { run_command(['zfs', 'recv', '-F', '--', $zfspath], input => "<&$fd") };
    if (my $err = $@) {
        if (defined($base_snapshot)) {
            eval { run_command(['zfs', 'rollback', '-r', '--', "$zfspath\@$base_snapshot"]) };
        } else {
            eval { run_command(['zfs', 'destroy', '-r', '--', $zfspath]) };
        }
        die $err;
    }

    return "$storeid:$dataset";
}

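# Illustration only (not part of the original file): the import side is the
# mirror image of volume_export - for a hypothetical target tank/vm-100-disk-0
# the received stream is piped into
#
#   zfs recv -F -- tank/vm-100-disk-0
#
# and on failure the target is either rolled back to the base snapshot
# (incremental import) or destroyed again (full import), so a half-received
# dataset never lingers.
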
sub volume_import_formats {
    my ($class, $scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots) = @_;

    return $class->volume_export_formats($scfg, $storeid, $volname, $snapshot, $base_snapshot, $with_snapshots);
}

sub rename_volume {
    my ($class, $scfg, $storeid, $source_volname, $target_vmid, $target_volname) = @_;

    my (
        undef,
        $source_image,
        $source_vmid,
        $base_name,
        $base_vmid,
        undef,
        $format
    ) = $class->parse_volname($source_volname);
    $target_volname = $class->find_free_diskname($storeid, $scfg, $target_vmid, $format)
        if !$target_volname;

    my $pool = $scfg->{pool};
    my $source_zfspath = "${pool}/${source_image}";
    my $target_zfspath = "${pool}/${target_volname}";

    my $exists = 0 == run_command(['zfs', 'get', '-H', 'name', $target_zfspath],
        noerr => 1, quiet => 1);
    die "target volume '${target_volname}' already exists\n" if $exists;

    $class->zfs_request($scfg, 5, 'rename', ${source_zfspath}, ${target_zfspath});

    $base_name = $base_name ? "${base_name}/" : '';

    return "${storeid}:${base_name}${target_volname}";
}

1;
36
src/bin/Makefile
Normal file
36
src/bin/Makefile
Normal file
@ -0,0 +1,36 @@
DESTDIR=
PREFIX=/usr
SBINDIR=$(PREFIX)/sbin
MANDIR=$(PREFIX)/share/man
MAN1DIR=$(MANDIR)/man1/
BASHCOMPLDIR=$(PREFIX)/share/bash-completion/completions/
ZSHCOMPLDIR=$(PREFIX)/share/zsh/vendor-completions/

export PERLDIR=$(PREFIX)/share/perl5

PERL_DOC_INC_DIRS=..
-include /usr/share/pve-doc-generator/pve-doc-generator.mk

all:

pvesm.bash-completion:
	perl -I.. -T -e "use PVE::CLI::pvesm; PVE::CLI::pvesm->generate_bash_completions();" >$@.tmp
	mv $@.tmp $@

pvesm.zsh-completion:
	perl -I.. -T -e "use PVE::CLI::pvesm; PVE::CLI::pvesm->generate_zsh_completions();" >$@.tmp
	mv $@.tmp $@

.PHONY: install
install: pvesm.1 pvesm.bash-completion pvesm.zsh-completion
	install -d $(DESTDIR)$(SBINDIR)
	install -m 0755 pvesm $(DESTDIR)$(SBINDIR)
	install -d $(DESTDIR)$(MAN1DIR)
	install -m 0644 pvesm.1 $(DESTDIR)$(MAN1DIR)
	gzip -9 -n $(DESTDIR)$(MAN1DIR)/pvesm.1
	install -m 0644 -D pvesm.bash-completion $(DESTDIR)$(BASHCOMPLDIR)/pvesm
	install -m 0644 -D pvesm.zsh-completion $(DESTDIR)$(ZSHCOMPLDIR)/_pvesm

.PHONY: clean
clean:
	rm -f *.xml.tmp *.1 *.5 *.8 *{synopsis,opts}.adoc docinfo.xml
8
src/bin/pvesm
Executable file
8
src/bin/pvesm
Executable file
@ -0,0 +1,8 @@
#!/usr/bin/perl

use strict;
use warnings;

use PVE::CLI::pvesm;

PVE::CLI::pvesm->run_cli_handler();
15
src/test/Makefile
Normal file
15
src/test/Makefile
Normal file
@ -0,0 +1,15 @@
all: test

test: test_zfspoolplugin test_disklist test_bwlimit test_plugin

test_zfspoolplugin: run_test_zfspoolplugin.pl
	./run_test_zfspoolplugin.pl

test_disklist: run_disk_tests.pl
	./run_disk_tests.pl

test_bwlimit: run_bwlimit_tests.pl
	./run_bwlimit_tests.pl

test_plugin: run_plugin_tests.pl
	./run_plugin_tests.pl
198
src/test/archive_info_test.pm
Normal file
198
src/test/archive_info_test.pm
Normal file
@ -0,0 +1,198 @@
|
||||
package PVE::Storage::TestArchiveInfo;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use lib qw(..);
|
||||
|
||||
use PVE::Storage;
|
||||
use Test::More;
|
||||
|
||||
my $vmid = 16110;
|
||||
|
||||
my $LOG_EXT = PVE::Storage::Plugin::LOG_EXT;
|
||||
my $NOTES_EXT = PVE::Storage::Plugin::NOTES_EXT;
|
||||
|
||||
# an array of test cases, each test is comprised of the following keys:
|
||||
# description => to identify a single test
|
||||
# archive => the input filename for archive_info
|
||||
# expected => the hash that archive_info returns
|
||||
#
|
||||
# most of them are created further below
|
||||
my $tests = [
|
||||
# backup archives
|
||||
{
|
||||
description => 'Backup archive, lxc, tgz, future millenium',
|
||||
archive => "backup/vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz",
|
||||
'logfilename' => "vzdump-lxc-$vmid-3070_01_01-00_00_00".$LOG_EXT,
|
||||
'notesfilename'=> "vzdump-lxc-$vmid-3070_01_01-00_00_00.tgz".$NOTES_EXT,
|
||||
'type' => 'lxc',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 60*60*24 * (365*1100 + 267),
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, lxc, tgz, very old',
|
||||
archive => "backup/vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz",
|
||||
'logfilename' => "vzdump-lxc-$vmid-1970_01_01-02_00_30".$LOG_EXT,
|
||||
'notesfilename'=> "vzdump-lxc-$vmid-1970_01_01-02_00_30.tgz".$NOTES_EXT,
|
||||
'type' => 'lxc',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 60*60*2 + 30,
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, lxc, tgz',
|
||||
archive => "backup/vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz",
|
||||
'logfilename' => "vzdump-lxc-$vmid-2020_03_30-21_39_30".$LOG_EXT,
|
||||
'notesfilename'=> "vzdump-lxc-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
|
||||
'type' => 'lxc',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 1585604370,
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, openvz, tgz',
|
||||
archive => "backup/vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz",
|
||||
'logfilename' => "vzdump-openvz-$vmid-2020_03_30-21_39_30".$LOG_EXT,
|
||||
'notesfilename'=> "vzdump-openvz-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
|
||||
'type' => 'openvz',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 1585604370,
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, custom dump directory, qemu, tgz',
|
||||
archive => "/here/be/Back-ups/vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz",
|
||||
'logfilename' => "vzdump-qemu-$vmid-2020_03_30-21_39_30".$LOG_EXT,
|
||||
'notesfilename'=> "vzdump-qemu-$vmid-2020_03_30-21_39_30.tgz".$NOTES_EXT,
|
||||
'type' => 'qemu',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 1585604370,
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
description => 'Backup archive, none, tgz',
|
||||
archive => "backup/vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
|
||||
expected => {
|
||||
'filename' => "vzdump-qemu-$vmid-whatever-the-name_is_here.tgz",
|
||||
'type' => 'qemu',
|
||||
'format' => 'tar',
|
||||
'decompressor' => ['tar', '-z'],
|
||||
'compression' => 'gz',
|
||||
'is_std_name' => 0,
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
# add new compression fromats to test
|
||||
my $decompressor = {
|
||||
tar => {
|
||||
gz => ['tar', '-z'],
|
||||
lzo => ['tar', '--lzop'],
|
||||
zst => ['tar', '--zstd'],
|
||||
},
|
||||
vma => {
|
||||
gz => ['zcat'],
|
||||
lzo => ['lzop', '-d', '-c'],
|
||||
zst => ['zstd', '-q', '-d', '-c'],
|
||||
},
|
||||
};
|
||||
|
||||
my $bkp_suffix = {
|
||||
qemu => [ 'vma', $decompressor->{vma}, ],
|
||||
lxc => [ 'tar', $decompressor->{tar}, ],
|
||||
openvz => [ 'tar', $decompressor->{tar}, ],
|
||||
};
|
||||
|
||||
# create more test cases for backup files matches
|
||||
for my $virt (sort keys %$bkp_suffix) {
|
||||
my ($format, $decomp) = $bkp_suffix->{$virt}->@*;
|
||||
my $archive_name = "vzdump-$virt-$vmid-2020_03_30-21_12_40";
|
||||
|
||||
for my $suffix (sort keys %$decomp) {
|
||||
push @$tests, {
|
||||
description => "Backup archive, $virt, $format.$suffix",
|
||||
archive => "backup/$archive_name.$format.$suffix",
|
||||
expected => {
|
||||
'filename' => "$archive_name.$format.$suffix",
|
||||
'logfilename' => $archive_name.$LOG_EXT,
|
||||
'notesfilename'=> "$archive_name.$format.$suffix".$NOTES_EXT,
|
||||
'type' => "$virt",
|
||||
'format' => "$format",
|
||||
'decompressor' => $decomp->{$suffix},
|
||||
'compression' => "$suffix",
|
||||
'vmid' => $vmid,
|
||||
'ctime' => 1585602760,
|
||||
'is_std_name' => 1,
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# add compression formats to test failed matches
|
||||
my $non_bkp_suffix = {
|
||||
'openvz' => [ 'zip', 'tgz.lzo', 'tar.bz2', 'zip.gz', '', ],
|
||||
'lxc' => [ 'zip', 'tgz.lzo', 'tar.bz2', 'zip.gz', '', ],
|
||||
'qemu' => [ 'vma.xz', 'vms.gz', 'vmx.zst', '', ],
|
||||
'none' => [ 'tar.gz', ],
|
||||
};
|
||||
|
||||
# create tests for failed matches
|
||||
for my $virt (sort keys %$non_bkp_suffix) {
|
||||
my $suffix = $non_bkp_suffix->{$virt};
|
||||
for my $s (@$suffix) {
|
||||
my $archive = "backup/vzdump-$virt-$vmid-2020_03_30-21_12_40.$s";
|
||||
push @$tests, {
|
||||
description => "Failed match: Backup archive, $virt, $s",
|
||||
archive => $archive,
|
||||
expected => "ERROR: couldn't determine archive info from '$archive'\n",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
plan tests => scalar @$tests;
|
||||
|
||||
for my $tt (@$tests) {
|
||||
|
||||
my $got = eval { PVE::Storage::archive_info($tt->{archive}) };
|
||||
$got = $@ if $@;
|
||||
|
||||
is_deeply($got, $tt->{expected}, $tt->{description}) || diag(explain($got));
|
||||
}
|
||||
|
||||
done_testing();
|
||||
|
||||
1;
|
||||
1
src/test/disk_tests/cciss/cciss!c0d0/device/model
Normal file
1
src/test/disk_tests/cciss/cciss!c0d0/device/model
Normal file
@ -0,0 +1 @@
|
||||
LOGICAL_VOLUME
|
||||
1
src/test/disk_tests/cciss/cciss!c0d0/device/vendor
Normal file
1
src/test/disk_tests/cciss/cciss!c0d0/device/vendor
Normal file
@ -0,0 +1 @@
|
||||
HP
|
||||
1
src/test/disk_tests/cciss/cciss!c0d0/queue/rotational
Normal file
1
src/test/disk_tests/cciss/cciss!c0d0/queue/rotational
Normal file
@ -0,0 +1 @@
|
||||
1
|
||||
1
src/test/disk_tests/cciss/cciss!c0d0/size
Normal file
1
src/test/disk_tests/cciss/cciss!c0d0/size
Normal file
@ -0,0 +1 @@
|
||||
10
|
||||
32
src/test/disk_tests/cciss/cciss!c0d0_udevadm
Normal file
32
src/test/disk_tests/cciss/cciss!c0d0_udevadm
Normal file
@ -0,0 +1,32 @@
|
||||
P: /devices/pci0000:40/0000:40:13.0/0000:45:00.0/cciss0/c0d0/block/cciss!c0d0
|
||||
N: cciss/c0d0
|
||||
S: disk/by-id/cciss-SERIAL111
|
||||
S: disk/by-id/wwn-0x00000000000000000000000000000000
|
||||
S: disk/by-path/pci-0000:45:00.0-cciss-disk0
|
||||
E: DEVLINKS=/dev/disk/by-id/cciss-000000000000000000000000000000000 /dev/disk/by-id/wwn-0x000000000000000000000000000000000/dev/disk/by-path/pci-0000:45:00.0-cciss-disk0
|
||||
E: DEVNAME=/dev/cciss/c0d0
|
||||
E: DEVPATH=/devices/pci0000:40/0000:40:13.0/0000:45:00.0/cciss0/c0d0/block/cciss!c0d0
|
||||
E: DEVTYPE=disk
|
||||
E: ID_BUS=cciss
|
||||
E: ID_MODEL=LOGICAL_VOLUME
|
||||
E: ID_MODEL_ENC=LOGICAL\x20VOLUME\x20\x20
|
||||
E: ID_PART_TABLE_TYPE=gpt
|
||||
E: ID_PART_TABLE_UUID=cfe72deb-65d1-487c-bdfa-8af66dc1a969
|
||||
E: ID_PATH=pci-0000:45:00.0-cciss-disk0
|
||||
E: ID_PATH_TAG=pci-0000_45_00_0-cciss-disk0
|
||||
E: ID_REVISION=7.24
|
||||
E: ID_SCSI=1
|
||||
E: ID_SCSI_SERIAL=SERIAL1
|
||||
E: ID_SERIAL=SERIAL111
|
||||
E: ID_SERIAL_SHORT=SER111
|
||||
E: ID_TYPE=disk
|
||||
E: ID_VENDOR=HP
|
||||
E: ID_VENDOR_ENC=HP\x20\x20\x20\x20\x20\x20
|
||||
E: ID_WWN=0x0000000000000000
|
||||
E: ID_WWN_VENDOR_EXTENSION=0x0000000000000000
|
||||
E: ID_WWN_WITH_EXTENSION=0x00000000000000000000000000000000
|
||||
E: MAJOR=104
|
||||
E: MINOR=0
|
||||
E: SUBSYSTEM=block
|
||||
E: TAGS=:systemd:
|
||||
E: USEC_INITIALIZED=2247
|
||||
1
src/test/disk_tests/cciss/disklist
Normal file
1
src/test/disk_tests/cciss/disklist
Normal file
@ -0,0 +1 @@
|
||||
cciss!c0d0
|
||||
16
src/test/disk_tests/cciss/disklist_expected.json
Normal file
16
src/test/disk_tests/cciss/disklist_expected.json
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"cciss/c0d0" : {
|
||||
"wearout" : "N/A",
|
||||
"vendor" : "HP",
|
||||
"rpm" : -1,
|
||||
"type" : "unknown",
|
||||
"serial" : "SER111",
|
||||
"osdid" : -1,
|
||||
"health" : "UNKNOWN",
|
||||
"model" : "LOGICAL_VOLUME",
|
||||
"size" : 5120,
|
||||
"wwn" : "0x0000000000000000",
|
||||
"gpt" : 1,
|
||||
"devpath" : "/dev/cciss/c0d0"
|
||||
}
|
||||
}
|
||||
2
src/test/disk_tests/hdd_smart/disklist
Normal file
2
src/test/disk_tests/hdd_smart/disklist
Normal file
@ -0,0 +1,2 @@
|
||||
sda
|
||||
sdb
|
||||
30
src/test/disk_tests/hdd_smart/disklist_expected.json
Normal file
30
src/test/disk_tests/hdd_smart/disklist_expected.json
Normal file
@ -0,0 +1,30 @@
|
||||
{
|
||||
"sdb" : {
|
||||
"devpath" : "/dev/sdb",
|
||||
"size" : 1024000,
|
||||
"gpt" : 1,
|
||||
"osdid" : -1,
|
||||
"rpm" : 7200,
|
||||
"model" : "ST4000NM0033-9ZM170",
|
||||
"vendor" : "ATA",
|
||||
"health" : "PASSED",
|
||||
"type" : "hdd",
|
||||
"wwn" : "0x0000000000000000",
|
||||
"wearout" : "N/A",
|
||||
"serial" : "00000000"
|
||||
},
|
||||
"sda" : {
|
||||
"osdid" : -1,
|
||||
"size" : 1024000,
|
||||
"gpt" : 1,
|
||||
"devpath" : "/dev/sda",
|
||||
"model" : "ST4000DM000-1F2168",
|
||||
"rpm" : 5900,
|
||||
"type" : "hdd",
|
||||
"health" : "PASSED",
|
||||
"vendor" : "ATA",
|
||||
"serial" : "00000000",
|
||||
"wearout" : "N/A",
|
||||
"wwn" : "0x0000000000000000"
|
||||
}
|
||||
}
|
||||
1
src/test/disk_tests/hdd_smart/sda/device/vendor
Normal file
1
src/test/disk_tests/hdd_smart/sda/device/vendor
Normal file
@ -0,0 +1 @@
|
||||
ATA
|
||||
1
src/test/disk_tests/hdd_smart/sda/queue/rotational
Normal file
1
src/test/disk_tests/hdd_smart/sda/queue/rotational
Normal file
@ -0,0 +1 @@
|
||||
1
|
||||
1
src/test/disk_tests/hdd_smart/sda/size
Normal file
1
src/test/disk_tests/hdd_smart/sda/size
Normal file
@ -0,0 +1 @@
|
||||
2000
|
||||
5
src/test/disk_tests/hdd_smart/sda_health
Normal file
5
src/test/disk_tests/hdd_smart/sda_health
Normal file
@ -0,0 +1,5 @@
|
||||
smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.4.21-1-pve] (local build)
|
||||
Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
|
||||
|
||||
=== START OF READ SMART DATA SECTION ===
|
||||
SMART overall-health self-assessment test result: PASSED
|
||||
40
src/test/disk_tests/hdd_smart/sda_smart
Normal file
40
src/test/disk_tests/hdd_smart/sda_smart
Normal file
@ -0,0 +1,40 @@
|
||||
smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.4.21-1-pve] (local build)
|
||||
Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
|
||||
|
||||
=== START OF READ SMART DATA SECTION ===
|
||||
SMART overall-health self-assessment test result: PASSED
|
||||
|
||||
SMART Attributes Data Structure revision number: 10
|
||||
Vendor Specific SMART Attributes with Thresholds:
|
||||
ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
|
||||
1 Raw_Read_Error_Rate POSR-- 117 099 006 - 158983656
|
||||
3 Spin_Up_Time PO---- 092 091 000 - 0
|
||||
4 Start_Stop_Count -O--CK 100 100 020 - 189
|
||||
5 Reallocated_Sector_Ct PO--CK 100 100 010 - 0
|
||||
7 Seek_Error_Rate POSR-- 075 060 030 - 30779387
|
||||
9 Power_On_Hours -O--CK 099 099 000 - 1250
|
||||
10 Spin_Retry_Count PO--C- 100 100 097 - 0
|
||||
12 Power_Cycle_Count -O--CK 100 100 020 - 190
|
||||
183 Runtime_Bad_Block -O--CK 100 100 000 - 0
|
||||
184 End-to-End_Error -O--CK 100 100 099 - 0
|
||||
187 Reported_Uncorrect -O--CK 100 100 000 - 0
|
||||
188 Command_Timeout -O--CK 100 100 000 - 0 0 0
|
||||
189 High_Fly_Writes -O-RCK 100 100 000 - 0
|
||||
190 Airflow_Temperature_Cel -O---K 069 061 045 - 31 (Min/Max 20/33)
|
||||
191 G-Sense_Error_Rate -O--CK 100 100 000 - 0
|
||||
192 Power-Off_Retract_Count -O--CK 100 100 000 - 43
|
||||
193 Load_Cycle_Count -O--CK 100 100 000 - 201
|
||||
194 Temperature_Celsius -O---K 031 040 000 - 31 (0 17 0 0 0)
|
||||
197 Current_Pending_Sector -O--C- 100 100 000 - 0
|
||||
198 Offline_Uncorrectable ----C- 100 100 000 - 0
|
||||
199 UDMA_CRC_Error_Count -OSRCK 200 200 000 - 0
|
||||
240 Head_Flying_Hours ------ 100 253 000 - 1259h+06m+33.546s
|
||||
241 Total_LBAs_Written ------ 100 253 000 - 24013587236
|
||||
242 Total_LBAs_Read ------ 100 253 000 - 66916845706732
|
||||
||||||_ K auto-keep
|
||||
|||||__ C event count
|
||||
||||___ R error rate
|
||||
|||____ S speed/performance
|
||||
||_____ O updated online
|
||||
|______ P prefailure warning
|
||||
|
||||
270
src/test/disk_tests/hdd_smart/sda_smart_expected.json
Normal file
270
src/test/disk_tests/hdd_smart/sda_smart_expected.json
Normal file
@ -0,0 +1,270 @@
|
||||
{
|
||||
"attributes" : [
|
||||
{
|
||||
"threshold" : 6,
|
||||
"fail" : "-",
|
||||
"flags" : "POSR--",
|
||||
"normalized" : 117,
|
||||
"value" : 117,
|
||||
"id" : " 1",
|
||||
"raw" : "158983656",
|
||||
"name" : "Raw_Read_Error_Rate",
|
||||
"worst" : 99
|
||||
},
|
||||
{
|
||||
"flags" : "PO----",
|
||||
"normalized" : 92,
|
||||
"value" : 92,
|
||||
"raw" : "0",
|
||||
"name" : "Spin_Up_Time",
|
||||
"worst" : 91,
|
||||
"id" : " 3",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"id" : " 4",
|
||||
"name" : "Start_Stop_Count",
|
||||
"worst" : 100,
|
||||
"raw" : "189",
|
||||
"threshold" : 20,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "PO--CK",
|
||||
"id" : " 5",
|
||||
"name" : "Reallocated_Sector_Ct",
|
||||
"worst" : 100,
|
||||
"raw" : "0",
|
||||
"threshold" : 10,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"flags" : "POSR--",
|
||||
"normalized" : 75,
|
||||
"value" : 75,
|
||||
"raw" : "30779387",
|
||||
"worst" : 60,
|
||||
"name" : "Seek_Error_Rate",
|
||||
"id" : " 7",
|
||||
"fail" : "-",
|
||||
"threshold" : 30
|
||||
},
|
||||
{
|
||||
"raw" : "1250",
|
||||
"worst" : 99,
|
||||
"name" : "Power_On_Hours",
|
||||
"id" : " 9",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 99,
|
||||
"value" : 99,
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "PO--C-",
|
||||
"id" : " 10",
|
||||
"name" : "Spin_Retry_Count",
|
||||
"worst" : 100,
|
||||
"raw" : "0",
|
||||
"threshold" : 97,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"id" : " 12",
|
||||
"raw" : "190",
|
||||
"worst" : 100,
|
||||
"name" : "Power_Cycle_Count",
|
||||
"threshold" : 20,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"id" : "183",
|
||||
"raw" : "0",
|
||||
"worst" : 100,
|
||||
"name" : "Runtime_Bad_Block"
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 99,
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"raw" : "0",
|
||||
"worst" : 100,
|
||||
"name" : "End-to-End_Error",
|
||||
"id" : "184"
|
||||
},
|
||||
{
|
||||
"worst" : 100,
|
||||
"name" : "Reported_Uncorrect",
|
||||
"raw" : "0",
|
||||
"id" : "187",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"raw" : "0 0 0",
|
||||
"worst" : 100,
|
||||
"name" : "Command_Timeout",
|
||||
"id" : "188",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"flags" : "-O-RCK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"id" : "189",
|
||||
"raw" : "0",
|
||||
"name" : "High_Fly_Writes",
|
||||
"worst" : 100
|
||||
},
|
||||
{
|
||||
"worst" : 61,
|
||||
"name" : "Airflow_Temperature_Cel",
|
||||
"raw" : "31 (Min/Max 20/33)",
|
||||
"id" : "190",
|
||||
"normalized" : 69,
|
||||
"value" : 69,
|
||||
"flags" : "-O---K",
|
||||
"fail" : "-",
|
||||
"threshold" : 45
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"worst" : 100,
|
||||
"name" : "G-Sense_Error_Rate",
|
||||
"raw" : "0",
|
||||
"id" : "191",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK"
|
||||
},
|
||||
{
|
||||
"id" : "192",
|
||||
"raw" : "43",
|
||||
"name" : "Power-Off_Retract_Count",
|
||||
"worst" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"threshold" : 0,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"raw" : "201",
|
||||
"worst" : 100,
|
||||
"name" : "Load_Cycle_Count",
|
||||
"id" : "193",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"normalized" : 31,
|
||||
"value" : 31,
|
||||
"flags" : "-O---K",
|
||||
"name" : "Temperature_Celsius",
|
||||
"worst" : 40,
|
||||
"raw" : "31 (0 17 0 0 0)",
|
||||
"id" : "194"
|
||||
},
|
||||
{
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--C-",
|
||||
"id" : "197",
|
||||
"worst" : 100,
|
||||
"name" : "Current_Pending_Sector",
|
||||
"raw" : "0",
|
||||
"threshold" : 0,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"worst" : 100,
|
||||
"name" : "Offline_Uncorrectable",
|
||||
"raw" : "0",
|
||||
"id" : "198",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "----C-",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"normalized" : 200,
|
||||
"value" : 200,
|
||||
"flags" : "-OSRCK",
|
||||
"id" : "199",
|
||||
"worst" : 200,
|
||||
"name" : "UDMA_CRC_Error_Count",
|
||||
"raw" : "0"
|
||||
},
|
||||
{
|
||||
"raw" : "1259h+06m+33.546s",
|
||||
"name" : "Head_Flying_Hours",
|
||||
"worst" : 253,
|
||||
"id" : "240",
|
||||
"flags" : "------",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"name" : "Total_LBAs_Written",
|
||||
"worst" : 253,
|
||||
"raw" : "24013587236",
|
||||
"id" : "241",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "------"
|
||||
},
|
||||
{
|
||||
"flags" : "------",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"id" : "242",
|
||||
"raw" : "66916845706732",
|
||||
"worst" : 253,
|
||||
"name" : "Total_LBAs_Read",
|
||||
"threshold" : 0,
|
||||
"fail" : "-"
|
||||
}
|
||||
],
|
||||
"health" : "PASSED",
|
||||
"type" : "ata"
|
||||
}
|
||||
11
src/test/disk_tests/hdd_smart/sda_udevadm
Normal file
11
src/test/disk_tests/hdd_smart/sda_udevadm
Normal file
@ -0,0 +1,11 @@
|
||||
E: DEVNAME=/dev/sda
|
||||
E: DEVTYPE=disk
|
||||
E: ID_ATA_ROTATION_RATE_RPM=5900
|
||||
E: ID_BUS=ata
|
||||
E: ID_MODEL=ST4000DM000-1F2168
|
||||
E: ID_PART_TABLE_TYPE=gpt
|
||||
E: ID_PART_TABLE_UUID=4f2e07a6-5437-2b4e-b6e8-9cba98639324
|
||||
E: ID_SERIAL_SHORT=00000000
|
||||
E: ID_TYPE=disk
|
||||
E: ID_WWN=0x0000000000000000
|
||||
E: ID_WWN_WITH_EXTENSION=0x0000000000000000
|
||||
1
src/test/disk_tests/hdd_smart/sdb/device/vendor
Normal file
1
src/test/disk_tests/hdd_smart/sdb/device/vendor
Normal file
@ -0,0 +1 @@
|
||||
ATA
|
||||
1
src/test/disk_tests/hdd_smart/sdb/queue/rotational
Normal file
1
src/test/disk_tests/hdd_smart/sdb/queue/rotational
Normal file
@ -0,0 +1 @@
|
||||
1
|
||||
1
src/test/disk_tests/hdd_smart/sdb/size
Normal file
1
src/test/disk_tests/hdd_smart/sdb/size
Normal file
@ -0,0 +1 @@
|
||||
2000
|
||||
5
src/test/disk_tests/hdd_smart/sdb_health
Normal file
5
src/test/disk_tests/hdd_smart/sdb_health
Normal file
@ -0,0 +1,5 @@
|
||||
smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.4.21-1-pve] (local build)
|
||||
Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
|
||||
|
||||
=== START OF READ SMART DATA SECTION ===
|
||||
SMART overall-health self-assessment test result: PASSED
|
||||
36
src/test/disk_tests/hdd_smart/sdb_smart
Normal file
36
src/test/disk_tests/hdd_smart/sdb_smart
Normal file
@ -0,0 +1,36 @@
|
||||
smartctl 6.4 2014-10-07 r4002 [x86_64-linux-4.4.10-1-pve] (local build)
|
||||
Copyright (C) 2002-14, Bruce Allen, Christian Franke, www.smartmontools.org
|
||||
|
||||
=== START OF READ SMART DATA SECTION ===
|
||||
SMART overall-health self-assessment test result: PASSED
|
||||
|
||||
SMART Attributes Data Structure revision number: 10
|
||||
Vendor Specific SMART Attributes with Thresholds:
|
||||
ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH FAIL RAW_VALUE
|
||||
1 Raw_Read_Error_Rate POSR-- 083 063 044 - 215697022
|
||||
3 Spin_Up_Time PO---- 092 091 000 - 0
|
||||
4 Start_Stop_Count -O--CK 100 100 020 - 265
|
||||
5 Reallocated_Sector_Ct PO--CK 100 100 010 - 0
|
||||
7 Seek_Error_Rate POSR-- 091 060 030 - 1572375006
|
||||
9 Power_On_Hours -O--CK 089 089 000 - 9885
|
||||
10 Spin_Retry_Count PO--C- 100 100 097 - 0
|
||||
12 Power_Cycle_Count -O--CK 100 100 020 - 265
|
||||
184 End-to-End_Error -O--CK 100 100 099 - 0
|
||||
187 Reported_Uncorrect -O--CK 100 100 000 - 0
|
||||
188 Command_Timeout -O--CK 100 100 000 - 0
|
||||
189 High_Fly_Writes -O--CK 100 100 000 - 0
|
||||
190 Airflow_Temperature_Cel -O-RCK 045 036 045 NOW 55 (147 229 55 24 0)
|
||||
191 G-Sense_Error_Rate -O---K 100 100 000 - 0
|
||||
192 Power-Off_Retract_Count -O--CK 100 100 000 - 57
|
||||
193 Load_Cycle_Count -O--CK 100 100 000 - 265
|
||||
194 Temperature_Celsius -O--CK 055 064 000 - 55 (0 16 0 0 0)
|
||||
195 Hardware_ECC_Recovered -O---K 023 013 000 - 215697022
|
||||
197 Current_Pending_Sector -O--C- 100 100 000 - 0
|
||||
198 Offline_Uncorrectable ----C- 100 100 000 - 0
|
||||
199 UDMA_CRC_Error_Count -OSRCK 200 200 000 - 0
|
||||
||||||_ K auto-keep
|
||||
|||||__ C event count
|
||||
||||___ R error rate
|
||||
|||____ S speed/performance
|
||||
||_____ O updated online
|
||||
|______ P prefailure warning
|
||||
237
src/test/disk_tests/hdd_smart/sdb_smart_expected.json
Normal file
237
src/test/disk_tests/hdd_smart/sdb_smart_expected.json
Normal file
@ -0,0 +1,237 @@
|
||||
{
|
||||
"attributes" : [
|
||||
{
|
||||
"threshold" : 44,
|
||||
"fail" : "-",
|
||||
"id" : " 1",
|
||||
"name" : "Raw_Read_Error_Rate",
|
||||
"worst" : 63,
|
||||
"raw" : "215697022",
|
||||
"normalized" : 83,
|
||||
"value" : 83,
|
||||
"flags" : "POSR--"
|
||||
},
|
||||
{
|
||||
"flags" : "PO----",
|
||||
"normalized" : 92,
|
||||
"value" : 92,
|
||||
"id" : " 3",
|
||||
"raw" : "0",
|
||||
"worst" : 91,
|
||||
"name" : "Spin_Up_Time",
|
||||
"threshold" : 0,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 20,
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"worst" : 100,
|
||||
"name" : "Start_Stop_Count",
|
||||
"raw" : "265",
|
||||
"id" : " 4"
|
||||
},
|
||||
{
|
||||
"flags" : "PO--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"id" : " 5",
|
||||
"raw" : "0",
|
||||
"name" : "Reallocated_Sector_Ct",
|
||||
"worst" : 100,
|
||||
"threshold" : 10,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"id" : " 7",
|
||||
"raw" : "1572375006",
|
||||
"name" : "Seek_Error_Rate",
|
||||
"worst" : 60,
|
||||
"flags" : "POSR--",
|
||||
"normalized" : 91,
|
||||
"value" : 91,
|
||||
"threshold" : 30,
|
||||
"fail" : "-"
|
||||
},
|
||||
{
|
||||
"raw" : "9885",
|
||||
"name" : "Power_On_Hours",
|
||||
"worst" : 89,
|
||||
"id" : " 9",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 89,
|
||||
"value" : 89,
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 97,
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "PO--C-",
|
||||
"worst" : 100,
|
||||
"name" : "Spin_Retry_Count",
|
||||
"raw" : "0",
|
||||
"id" : " 10"
|
||||
},
|
||||
{
|
||||
"threshold" : 20,
|
||||
"fail" : "-",
|
||||
"id" : " 12",
|
||||
"raw" : "265",
|
||||
"name" : "Power_Cycle_Count",
|
||||
"worst" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"name" : "End-to-End_Error",
|
||||
"worst" : 100,
|
||||
"raw" : "0",
|
||||
"id" : "184",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"fail" : "-",
|
||||
"threshold" : 99
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"id" : "187",
|
||||
"raw" : "0",
|
||||
"name" : "Reported_Uncorrect",
|
||||
"worst" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"id" : "188",
|
||||
"raw" : "0",
|
||||
"name" : "Command_Timeout",
|
||||
"worst" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"raw" : "0",
|
||||
"worst" : 100,
|
||||
"name" : "High_Fly_Writes",
|
||||
"id" : "189",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"flags" : "-O-RCK",
|
||||
"normalized" : 45,
|
||||
"value" : 45,
|
||||
"raw" : "55 (147 229 55 24 0)",
|
||||
"worst" : 36,
|
||||
"name" : "Airflow_Temperature_Cel",
|
||||
"id" : "190",
|
||||
"fail" : "NOW",
|
||||
"threshold" : 45
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"raw" : "0",
|
||||
"worst" : 100,
|
||||
"name" : "G-Sense_Error_Rate",
|
||||
"id" : "191",
|
||||
"flags" : "-O---K",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"id" : "192",
|
||||
"raw" : "57",
|
||||
"worst" : 100,
|
||||
"name" : "Power-Off_Retract_Count",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 100,
|
||||
"value" : 100
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--CK",
|
||||
"name" : "Load_Cycle_Count",
|
||||
"worst" : 100,
|
||||
"raw" : "265",
|
||||
"id" : "193"
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"flags" : "-O--CK",
|
||||
"normalized" : 55,
|
||||
"value" : 55,
|
||||
"id" : "194",
|
||||
"raw" : "55 (0 16 0 0 0)",
|
||||
"name" : "Temperature_Celsius",
|
||||
"worst" : 64
|
||||
},
|
||||
{
|
||||
"threshold" : 0,
|
||||
"fail" : "-",
|
||||
"id" : "195",
|
||||
"name" : "Hardware_ECC_Recovered",
|
||||
"worst" : 13,
|
||||
"raw" : "215697022",
|
||||
"normalized" : 23,
|
||||
"value" : 23,
|
||||
"flags" : "-O---K"
|
||||
},
|
||||
{
|
||||
"worst" : 100,
|
||||
"name" : "Current_Pending_Sector",
|
||||
"raw" : "0",
|
||||
"id" : "197",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "-O--C-",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"worst" : 100,
|
||||
"name" : "Offline_Uncorrectable",
|
||||
"raw" : "0",
|
||||
"id" : "198",
|
||||
"normalized" : 100,
|
||||
"value" : 100,
|
||||
"flags" : "----C-",
|
||||
"fail" : "-",
|
||||
"threshold" : 0
|
||||
},
|
||||
{
|
||||
"fail" : "-",
|
||||
"threshold" : 0,
|
||||
"normalized" : 200,
|
||||
"value" : 200,
|
||||
"flags" : "-OSRCK",
|
||||
"worst" : 200,
|
||||
"name" : "UDMA_CRC_Error_Count",
|
||||
"raw" : "0",
|
||||
"id" : "199"
|
||||
}
|
||||
],
|
||||
"type" : "ata",
|
||||
"health" : "PASSED"
|
||||
}
|
||||
11
src/test/disk_tests/hdd_smart/sdb_udevadm
Normal file
11
src/test/disk_tests/hdd_smart/sdb_udevadm
Normal file
@ -0,0 +1,11 @@
|
||||
E: DEVNAME=/dev/sdb
|
||||
E: DEVTYPE=disk
|
||||
E: ID_ATA_ROTATION_RATE_RPM=7200
|
||||
E: ID_BUS=ata
|
||||
E: ID_MODEL=ST4000NM0033-9ZM170
|
||||
E: ID_PART_TABLE_TYPE=gpt
|
||||
E: ID_PART_TABLE_UUID=4f2e07a6-5437-2b4e-b6e8-9cba98639324
|
||||
E: ID_SERIAL_SHORT=00000000
|
||||
E: ID_TYPE=disk
|
||||
E: ID_WWN=0x0000000000000000
|
||||
E: ID_WWN_WITH_EXTENSION=0x0000000000000000
|
||||
1
src/test/disk_tests/nvme_smart/disklist
Normal file
1
src/test/disk_tests/nvme_smart/disklist
Normal file
@ -0,0 +1 @@
|
||||
nvme0n1
|
||||
16
src/test/disk_tests/nvme_smart/disklist_expected.json
Normal file
16
src/test/disk_tests/nvme_smart/disklist_expected.json
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"nvme0n1" : {
|
||||
"wearout" : 69,
|
||||
"vendor" : "unknown",
|
||||
"size" : 512000,
|
||||
"health" : "PASSED",
|
||||
"serial" : "unknown",
|
||||
"model" : "NVME MODEL 1",
|
||||
"rpm" : 0,
|
||||
"osdid" : -1,
|
||||
"devpath" : "/dev/nvme0n1",
|
||||
"gpt" : 0,
|
||||
"wwn" : "unknown",
|
||||
"type" : "nvme"
|
||||
}
|
||||
}
|
||||
1
src/test/disk_tests/nvme_smart/nvme0n1/device/model
Normal file
1
src/test/disk_tests/nvme_smart/nvme0n1/device/model
Normal file
@ -0,0 +1 @@
|
||||
NVME MODEL 1
|
||||
1
src/test/disk_tests/nvme_smart/nvme0n1/queue/rotational
Normal file
1
src/test/disk_tests/nvme_smart/nvme0n1/queue/rotational
Normal file
@ -0,0 +1 @@
|
||||
0
|
||||
1
src/test/disk_tests/nvme_smart/nvme0n1/size
Normal file
1
src/test/disk_tests/nvme_smart/nvme0n1/size
Normal file
@ -0,0 +1 @@
|
||||
1000
|
||||
22
src/test/disk_tests/nvme_smart/nvme0n1_smart
Normal file
22
src/test/disk_tests/nvme_smart/nvme0n1_smart
Normal file
@ -0,0 +1,22 @@
|
||||
smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.4.19-1-pve] (local build)
|
||||
Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
|
||||
|
||||
=== START OF SMART DATA SECTION ===
|
||||
SMART overall-health self-assessment test result: PASSED
|
||||
|
||||
SMART/Health Information (NVMe Log 0x02, NSID 0xffffffff)
|
||||
Critical Warning: 0x00
|
||||
Temperature: 32 Celsius
|
||||
Available Spare: 100%
|
||||
Available Spare Threshold: 10%
|
||||
Percentage Used: 31%
|
||||
Data Units Read: 1,299,288 [665 GB]
|
||||
Data Units Written: 5,592,478 [2.86 TB]
|
||||
Host Read Commands: 30,360,807
|
||||
Host Write Commands: 470,356,196
|
||||
Controller Busy Time: 12
|
||||
Power Cycles: 98
|
||||
Power On Hours: 687
|
||||
Unsafe Shutdowns: 21
|
||||
Media and Data Integrity Errors: 0
|
||||
Error Information Log Entries: 0
|
||||
@ -0,0 +1,6 @@
|
||||
{
|
||||
"text" : "\nSMART/Health Information (NVMe Log 0x02, NSID 0xffffffff)\nCritical Warning: 0x00\nTemperature: 32 Celsius\nAvailable Spare: 100%\nAvailable Spare Threshold: 10%\nPercentage Used: 31%\nData Units Read: 1,299,288 [665 GB]\nData Units Written: 5,592,478 [2.86 TB]\nHost Read Commands: 30,360,807\nHost Write Commands: 470,356,196\nController Busy Time: 12\nPower Cycles: 98\nPower On Hours: 687\nUnsafe Shutdowns: 21\nMedia and Data Integrity Errors: 0\nError Information Log Entries: 0\n",
|
||||
"health" : "PASSED",
|
||||
"type" : "text",
|
||||
"wearout": 69
|
||||
}
|
||||
18
src/test/disk_tests/nvme_smart/nvme0n1_udevadm
Normal file
18
src/test/disk_tests/nvme_smart/nvme0n1_udevadm
Normal file
@ -0,0 +1,18 @@
|
||||
|
||||
P: /devices/pci0000:00/0000:00:01.1/0000:02:00.0/nvme/nvme0/nvme0n1
|
||||
N: nvme0n1
|
||||
S: disk/by-id/lvm-pv-uuid-Py4eod-qfzj-i8Q3-Dxu6-xf0Q-H3Wr-w5Fo8V
|
||||
E: DEVLINKS=/dev/disk/by-id/lvm-pv-uuid-Py4eod-qfzj-i8Q3-Dxu6-xf0Q-H3Wr-w5Fo8V
|
||||
E: DEVNAME=/dev/nvme0n1
|
||||
E: DEVPATH=/devices/pci0000:00/0000:00:01.1/0000:02:00.0/nvme/nvme0/nvme0n1
|
||||
E: DEVTYPE=disk
|
||||
E: ID_FS_TYPE=LVM2_member
|
||||
E: ID_FS_USAGE=raid
|
||||
E: ID_FS_UUID=Py4eod-qfzj-i8Q3-Dxu6-xf0Q-H3Wr-w5Fo8V
|
||||
E: ID_FS_UUID_ENC=Py4eod-qfzj-i8Q3-Dxu6-xf0Q-H3Wr-w5Fo8V
|
||||
E: ID_FS_VERSION=LVM2 001
|
||||
E: MAJOR=259
|
||||
E: MINOR=0
|
||||
E: SUBSYSTEM=block
|
||||
E: TAGS=:systemd:
|
||||
E: USEC_INITIALIZED=3842
|
||||
1
src/test/disk_tests/sas/disklist
Normal file
1
src/test/disk_tests/sas/disklist
Normal file
@ -0,0 +1 @@
|
||||
sda
|
||||
17
src/test/disk_tests/sas/disklist_expected.json
Normal file
17
src/test/disk_tests/sas/disklist_expected.json
Normal file
@ -0,0 +1,17 @@
|
||||
{
|
||||
"sda" : {
|
||||
"gpt" : 1,
|
||||
"devpath" : "/dev/sda",
|
||||
"type" : "unknown",
|
||||
"model" : "MODEL1",
|
||||
"health" : "UNKNOWN",
|
||||
"osdid" : -1,
|
||||
"wwn" : "0x0000000000000000",
|
||||
"vendor" : "VENDOR1",
|
||||
"rpm" : -1,
|
||||
"size" : 5120000,
|
||||
"serial" : "SER2",
|
||||
"wearout" : "N/A",
|
||||
"by_id_link" : "/dev/disk/by-id/scsi-00000000000000000"
|
||||
}
|
||||
}
|
||||
1
src/test/disk_tests/sas/sda/device/model
Normal file
1
src/test/disk_tests/sas/sda/device/model
Normal file
@ -0,0 +1 @@
|
||||
MODEL1
|
||||
1
src/test/disk_tests/sas/sda/device/vendor
Normal file
1
src/test/disk_tests/sas/sda/device/vendor
Normal file
@ -0,0 +1 @@
|
||||
VENDOR1
|
||||
1
src/test/disk_tests/sas/sda/queue/rotational
Normal file
1
src/test/disk_tests/sas/sda/queue/rotational
Normal file
@ -0,0 +1 @@
|
||||
1
|
||||
1
src/test/disk_tests/sas/sda/size
Normal file
1
src/test/disk_tests/sas/sda/size
Normal file
@ -0,0 +1 @@
|
||||
10000
|
||||
20
src/test/disk_tests/sas/sda_smart
Normal file
20
src/test/disk_tests/sas/sda_smart
Normal file
@ -0,0 +1,20 @@
|
||||
=== START OF READ SMART DATA SECTION ===
|
||||
SMART Health Status: OK
|
||||
|
||||
Percentage used endurance indicator: 0%
|
||||
Current Drive Temperature: 20 C
|
||||
Drive Trip Temperature: 70 C
|
||||
|
||||
Manufactured in week 47 of year 2012
|
||||
Specified cycle count over device lifetime: 0
|
||||
Accumulated start-stop cycles: 0
|
||||
Specified load-unload count over device lifetime: 0
|
||||
Accumulated load-unload cycles: 0
|
||||
Elements in grown defect list: 0
|
||||
|
||||
Vendor (Seagate) cache information
|
||||
Blocks sent to initiator = 1286675833552896
|
||||
|
||||
Vendor (Seagate/Hitachi) factory information
|
||||
number of hours powered up = 7127.12
|
||||
number of minutes until next internal SMART test = 0
|
||||
6
src/test/disk_tests/sas/sda_smart_expected.json
Normal file
6
src/test/disk_tests/sas/sda_smart_expected.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"health" : "OK",
|
||||
"text" : "\nPercentage used endurance indicator: 0%\nCurrent Drive Temperature: 20 C\nDrive Trip Temperature: 70 C\n\nManufactured in week 47 of year 2012\nSpecified cycle count over device lifetime: 0\nAccumulated start-stop cycles: 0\nSpecified load-unload count over device lifetime: 0\nAccumulated load-unload cycles: 0\nElements in grown defect list: 0\n\nVendor (Seagate) cache information\n Blocks sent to initiator = 1286675833552896\n\nVendor (Seagate/Hitachi) factory information\n number of hours powered up = 7127.12\n number of minutes until next internal SMART test = 0\n",
|
||||
"type" : "text",
|
||||
"wearout": 100
|
||||
}
|
||||
31
src/test/disk_tests/sas/sda_udevadm
Normal file
@ -0,0 +1,31 @@
P: /devices/pci0000:00/0000:00:03.0/0000:02:00.0/host4/port-4:0/end_device-4:0/target4:0:0/4:0:0:0/block/sda
N: sda
S: disk/by-id/scsi-00000000000000000
S: disk/by-id/wwn-0x0000000000000000
S: disk/by-path/pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: DEVLINKS=/dev/disk/by-id/scsi-00000000000000000 /dev/disk/by-id/wwn-0x0000000000000000 /dev/disk/by-path/pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: DEVNAME=/dev/sda
E: DEVPATH=/devices/pci0000:00/0000:00:03.0/0000:02:00.0/host4/port-4:0/end_device-4:0/target4:0:0/4:0:0:0/block/sda
E: DEVTYPE=disk
E: ID_BUS=scsi
E: ID_MODEL=MODEL1
E: ID_MODEL_ENC=MODEL1\x20\x20\x20\x20\x20\x20
E: ID_PART_TABLE_TYPE=gpt
E: ID_PART_TABLE_UUID=605740f0-44a1-4dc5-9fea-bde166df963e
E: ID_PATH=pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: ID_PATH_TAG=pci-0000_02_00_0-sas-0x0000000000000000-lun-0
E: ID_REVISION=ES64
E: ID_SCSI=1
E: ID_SCSI_SERIAL=SERIAL
E: ID_SERIAL=SERIAL2
E: ID_SERIAL_SHORT=SER2
E: ID_TYPE=disk
E: ID_VENDOR=VENDOR1
E: ID_VENDOR_ENC=VENDOR1\x20
E: ID_WWN=0x0000000000000000
E: ID_WWN_WITH_EXTENSION=0x0000000000000000
E: MAJOR=8
E: MINOR=0
E: SUBSYSTEM=block
E: TAGS=:systemd:
E: USEC_INITIALIZED=667541
1
src/test/disk_tests/sas_ssd/disklist
Normal file
@ -0,0 +1 @@
sda
17
src/test/disk_tests/sas_ssd/disklist_expected.json
Normal file
@ -0,0 +1,17 @@
{
"sda" : {
"gpt" : 1,
"devpath" : "/dev/sda",
"type" : "ssd",
"model" : "MODEL1",
"health" : "OK",
"osdid" : -1,
"wwn" : "0x0000000000000000",
"vendor" : "VENDOR1",
"rpm" : 0,
"size" : 5120000,
"serial" : "SER2",
"wearout" : 100,
"by_id_link" : "/dev/disk/by-id/scsi-00000000000000000"
}
}
1
src/test/disk_tests/sas_ssd/sda/device/model
Normal file
@ -0,0 +1 @@
MODEL1
1
src/test/disk_tests/sas_ssd/sda/device/vendor
Normal file
@ -0,0 +1 @@
VENDOR1
1
src/test/disk_tests/sas_ssd/sda/queue/rotational
Normal file
@ -0,0 +1 @@
0
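The sas and sas_ssd scenarios differ mainly in this rotational flag (1 for the plain SAS disk, 0 here), and the expected type flips from "unknown" to "ssd" accordingly, while the rotational case stays "unknown" because no rotation rate is reported. A rough sketch of that kind of classification, as an assumption about the logic rather than a quote of the module:

    # $rotational: contents of /sys/block/<dev>/queue/rotational (0 or 1).
    # $rpm: rotation rate if one could be determined, -1 otherwise.
    # e.g. guess_disk_type(0, 0) -> 'ssd', guess_disk_type(1, -1) -> 'unknown'
    sub guess_disk_type {
        my ($rotational, $rpm) = @_;
        return 'ssd' if defined($rotational) && $rotational == 0;
        return 'hdd' if defined($rpm) && $rpm > 0;
        return 'unknown';
    }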
1
src/test/disk_tests/sas_ssd/sda/size
Normal file
@ -0,0 +1 @@
10000
20
src/test/disk_tests/sas_ssd/sda_smart
Normal file
@ -0,0 +1,20 @@
=== START OF READ SMART DATA SECTION ===
SMART Health Status: OK

Percentage used endurance indicator: 0%
Current Drive Temperature: 20 C
Drive Trip Temperature: 70 C

Manufactured in week 47 of year 2012
Specified cycle count over device lifetime: 0
Accumulated start-stop cycles: 0
Specified load-unload count over device lifetime: 0
Accumulated load-unload cycles: 0
Elements in grown defect list: 0

Vendor (Seagate) cache information
Blocks sent to initiator = 1286675833552896

Vendor (Seagate/Hitachi) factory information
number of hours powered up = 7127.12
number of minutes until next internal SMART test = 0
6
src/test/disk_tests/sas_ssd/sda_smart_expected.json
Normal file
@ -0,0 +1,6 @@
{
"health" : "OK",
"text" : "\nPercentage used endurance indicator: 0%\nCurrent Drive Temperature: 20 C\nDrive Trip Temperature: 70 C\n\nManufactured in week 47 of year 2012\nSpecified cycle count over device lifetime: 0\nAccumulated start-stop cycles: 0\nSpecified load-unload count over device lifetime: 0\nAccumulated load-unload cycles: 0\nElements in grown defect list: 0\n\nVendor (Seagate) cache information\n Blocks sent to initiator = 1286675833552896\n\nVendor (Seagate/Hitachi) factory information\n number of hours powered up = 7127.12\n number of minutes until next internal SMART test = 0\n",
"type" : "text",
"wearout": 100
}
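Both SAS smart fixtures report "Percentage used endurance indicator: 0%" and expect "wearout": 100, which suggests the wearout figure is 100 minus the used-endurance percentage; the disk-list fixtures above then only carry it for the SSD case, with the rotational SAS disk showing "N/A". A sketch of that derivation under this reading of the fixtures; the helper name is illustrative:

    # $smart_text: raw smartctl output, as in the sda_smart fixture above.
    sub wearout_from_endurance_indicator {
        my ($smart_text) = @_;
        if ($smart_text =~ m/^Percentage used endurance indicator:\s*(\d+)%/m) {
            return 100 - $1;    # 0% used -> wearout 100, i.e. full life left
        }
        return undef;    # unknown; a caller may render this as "N/A"
    }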
31
src/test/disk_tests/sas_ssd/sda_udevadm
Normal file
@ -0,0 +1,31 @@
P: /devices/pci0000:00/0000:00:03.0/0000:02:00.0/host4/port-4:0/end_device-4:0/target4:0:0/4:0:0:0/block/sda
N: sda
S: disk/by-id/scsi-00000000000000000
S: disk/by-id/wwn-0x0000000000000000
S: disk/by-path/pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: DEVLINKS=/dev/disk/by-id/scsi-00000000000000000 /dev/disk/by-id/wwn-0x0000000000000000 /dev/disk/by-path/pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: DEVNAME=/dev/sda
E: DEVPATH=/devices/pci0000:00/0000:00:03.0/0000:02:00.0/host4/port-4:0/end_device-4:0/target4:0:0/4:0:0:0/block/sda
E: DEVTYPE=disk
E: ID_BUS=scsi
E: ID_MODEL=MODEL1
E: ID_MODEL_ENC=MODEL1\x20\x20\x20\x20\x20\x20
E: ID_PART_TABLE_TYPE=gpt
E: ID_PART_TABLE_UUID=605740f0-44a1-4dc5-9fea-bde166df963e
E: ID_PATH=pci-0000:02:00.0-sas-0x0000000000000000-lun-0
E: ID_PATH_TAG=pci-0000_02_00_0-sas-0x0000000000000000-lun-0
E: ID_REVISION=ES64
E: ID_SCSI=1
E: ID_SCSI_SERIAL=SERIAL
E: ID_SERIAL=SERIAL2
E: ID_SERIAL_SHORT=SER2
E: ID_TYPE=disk
E: ID_VENDOR=VENDOR1
E: ID_VENDOR_ENC=VENDOR1\x20
E: ID_WWN=0x0000000000000000
E: ID_WWN_WITH_EXTENSION=0x0000000000000000
E: MAJOR=8
E: MINOR=0
E: SUBSYSTEM=block
E: TAGS=:systemd:
E: USEC_INITIALIZED=667541
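The *_udevadm fixtures are in the udevadm info export format, and the expected JSON appears to take its identity fields from them (ID_SERIAL_SHORT -> serial, ID_WWN -> wwn, ID_VENDOR -> vendor, the scsi-* entry in DEVLINKS -> by_id_link). A minimal parser sketch under that assumption; the helper name and field mapping are illustrative:

    # Collect the "E: KEY=value" properties of a udevadm info dump into a hash.
    sub parse_udev_properties {
        my ($udev_text) = @_;
        my $props = {};
        for my $line (split /\n/, $udev_text) {
            $props->{$1} = $2 if $line =~ m/^E:\s+([A-Z0-9_]+)=(.*)$/;
        }
        return $props;
    }

    # e.g. my $p = parse_udev_properties($dump);
    # serial => $p->{ID_SERIAL_SHORT}, wwn => $p->{ID_WWN}, vendor => $p->{ID_VENDOR}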
5
src/test/disk_tests/ssd_smart/disklist
Normal file
@ -0,0 +1,5 @@
sda
sdb
sdc
sdd
sde
72
src/test/disk_tests/ssd_smart/disklist_expected.json
Normal file
@ -0,0 +1,72 @@
{
"sda" : {
"serial" : "000000000000",
"vendor" : "ATA",
"rpm" : 0,
"gpt" : 1,
"health" : "PASSED",
"wearout" : "100",
"osdid" : -1,
"size" : 512000,
"type" : "ssd",
"devpath" : "/dev/sda",
"model" : "Crucial_CT500MX200SSD1",
"wwn" : "0x0000000000000000"
},
"sdb" : {
"model" : "INTEL_SSDSC2BB080G6",
"devpath" : "/dev/sdb",
"osdid" : -1,
"type" : "ssd",
"size" : 512000,
"wwn" : "0x0000000000000000",
"gpt" : 1,
"rpm" : 0,
"vendor" : "ATA",
"serial" : "000000000000000000",
"wearout" : "97",
"health" : "PASSED"
},
"sdc" : {
"wwn" : "0x0000000000000000",
"devpath" : "/dev/sdc",
"model" : "Samsung SSD 850 PRO 512GB",
"osdid" : -1,
"type" : "ssd",
"size" : 512000,
"wearout" : "99",
"health" : "PASSED",
"gpt" : 1,
"rpm" : 0,
"vendor" : "ATA",
"serial" : "000000000000"
},
"sdd" : {
"rpm" : 0,
"gpt" : 1,
"serial" : "000000000000",
"vendor" : "ATA",
"wearout" : "100",
"health" : "PASSED",
"devpath" : "/dev/sdd",
"model" : "SanDisk SD8SB8U1T001122",
"size" : 512000,
"osdid" : -1,
"type" : "ssd",
"wwn" : "0x0000000000000000"
},
"sde" : {
"type" : "ssd",
"osdid" : -1,
"size" : 512000,
"model" : "KINGSTON SHFS37A120G",
"devpath" : "/dev/sde",
"wwn" : "0x0000000000000000",
"vendor" : "ATA",
"serial" : "000000000000",
"gpt" : 1,
"rpm" : 0,
"health" : "PASSED",
"wearout" : "91"
}
}
1
src/test/disk_tests/ssd_smart/sda/device/vendor
Normal file
@ -0,0 +1 @@
ATA
1
src/test/disk_tests/ssd_smart/sda/queue/rotational
Normal file
@ -0,0 +1 @@
0
1
src/test/disk_tests/ssd_smart/sda/size
Normal file
@ -0,0 +1 @@
1000
Some files were not shown because too many files have changed in this diff.