drop support for using GlusterFS directly

The GlusterFS project has been unmaintained for a while now, and other
projects like QEMU have also dropped support for using it natively.

One can still use the gluster tools to mount an instance manually and
then use it as directory storage; the better (long-term) option, though,
will be to replace the storage server with something maintained. As
PVE 8 will be supported until the middle of 2026, users have some time
before they need to decide which way they will go.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Thomas Lamprecht
2025-06-16 16:05:57 +02:00
parent a734efcbd3
commit 7669a99e97
7 changed files with 1 addition and 447 deletions

1
debian/control vendored
View File

@ -32,7 +32,6 @@ Depends: bzip2,
ceph-fuse,
cifs-utils,
cstream,
glusterfs-client (>= 3.4.0-2),
libfile-chdir-perl,
libposix-strptime-perl,
libpve-access-control (>= 8.1.2),

View File

@ -43,7 +43,6 @@ __PACKAGE__->register_method({
my $res = [
{ method => 'cifs' },
{ method => 'glusterfs' },
{ method => 'iscsi' },
{ method => 'lvm' },
{ method => 'nfs' },
@ -245,58 +244,6 @@ __PACKAGE__->register_method({
},
});
# Note: GlusterFS currently does not have an equivalent of showmount.
# As workaround, we simply use nfs showmount.
# see http://www.gluster.org/category/volumes/
__PACKAGE__->register_method({
    name => 'glusterfsscan',
    path => 'glusterfs',
    method => 'GET',
    description => "Scan remote GlusterFS server.",
    protected => 1,
    proxyto => "node",
    permissions => {
        check => ['perm', '/storage', ['Datastore.Allocate']],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            server => {
                description => "The server address (name or IP).",
                type => 'string',
                format => 'pve-storage-server',
            },
        },
    },
    returns => {
        type => 'array',
        items => {
            type => "object",
            properties => {
                volname => {
                    description => "The volume name.",
                    type => 'string',
                },
            },
        },
    },
    code => sub {
        my ($param) = @_;

        my $server = $param->{server};

        # NFS showmount stands in for a native gluster scan: exported
        # gluster volumes show up as NFS export paths on the server.
        my $res = PVE::Storage::scan_nfs($server);

        my $data = [];

        # Only single-component export paths ("/<volname>", no whitespace
        # or nested slashes) are treated as gluster volume names.
        foreach my $path (sort keys %$res) {
            if ($path =~ m!^/([^\s/]+)$!) {
                push @$data, { volname => $1 };
            }
        }

        return $data;
    },
});
__PACKAGE__->register_method({
name => 'iscsiscan',
path => 'iscsi',

View File

@ -674,19 +674,6 @@ our $cmddef = {
}
},
],
glusterfs => [
"PVE::API2::Storage::Scan",
'glusterfsscan',
['server'],
{ node => $nodename },
sub {
my $res = shift;
foreach my $rec (@$res) {
printf "%s\n", $rec->{volname};
}
},
],
iscsi => [
"PVE::API2::Storage::Scan",
'iscsiscan',
@ -753,7 +740,6 @@ our $cmddef = {
},
nfsscan => { alias => 'scan nfs' },
cifsscan => { alias => 'scan cifs' },
glusterfsscan => { alias => 'scan glusterfs' },
iscsiscan => { alias => 'scan iscsi' },
lvmscan => { alias => 'scan lvm' },
lvmthinscan => { alias => 'scan lvmthin' },

View File

@ -34,7 +34,6 @@ use PVE::Storage::ISCSIPlugin;
use PVE::Storage::RBDPlugin;
use PVE::Storage::CephFSPlugin;
use PVE::Storage::ISCSIDirectPlugin;
use PVE::Storage::GlusterfsPlugin;
use PVE::Storage::ZFSPoolPlugin;
use PVE::Storage::ZFSPlugin;
use PVE::Storage::PBSPlugin;
@ -60,7 +59,6 @@ PVE::Storage::ISCSIPlugin->register();
PVE::Storage::RBDPlugin->register();
PVE::Storage::CephFSPlugin->register();
PVE::Storage::ISCSIDirectPlugin->register();
PVE::Storage::GlusterfsPlugin->register();
PVE::Storage::ZFSPoolPlugin->register();
PVE::Storage::ZFSPlugin->register();
PVE::Storage::PBSPlugin->register();

View File

@ -1,375 +0,0 @@
package PVE::Storage::GlusterfsPlugin;
use strict;
use warnings;
use IO::File;
use File::Path;
use PVE::Tools qw(run_command);
use PVE::ProcFSTools;
use PVE::Network;
use PVE::Storage::Plugin;
use PVE::JSONSchema qw(get_standard_option);
use base qw(PVE::Storage::Plugin);
# Glusterfs helper functions

# Cache of recent reachability probes, keyed by server name:
# { <server> => { time => <epoch of probe>, active => <0|1> } }
my $server_test_results = {};

# Pick the first reachable server from the configured server/server2 pair.
# With $return_default_if_offline set, the primary server is returned even
# when no probe succeeded (callers that merely build a path still need a
# name); otherwise undef signals that no server is reachable.
my $get_active_server = sub {
    my ($scfg, $return_default_if_offline) = @_;

    my $defaultserver = $scfg->{server} ? $scfg->{server} : 'localhost';

    if ($return_default_if_offline && !defined($scfg->{server2})) {
        # avoid delays (there is no backup server anyways)
        return $defaultserver;
    }

    my $serverlist = [$defaultserver];
    push @$serverlist, $scfg->{server2} if $scfg->{server2};

    my $ctime = time();

    # First pass: reuse a cached probe result if it is at most 2 seconds old.
    foreach my $server (@$serverlist) {
        my $stat = $server_test_results->{$server};
        return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2);
    }

    # Second pass: actively probe each candidate and cache the outcome.
    foreach my $server (@$serverlist) {
        my $status = 0;

        if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') {
            # ping the gluster daemon default port (24007) as heuristic
            $status = PVE::Network::tcp_ping($server, 24007, 2);
        } else {
            # For the local host, ask the gluster CLI whether the configured
            # volume is in the "Started" state instead of probing the port.
            my $parser = sub {
                my $line = shift;

                if ($line =~ m/Status: Started$/) {
                    $status = 1;
                }
            };

            my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];

            run_command(
                $cmd,
                errmsg => "glusterfs error",
                errfunc => sub { },
                outfunc => $parser,
            );
        }

        $server_test_results->{$server} = { time => time(), active => $status };
        return $server if $status;
    }

    return $defaultserver if $return_default_if_offline;

    return undef;
};
# Check whether $volume is mounted at $mountpoint as a glusterfs FUSE mount.
# $mountdata is an optional pre-parsed mount table (arrayref of
# [source, target, fstype] entries); it is read from /proc/mounts when absent.
# Returns the mount point on success, undef otherwise.
sub glusterfs_is_mounted {
    my ($volume, $mountpoint, $mountdata) = @_;

    $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;

    for my $entry (@$mountdata) {
        my ($source, $target, $fstype) = @$entry;

        next if $fstype ne 'fuse.glusterfs';
        next if $target ne $mountpoint;

        # gluster mount sources have the form "<server>:<volume>"
        return $mountpoint if $source =~ /^\S+:\Q$volume\E$/;
    }

    return undef;
}
# Mount gluster volume "<$server>:<$volume>" at $mountpoint via the
# glusterfs FUSE helper; dies (through run_command) on failure.
sub glusterfs_mount {
    my ($server, $volume, $mountpoint) = @_;

    my @mount_cmd = ('/bin/mount', '-t', 'glusterfs', "$server:$volume", $mountpoint);

    run_command(\@mount_cmd, errmsg => "mount error");
}
# Configuration

# Plugin type identifier as used in storage.cfg section headers.
sub type {
    return 'glusterfs';
}
# Static plugin capabilities: supported content types (with images-only as
# the default), supported image formats (raw as default), and the set of
# sensitive properties (none for this plugin).
sub plugindata {
    my $supported_content = {
        images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1, import => 1,
    };

    return {
        content => [$supported_content, { images => 1 }],
        format => [{ raw => 1, qcow2 => 1, vmdk => 1 }, 'raw'],
        'sensitive-properties' => {},
    };
}
# Schema for the glusterfs-specific configuration properties; properties
# shared with other storage types come from the base plugin.
sub properties {
    my %schema = (
        volume => {
            description => "Glusterfs Volume.",
            type => 'string',
        },
        server2 => {
            description => "Backup volfile server IP or DNS name.",
            type => 'string',
            format => 'pve-storage-server',
            requires => 'server',
        },
        transport => {
            description => "Gluster transport: tcp or rdma",
            type => 'string',
            enum => ['tcp', 'rdma', 'unix'],
        },
    );

    return \%schema;
}
# Config keys accepted by a glusterfs storage section and whether they are
# fixed at creation time or optional.
sub options {
    my %allowed = (
        # fixed at creation
        path => { fixed => 1 },
        volume => { fixed => 1 },
        # optional settings
        server => { optional => 1 },
        server2 => { optional => 1 },
        transport => { optional => 1 },
        nodes => { optional => 1 },
        disable => { optional => 1 },
        maxfiles => { optional => 1 },
        'prune-backups' => { optional => 1 },
        'max-protected-backups' => { optional => 1 },
        content => { optional => 1 },
        format => { optional => 1 },
        mkdir => { optional => 1 },
        'create-base-path' => { optional => 1 },
        'create-subdirs' => { optional => 1 },
        bwlimit => { optional => 1 },
        preallocation => { optional => 1 },
    );

    return \%allowed;
}
# Validate a section config; on creation, default the mount point to the
# conventional /mnt/pve/<storeid> location before delegating to the base
# plugin's schema check.
sub check_config {
    my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;

    if ($create && !$config->{path}) {
        $config->{path} = "/mnt/pve/$sectionId";
    }

    return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
}
# Storage implementation
# Split a volume file name into (name, format, base-prefix).
# Names look like "[base-]<stem>.<ext>" with ext one of raw/qcow2/vmdk and
# no whitespace or path separators; the third element is "base-" for base
# images and undef otherwise. Dies on anything that does not match.
sub parse_name_dir {
    my $name = shift;

    if ($name =~ m!^(?<full>(?<base>base-)?[^/\s]+\.(?<fmt>raw|qcow2|vmdk))$!) {
        return ($+{full}, $+{fmt}, $+{base});
    }

    die "unable to parse volume filename '$name'\n";
}
# Resolve a volume name to its access path: a gluster:// URL for images
# (consumed natively by QEMU), or a plain file path below the FUSE mount
# for all other content types. In list context also returns the owning
# VMID and the content type.
sub path {
    my ($class, $scfg, $volname, $storeid, $snapname) = @_;

    my ($vtype, $name, $vmid, undef, undef, $isBase, $format) = $class->parse_volname($volname);

    # Note: qcow2/qed has internal snapshot, so path is always
    # the same (with or without snapshot => same file).
    die "can't snapshot this image format\n"
        if defined($snapname) && $format !~ m/^(qcow2|qed)$/;

    my $path = undef;
    if ($vtype eq 'images') {
        # Use the default server even when offline (second arg 1) — a path
        # must still be constructible for config purposes.
        my $server = &$get_active_server($scfg, 1);
        my $glustervolume = $scfg->{volume};
        my $transport = $scfg->{transport};
        my $protocol = "gluster";

        # explicit transport selects the gluster+<tcp|rdma|unix> URL scheme
        if ($transport) {
            $protocol = "gluster+$transport";
        }

        $path = "$protocol://$server/$glustervolume/images/$vmid/$name";
    } else {
        my $dir = $class->get_subdir($scfg, $vtype);
        $path = "$dir/$name";
    }

    return wantarray ? ($path, $vmid, $vtype) : $path;
}
# Create a linked clone of a base image for VM $vmid, backed by the base
# volume via a relative qcow2 backing-file reference. Returns the new
# volume name in "<basevmid>/<basename>/<vmid>/<name>" form.
sub clone_image {
    my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;

    die "storage definition has no path\n" if !$scfg->{path};

    my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
        $class->parse_volname($volname);

    # linked clones are only supported from base images of regular formats
    die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images';
    die "this storage type does not support clone_image on snapshot\n" if $snap;
    die "this storage type does not support clone_image on subvolumes\n" if $format eq 'subvol';
    die "clone_image only works on base images\n" if !$isBase;

    my $imagedir = $class->get_subdir($scfg, 'images');
    $imagedir .= "/$vmid";
    mkpath $imagedir;

    # clones are always qcow2, as only qcow2 supports backing files here
    my $name = $class->find_free_diskname($storeid, $scfg, $vmid, "qcow2", 1);

    warn "clone $volname: $vtype, $name, $vmid to $name (base=../$basevmid/$basename)\n";

    my $path = "$imagedir/$name";
    die "disk image '$path' already exists\n" if -e $path;

    # qemu-img talks to gluster natively, so target is a gluster:// URL
    # while the backing file is referenced relative to the image directory.
    my $server = &$get_active_server($scfg, 1);
    my $glustervolume = $scfg->{volume};
    my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";

    my $cmd = [
        '/usr/bin/qemu-img',
        'create',
        '-b',
        "../$basevmid/$basename",
        '-F',
        $format,
        '-f',
        'qcow2',
        $volumepath,
    ];

    run_command($cmd, errmsg => "unable to create image");

    return "$basevmid/$basename/$vmid/$name";
}
# Allocate a new disk image of $size (in KiB, hence the "K" suffix below)
# for VM $vmid in format $fmt. Picks a free disk name when $name is not
# given. Returns the new volume name "<vmid>/<name>".
sub alloc_image {
    my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;

    my $imagedir = $class->get_subdir($scfg, 'images');
    $imagedir .= "/$vmid";
    mkpath $imagedir;

    $name = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt, 1) if !$name;

    # the file extension must agree with the requested format
    my (undef, $tmpfmt) = parse_name_dir($name);
    die "illegal name '$name' - wrong extension for format ('$tmpfmt != '$fmt')\n"
        if $tmpfmt ne $fmt;

    my $path = "$imagedir/$name";
    die "disk image '$path' already exists\n" if -e $path;

    # create the image through qemu-img's native gluster driver
    my $server = &$get_active_server($scfg, 1);
    my $glustervolume = $scfg->{volume};
    my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";

    my $cmd = ['/usr/bin/qemu-img', 'create'];

    my $prealloc_opt = PVE::Storage::Plugin::preallocation_cmd_option($scfg, $fmt);
    push @$cmd, '-o', $prealloc_opt if defined($prealloc_opt);

    push @$cmd, '-f', $fmt, $volumepath, "${size}K";

    eval { run_command($cmd, errmsg => "unable to create image"); };
    if ($@) {
        # best-effort cleanup: remove the (possibly partial) image and the
        # per-VM directory if it ended up empty
        unlink $path;
        rmdir $imagedir;
        die "$@";
    }

    return "$vmid/$name";
}
# Report storage usage via the base (directory) implementation, but only
# while the gluster volume is actually mounted — otherwise return undef so
# the empty underlying directory is not mistaken for the storage.
sub status {
    my ($class, $storeid, $scfg, $cache) = @_;

    # parse /proc/mounts once per API call and share it via $cache
    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
        if !$cache->{mountdata};

    my $path = $scfg->{path};
    my $volume = $scfg->{volume};

    return undef if !glusterfs_is_mounted($volume, $path, $cache->{mountdata});

    return $class->SUPER::status($storeid, $scfg, $cache);
}
# Make the storage usable: mount the gluster volume at the configured path
# if it is not mounted yet, then run the base plugin's activation (which
# creates the content-type subdirectories).
sub activate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;

    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
        if !$cache->{mountdata};

    my $path = $scfg->{path};
    my $volume = $scfg->{volume};

    if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
        # create the mount point (honoring mkdir/create-base-path settings)
        $class->config_aware_base_mkdir($scfg, $path);

        die "unable to activate storage '$storeid' - " . "directory '$path' does not exist\n"
            if !-d $path;

        # mount from whichever configured server is reachable, falling back
        # to the primary one when none responds
        my $server = &$get_active_server($scfg, 1);

        glusterfs_mount($server, $volume, $path);
    }

    $class->SUPER::activate_storage($storeid, $scfg, $cache);
}
# Unmount the gluster volume from its mount point, if (and only if) it is
# currently mounted there.
sub deactivate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;

    # reuse a previously parsed mount table when the cache has one
    $cache->{mountdata} ||= PVE::ProcFSTools::parse_proc_mounts();

    my ($volume, $path) = ($scfg->{volume}, $scfg->{path});

    return if !glusterfs_is_mounted($volume, $path, $cache->{mountdata});

    run_command(['/bin/umount', $path], errmsg => 'umount error');
}
# Per-volume activation is a no-op: activating the storage mounts the whole
# gluster volume, which makes every volume on it accessible.
sub activate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;

    # do nothing by default
}
# Per-volume deactivation is a no-op; the mount is only released by
# deactivate_storage.
sub deactivate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;

    # do nothing by default
}
# Report whether any configured gluster server is currently reachable:
# 1 when a server probe succeeded, 0 otherwise.
sub check_connection {
    my ($class, $storeid, $scfg, $cache) = @_;

    my $active = $get_active_server->($scfg);

    return defined($active) ? 1 : 0;
}
# Import metadata handling is identical to plain directory storages, so
# delegate directly to the DirPlugin implementation.
sub get_import_metadata {
    return PVE::Storage::DirPlugin::get_import_metadata(@_);
}
1;

View File

@ -9,7 +9,6 @@ SOURCES= \
CephFSPlugin.pm \
RBDPlugin.pm \
ISCSIDirectPlugin.pm \
GlusterfsPlugin.pm \
ZFSPoolPlugin.pm \
ZFSPlugin.pm \
PBSPlugin.pm \

View File

@ -35,7 +35,7 @@ our @COMMON_TAR_FLAGS = qw(
);
our @SHARED_STORAGE = (
'iscsi', 'nfs', 'cifs', 'rbd', 'cephfs', 'iscsidirect', 'glusterfs', 'zfs', 'drbd', 'pbs',
'iscsi', 'nfs', 'cifs', 'rbd', 'cephfs', 'iscsidirect', 'zfs', 'drbd', 'pbs',
);
our $QCOW2_PREALLOCATION = {