Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(NFSSR): ensure we can attach SR during attach_from_config call #44

Open
wants to merge 20 commits into
base: 2.30.8-8.2
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
b7d3ea7
backport of ccd121cc248d79b749a63d4ad099e6d5f4b8b588: CA-354692: chec…
MarkSymsCtx May 20, 2021
3a0c67d
Update xs-sm.service's description for XCP-ng
stormi Aug 13, 2020
a8168e1
Add TrueNAS multipath config
stormi Aug 13, 2020
6120e7f
feat(drivers): add CephFS, GlusterFS and XFS drivers
Wescoeur Jul 20, 2020
dbfbe5f
feat(drivers): add ZFS driver to avoid losing VDI metadata (xcp-ng/xc…
Wescoeur Aug 12, 2020
696bd1c
Re-add the ext4 driver for users who need to transition
stormi Aug 13, 2020
b06f04f
feat(drivers): add LinstorSR driver
Wescoeur Mar 16, 2020
d64ac06
feat(tests): add unit tests concerning ZFS (close xcp-ng/xcp#425)
Wescoeur Oct 27, 2020
68e67e6
If no NFS ACLs provided, assume everyone:
benjamreis Feb 25, 2021
769a4b9
Added SM Driver for MooseFS
xandrus Jan 29, 2021
b53271b
Avoid usage of `umount` in `ISOSR` when `legacy_mode` is used
Wescoeur Dec 2, 2021
9bccc87
MooseFS SR uses now UUID subdirs for each SR
Wescoeur May 18, 2022
b018cd2
Fix is_open call for many drivers (#25)
Wescoeur Jun 23, 2022
bdb7ced
Remove SR_CACHING capability for many SR types (#24)
Wescoeur Jun 23, 2022
4de671d
Remove `SR_PROBE` from ZFS capabilities (#37)
benjamreis Aug 4, 2023
0aec61e
Fix vdi-ref when static vdis are used
gthvn1 Aug 16, 2023
9e7f3ac
Tell users not to edit multipath.conf directly
stormi Jan 27, 2023
20616a7
Add custom.conf multipath configuration file
stormi Jan 27, 2023
398471d
Install /etc/multipath/conf.d/custom.conf
stormi Aug 25, 2023
b04358e
fix(NFSSR): ensure we can attach SR during attach_from_config call
Wescoeur Oct 11, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 20 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ SM_DRIVERS += udev
SM_DRIVERS += ISO
SM_DRIVERS += HBA
SM_DRIVERS += RawHBA
SM_DRIVERS += Linstor
SM_DRIVERS += LVHD
SM_DRIVERS += LVHDoISCSI
SM_DRIVERS += LVHDoHBA
Expand All @@ -17,6 +18,12 @@ SM_DRIVERS += OCFSoHBA
SM_DRIVERS += SHM
SM_DRIVERS += SMB
SM_DRIVERS += LVHDoFCoE
SM_DRIVERS += CephFS
SM_DRIVERS += GlusterFS
SM_DRIVERS += XFS
SM_DRIVERS += ZFS
SM_DRIVERS += EXT4
SM_DRIVERS += MooseFS

SM_LIBS := SR
SM_LIBS += SRCommand
Expand All @@ -30,6 +37,9 @@ SM_LIBS += verifyVHDsOnSR
SM_LIBS += scsiutil
SM_LIBS += scsi_host_rescan
SM_LIBS += vhdutil
SM_LIBS += linstorjournaler
SM_LIBS += linstorvhdutil
SM_LIBS += linstorvolumemanager
SM_LIBS += lvhdutil
SM_LIBS += cifutils
SM_LIBS += xs_errors
Expand Down Expand Up @@ -68,6 +78,7 @@ SM_LIBS += sr_health_check
UDEV_RULES = 65-multipath 55-xs-mpath-scsidev 57-usb 58-xapi
MPATH_DAEMON = sm-multipath
MPATH_CONF = multipath.conf
MPATH_CUSTOM_CONF = custom.conf
SMLOG_CONF = SMlog

SM_XML := XE_SR_ERRORCODES
Expand All @@ -83,6 +94,7 @@ UDEV_SCRIPTS_DIR := /etc/udev/scripts/
SYSTEMD_SERVICE_DIR := /usr/lib/systemd/system/
INIT_DIR := /etc/rc.d/init.d/
MPATH_CONF_DIR := /etc/multipath.xenserver/
MPATH_CUSTOM_CONF_DIR := /etc/multipath/conf.d/
MODPROBE_DIR := /etc/modprobe.d/
EXTENSION_SCRIPT_DEST := /etc/xapi.d/extensions/
LOGROTATE_DIR := /etc/logrotate.d/
Expand All @@ -95,6 +107,7 @@ SM_PY_FILES = $(foreach LIB, $(SM_LIBS), drivers/$(LIB).py) $(foreach DRIVER, $(
.PHONY: build
build:
make -C dcopy
make -C linstor

.PHONY: precommit
precommit: build
Expand Down Expand Up @@ -135,6 +148,7 @@ install: precheck
mkdir -p $(SM_STAGING)$(INIT_DIR)
mkdir -p $(SM_STAGING)$(SYSTEMD_SERVICE_DIR)
mkdir -p $(SM_STAGING)$(MPATH_CONF_DIR)
mkdir -p $(SM_STAGING)$(MPATH_CUSTOM_CONF_DIR)
mkdir -p $(SM_STAGING)$(MODPROBE_DIR)
mkdir -p $(SM_STAGING)$(LOGROTATE_DIR)
mkdir -p $(SM_STAGING)$(DEBUG_DEST)
Expand All @@ -152,6 +166,8 @@ install: precheck
$(SM_STAGING)$(SM_DEST)/plugins/
install -m 644 multipath/$(MPATH_CONF) \
$(SM_STAGING)/$(MPATH_CONF_DIR)
install -m 644 multipath/$(MPATH_CUSTOM_CONF) \
$(SM_STAGING)/$(MPATH_CUSTOM_CONF_DIR)
install -m 755 multipath/sm-multipath \
$(SM_STAGING)/$(INIT_DIR)
install -m 755 multipath/multipath-root-setup \
Expand All @@ -174,6 +190,8 @@ install: precheck
$(SM_STAGING)/$(SYSTEMD_SERVICE_DIR)
install -m 644 systemd/sr_health_check.timer \
$(SM_STAGING)/$(SYSTEMD_SERVICE_DIR)
install -m 644 systemd/linstor-monitor.service \
$(SM_STAGING)/$(SYSTEMD_SERVICE_DIR)
for i in $(UDEV_RULES); do \
install -m 644 udev/$$i.rules \
$(SM_STAGING)$(UDEV_RULES_DIR); done
Expand All @@ -194,6 +212,7 @@ install: precheck
cd $(SM_STAGING)$(SM_DEST) && rm -f OCFSoHBASR
ln -sf $(SM_DEST)mpathutil.py $(SM_STAGING)/sbin/mpathutil
install -m 755 drivers/02-vhdcleanup $(SM_STAGING)$(MASTER_SCRIPT_DEST)
install -m 755 drivers/linstor-manager $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
install -m 755 drivers/lvhd-thin $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
install -m 755 drivers/on_slave.py $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)/on-slave
install -m 755 drivers/testing-hooks $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
Expand All @@ -212,6 +231,7 @@ install: precheck
install -m 755 scripts/kickpipe $(SM_STAGING)$(LIBEXEC)
install -m 755 scripts/set-iscsi-initiator $(SM_STAGING)$(LIBEXEC)
$(MAKE) -C dcopy install DESTDIR=$(SM_STAGING)
$(MAKE) -C linstor install DESTDIR=$(SM_STAGING)
ln -sf $(SM_DEST)blktap2.py $(SM_STAGING)$(BIN_DEST)/blktap2
ln -sf $(SM_DEST)lcache.py $(SM_STAGING)$(BIN_DEST)tapdisk-cache-stats
ln -sf /dev/null $(SM_STAGING)$(UDEV_RULES_DIR)/69-dm-lvm-metad.rules
Expand All @@ -225,4 +245,3 @@ install: precheck
.PHONY: clean
clean:
rm -rf $(SM_STAGING)

296 changes: 296 additions & 0 deletions drivers/CephFSSR.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,296 @@
#!/usr/bin/env python
#
# Original work copyright (C) Citrix systems
# Modified work copyright (C) Vates SAS and XCP-ng community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# CEPHFSSR: Based on FileSR, mounts ceph fs share

import errno
import os
import syslog as _syslog
import xmlrpclib
from syslog import syslog

# careful with the import order here
# FileSR has a circular dependency:
# FileSR -> blktap2 -> lvutil -> EXTSR -> FileSR
# importing in this order seems to avoid triggering the issue.
import SR
import SRCommand
import FileSR
# end of careful
import cleanup
import util
import vhdutil
import xs_errors
from lock import Lock

# SMAPI capabilities advertised by this driver.
CAPABILITIES = ["SR_PROBE", "SR_UPDATE",
                "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
                "VDI_UPDATE", "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR",
                "VDI_GENERATE_CONFIG",
                "VDI_RESET_ON_BOOT/2", "ATOMIC_PAUSE"]

# Device-config keys accepted at SR creation time: [name, description].
CONFIGURATION = [
    ['server', 'Ceph server(s) (required, ex: "192.168.0.12" or "10.10.10.10,10.10.10.26")'],
    ['serverpath', 'Ceph FS path (required, ex: "/")'],
    ['serverport', 'ex: 6789'],
    ['options', 'Ceph FS client name, and secretfile (required, ex: "name=admin,secretfile=/etc/ceph/admin.secret")']
]

# Driver metadata reported back to xapi.
DRIVER_INFO = {
    'name': 'CephFS VHD',
    'description': 'SR plugin which stores disks as VHD files on a CephFS storage',
    'vendor': 'Vates SAS',
    'copyright': '(C) 2020 Vates SAS',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
}

# Lets vdi_attach_from_config use tapdisk directly (HA / static VDIs).
DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}

# The mountpoint for the directory when performing an sr_probe. All probes
# are guaranteed to be serialised by xapi, so this single mountpoint is fine.
PROBE_MOUNTPOINT = os.path.join(SR.MOUNT_BASE, "probe")


class CephFSException(Exception):
    """Raised on CephFS mount/umount/directory failures.

    `errstr` carries the human-readable reason. It is also forwarded to
    Exception.__init__ so that str(exc) is informative; the previous
    version left the base class uninitialised, making str(exc) empty
    whenever the exception was logged generically.
    """

    def __init__(self, errstr):
        super(CephFSException, self).__init__(errstr)
        self.errstr = errstr


# On-disk layout (paths set up in load()):
#   mountpoint = /var/run/sr-mount/CephFS/uuid
#   linkpath = mountpoint/uuid - path to SR directory on share
#   path = /var/run/sr-mount/uuid - symlink to SR directory on share
class CephFSSR(FileSR.FileSR):
    """Ceph file-based storage repository.

    Stores VDIs as VHD files on a mounted CephFS share. Most VDI logic is
    inherited from FileSR; this class only manages the mount/symlink
    lifecycle of the share itself.
    """

    DRIVER_TYPE = 'cephfs'

    def handles(sr_type):
        # fudge, because the parent class (FileSR) checks for smb to alter its behavior
        return sr_type == CephFSSR.DRIVER_TYPE or sr_type == 'smb'

    handles = staticmethod(handles)

    def load(self, sr_uuid):
        """Initialise driver state from the device config (self.dconf).

        Raises:
            SRUnavailable: the ceph client binary is not installed.
            ConfigServerMissing: mandatory 'server' dconf key is absent.
        """
        if not self._is_ceph_available():
            raise xs_errors.XenError(
                'SRUnavailable',
                opterr='ceph is not installed'
            )

        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.driver_config = DRIVER_CONFIG
        if 'server' not in self.dconf:
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        self.remotepath = self.dconf['serverpath']
        # if serverport is not specified, use default 6789
        if 'serverport' not in self.dconf:
            self.remoteport = "6789"
        else:
            self.remoteport = self.dconf['serverport']
        # During attach_from_config there may be no session / SR ref, so
        # fall back to the sm_config passed in the command parameters.
        if self.sr_ref and self.session is not None:
            self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        else:
            self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
        self.mountpoint = os.path.join(SR.MOUNT_BASE, 'CephFS', sr_uuid)
        self.linkpath = os.path.join(self.mountpoint, sr_uuid or "")
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self._check_o_direct()

    def checkmount(self):
        # True only when the share is mounted AND the SR symlink exists.
        return util.ioretry(lambda: ((util.pathexists(self.mountpoint) and
                                      util.ismount(self.mountpoint)) and
                                     util.pathexists(self.path)))

    def mount(self, mountpoint=None):
        """Mount the remote ceph export at 'mountpoint'"""
        if mountpoint is None:
            mountpoint = self.mountpoint
        elif not util.is_string(mountpoint) or mountpoint == "":
            raise CephFSException("mountpoint not a string object")

        try:
            if not util.ioretry(lambda: util.isdir(mountpoint)):
                util.ioretry(lambda: util.makedirs(mountpoint))
        except util.CommandException, inst:
            raise CephFSException("Failed to make directory: code is %d" % inst.code)

        try:
            options = []
            if self.dconf.has_key('options'):
                options.append(self.dconf['options'])
            if options:
                options = ['-o', ','.join(options)]
            # mount -t ceph <server>:<port>:<path> <mountpoint> [-o <options>]
            command = ["mount", '-t', 'ceph', self.remoteserver+":"+self.remoteport+":"+self.remotepath, mountpoint] + options
            util.ioretry(lambda: util.pread(command), errlist=[errno.EPIPE, errno.EIO], maxretry=2, nofail=True)
        except util.CommandException, inst:
            syslog(_syslog.LOG_ERR, 'CephFS mount failed ' + inst.__str__())
            raise CephFSException("mount failed with return code %d" % inst.code)

        # Sanity check to ensure that the user has at least RO access to the
        # mounted share. Windows sharing and security settings can be tricky.
        try:
            util.listdir(mountpoint)
        except util.CommandException:
            try:
                self.unmount(mountpoint, True)
            except CephFSException:
                # Log but do not mask the original permission failure.
                util.logException('CephFSSR.unmount()')
            raise CephFSException("Permission denied. Please check user privileges.")

    def unmount(self, mountpoint, rmmountpoint):
        """Unmount `mountpoint`, removing the directory if `rmmountpoint`."""
        try:
            util.pread(["umount", mountpoint])
        except util.CommandException, inst:
            raise CephFSException("umount failed with return code %d" % inst.code)
        if rmmountpoint:
            try:
                os.rmdir(mountpoint)
            except OSError, inst:
                raise CephFSException("rmdir failed with error '%s'" % inst.strerror)

    def attach(self, sr_uuid):
        """Mount the share and create the SR symlink if not already done."""
        if not self.checkmount():
            try:
                self.mount()
                os.symlink(self.linkpath, self.path)
            except CephFSException, exc:
                # 12 == SROSError "mount failure" in the SM error table.
                raise SR.SROSError(12, exc.errstr)
        self.attached = True

    def probe(self):
        """Mount on the shared probe mountpoint and report the SR UUIDs
        (UUID-named directories) found on the share, as XML."""
        try:
            self.mount(PROBE_MOUNTPOINT)
            sr_list = filter(util.match_uuid, util.listdir(PROBE_MOUNTPOINT))
            self.unmount(PROBE_MOUNTPOINT, True)
        except (util.CommandException, xs_errors.XenError):
            raise
        # Create a dictionary from the SR uuids to feed SRtoXML()
        sr_dict = {sr_uuid: {} for sr_uuid in sr_list}
        return util.SRtoXML(sr_dict)

    def detach(self, sr_uuid):
        """Stop GC, unmount the share and remove the SR symlink."""
        if not self.checkmount():
            return
        util.SMlog("Aborting GC/coalesce")
        cleanup.abort(self.uuid)
        # Change directory to avoid unmount conflicts
        os.chdir(SR.MOUNT_BASE)
        self.unmount(self.mountpoint, True)
        os.unlink(self.path)
        self.attached = False

    def create(self, sr_uuid, size):
        """Create the SR directory on the share; fails if one already
        exists and is non-empty (SRExists)."""
        if self.checkmount():
            raise SR.SROSError(113, 'CephFS mount point already attached')

        try:
            self.mount()
        except CephFSException, exc:
            # noinspection PyBroadException
            try:
                os.rmdir(self.mountpoint)
            except:
                # we have no recovery strategy
                pass
            raise SR.SROSError(111, "CephFS mount error [opterr=%s]" % exc.errstr)

        if util.ioretry(lambda: util.pathexists(self.linkpath)):
            if len(util.ioretry(lambda: util.listdir(self.linkpath))) != 0:
                self.detach(sr_uuid)
                raise xs_errors.XenError('SRExists')
        else:
            try:
                util.ioretry(lambda: util.makedirs(self.linkpath))
                os.symlink(self.linkpath, self.path)
            except util.CommandException, inst:
                if inst.code != errno.EEXIST:
                    try:
                        self.unmount(self.mountpoint, True)
                    except CephFSException:
                        util.logException('CephFSSR.unmount()')
                    raise SR.SROSError(116,
                                       "Failed to create CephFS SR. remote directory creation error: {}".format(
                                           os.strerror(inst.code)))
        # Leave the SR detached after creation; attach happens separately.
        self.detach(sr_uuid)

    def delete(self, sr_uuid):
        # try to remove/delete non VDI contents first
        super(CephFSSR, self).delete(sr_uuid)
        try:
            if self.checkmount():
                self.detach(sr_uuid)
            # Re-mount to remove the SR directory itself from the share.
            self.mount()
            if util.ioretry(lambda: util.pathexists(self.linkpath)):
                util.ioretry(lambda: os.rmdir(self.linkpath))
            util.SMlog(str(self.unmount(self.mountpoint, True)))
        except util.CommandException, inst:
            self.detach(sr_uuid)
            if inst.code != errno.ENOENT:
                raise SR.SROSError(114, "Failed to remove CephFS mount point")

    def vdi(self, uuid, loadLocked=False):
        # Factory hook used by the parent class to build VDI objects.
        return CephFSFileVDI(self, uuid)

    @staticmethod
    def _is_ceph_available():
        # Returns the path to the 'ceph' binary, or None if not installed.
        import distutils.spawn
        return distutils.spawn.find_executable('ceph')

class CephFSFileVDI(FileSR.FileVDI):
    """VDI stored as a VHD file on a CephFS SR."""

    def attach(self, sr_uuid, vdi_uuid):
        """Attach the VDI, tagging xenstore with this driver's type."""
        if not hasattr(self, 'xenstore_data'):
            self.xenstore_data = {}

        self.xenstore_data['storage-type'] = CephFSSR.DRIVER_TYPE

        return super(CephFSFileVDI, self).attach(sr_uuid, vdi_uuid)

    def generate_config(self, sr_uuid, vdi_uuid):
        """Build an XMLRPC-encoded blob that lets this VDI be re-attached
        later without a XAPI session (HA / static VDIs).

        Raises VDIUnavailable if the backing VHD file does not exist.
        """
        # Fixed: the log tag previously said "SMBFileVDI" (copy-paste from
        # the SMB driver), misattributing these entries in SMlog.
        util.SMlog("CephFSFileVDI.generate_config")
        if not util.pathexists(self.path):
            raise xs_errors.XenError('VDIUnavailable')
        resp = {'device_config': self.sr.dconf,
                'sr_uuid': sr_uuid,
                'vdi_uuid': vdi_uuid,
                'sr_sm_config': self.sr.sm_config,
                'command': 'vdi_attach_from_config'}
        # Return the 'config' encoded within a normal XMLRPC response so that
        # we can use the regular response/error parsing code.
        config = xmlrpclib.dumps(tuple([resp]), "vdi_attach_from_config")
        return xmlrpclib.dumps((config,), "", True)

    def attach_from_config(self, sr_uuid, vdi_uuid):
        """Re-attach using the blob built by generate_config, mounting the
        SR first if it is not already attached."""
        try:
            if not util.pathexists(self.sr.path):
                self.sr.attach(sr_uuid)
        except:
            # Fixed log tag as above ("SMBFileVDI" -> "CephFSFileVDI").
            util.logException("CephFSFileVDI.attach_from_config")
            raise xs_errors.XenError('SRUnavailable',
                                     opterr='Unable to attach from config')


if __name__ == '__main__':
    # Invoked directly by xapi as an SMAPI command handler.
    SRCommand.run(CephFSSR, DRIVER_INFO)
else:
    # Imported as a module: register the driver with the SR factory.
    SR.registerSR(CephFSSR)
Loading