various automount fixes

@@ -122,12 +122,12 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --force".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
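
Editor's note on the test pattern above: the runs pin `time.strftime` so each pass creates a snapshot with a deterministic name, and the suite treats a falsy return from `run()` as success. A minimal sketch of that pattern, assuming the repository's `ZfsAutobackup` class and test harness (the import path is an assumption):

```python
# Sketch only; ZfsAutobackup comes from this repository, import path assumed.
from unittest.mock import patch
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

def run_pinned_backup(argv_line, stamp="test-20101111000000"):
    # Pin time.strftime so the snapshot created by this run gets a
    # deterministic name that later assertions can rely on.
    with patch('time.strftime', return_value=stamp):
        errors = ZfsAutobackup(argv_line.split(" ")).run()
    # The test suite treats a falsy return value from run() as success.
    assert not errors
```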

@@ -158,16 +158,16 @@ test_target1/test_source2/fs2/sub encryptionroot -

with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --force".split(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(
" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --force".split(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())

r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")

@@ -63,7 +63,6 @@ test_target1/test_source2/fs2/sub@test-20101111000001

#create 2 snapshots with the same name, which are invalid as common snapshot
shelltest("zfs snapshot test_source1/fs1@invalid")
shelltest("zfs mount test_target1/test_source1/fs1")
shelltest("touch /test_target1/test_source1/fs1/shouldnotbeHere")
shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")

@@ -137,3 +136,19 @@ test_target1/test_source2/fs2/sub@test-20101111000001
/test_target1
""")


# def test_stuff(self):
#
#
# shelltest("zfs set autobackup:test=true test_source2")
# # shelltest("zfs set readonly=on test_target1")
#
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --clear-mountpoint".split(" ")).run())
#
# # shelltest("zfs mount test_target1/test_source2/fs2/sub" )
#
# with patch('time.strftime', return_value="test-20101111000001"):
# self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())

@@ -10,7 +10,7 @@ class CliBase(object):
Overridden in subclasses that add stuff for the specific programs."""

# also used by setup.py
VERSION = "3.2.3-beta"
VERSION = "3.3-beta"
HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)

def __init__(self, argv, print_arguments=True):

@@ -352,6 +352,7 @@ class ZfsAutobackup(ZfsAuto):
and target_dataset.parent \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.debug("Creating unmountable parents")
target_dataset.parent.create_filesystem(parents=True)

# determine common zpool features (cached, so no problem we call it often)
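
Editor's note: the hunk above makes the target side create any missing parent datasets as unmountable placeholders before receiving. A hedged shell-level sketch of that idea, not the project's actual `create_filesystem(unmountable=True)` implementation (the `canmount=off` property is an assumption about how "unmountable" could be realised):

```python
# Hedged sketch: create the missing parent chain so the placeholder
# datasets never get mounted over real data.
import subprocess

def create_unmountable_parents(dataset_name):
    # canmount=off is an assumed choice; the real code may use other options.
    subprocess.run(["zfs", "create", "-p", "-o", "canmount=off", dataset_name], check=True)
```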

@@ -196,8 +196,7 @@ class ZfsDataset:
self.verbose("Selected")
return True

@CachedProperty
@property
def parent(self):
"""get zfs-parent of this dataset. for snapshots this means it will get
the filesystem/volume that it belongs to. otherwise it will return the

@@ -258,19 +257,27 @@ class ZfsDataset:
return None

@CachedProperty
def exists_check(self):
"""check on disk if it exists"""
self.debug("Checking if dataset exists")
return (len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
valid_exitcodes=[0, 1],
hide_errors=True)) > 0)

@property
def exists(self):
"""check if dataset exists. Use force to force a specific value to be
cached, if you already know. Useful for performance reasons
"""returns True if dataset should exist.
Use force_exists to force a specific value, if you already know. Useful for performance and test reasons
"""

if self.force_exists is not None:
self.debug("Checking if dataset exists: was forced to {}".format(self.force_exists))
if self.force_exists:
self.debug("Dataset should exist")
else:
self.debug("Dataset should not exist")
return self.force_exists
else:
self.debug("Checking if dataset exists")

return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
hide_errors=True) and True)
return self.exists_check

def create_filesystem(self, parents=False, unmountable=True):
"""create a filesystem

@@ -339,7 +346,6 @@ class ZfsDataset:
if len(pair) == 2:
ret[pair[0]] = pair[1]


return ret

def is_changed(self, min_changed_bytes=1):

@@ -353,7 +359,6 @@ class ZfsDataset:
if min_changed_bytes == 0:
return True


if int(self.properties['written']) < min_changed_bytes:
return False
else:

@@ -444,6 +449,7 @@ class ZfsDataset:
:rtype: ZfsDataset
"""

#FIXME: dont check for existance. (currenlty needed for _add_virtual_snapshots)
if not self.exists:
return []

@@ -570,7 +576,8 @@ class ZfsDataset:

return self.from_names(names[1:], force_exists=True)

def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded,
send_pipes, zfs_compressed):
"""returns a pipe with zfs send output for this snapshot

resume_token: resume sending from this token. (in that case we don't

@@ -631,7 +638,8 @@ class ZfsDataset:

return output_pipe

def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False,
force=False):
"""starts a zfs recv for this snapshot and uses pipe as input

note: you can it both on a snapshot or filesystem object. The

@@ -703,11 +711,14 @@ class ZfsDataset:
self.error("error during transfer")
raise (Exception("Target doesn't exist after transfer, something went wrong."))

# at this point we're sure the actual dataset exists
self.parent.force_exists = True

def automount(self):
"""mount the dataset as if one did a zfs mount -a, but only for this dataset"""

"""Mount the dataset as if one did a zfs mount -a, but only for this dataset
Failure to mount doesnt result in an exception, but outputs errors to STDERR.

"""

self.debug("Auto mounting")

@@ -726,8 +737,7 @@ class ZfsDataset:
if self.properties['encryption'] != 'off' and self.properties['keystatus'] == 'unavailable':
return

self.zfs_node.run(["zfs", "mount", self.name])

self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0,1])

def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
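
Editor's note: the change above stops treating a failed `zfs mount` as fatal; exit code 1 is now accepted and only its stderr output is surfaced. Roughly, in plain `subprocess` terms (a sketch, not the project's `zfs_node.run()`):

```python
# Hedged sketch of the tolerant mount behaviour: exit code 1 (e.g. key not
# loaded, mountpoint busy) is reported but not raised; anything else is
# still treated as an error.
import subprocess
import sys

def automount(dataset_name):
    result = subprocess.run(["zfs", "mount", dataset_name])
    if result.returncode == 1:
        print("warning: could not mount {}".format(dataset_name), file=sys.stderr)
    elif result.returncode != 0:
        raise RuntimeError("zfs mount {} exited with {}".format(dataset_name, result.returncode))
```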

@@ -770,18 +780,18 @@ class ZfsDataset:

# do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
resume_token=resume_token, raw=raw, send_properties=send_properties,
write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code,
recv_pipes=recv_pipes, force=force)

# try to automount it, if its the initial transfer
if not prev_snapshot:
target_snapshot.parent.force_exists=True
# in test mode it doesnt actually exist, so dont try to mount it/read properties
if not target_snapshot.zfs_node.readonly:
target_snapshot.parent.automount()


def abort_resume(self):
"""abort current resume state"""
self.debug("Aborting resume")

@@ -967,7 +977,8 @@ class ZfsDataset:
while snapshot:
# create virtual target snapsho
# NOTE: with force_exist we're telling the dataset it doesnt exist yet. (e.g. its virtual)
virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name,
force_exists=False)
self.snapshots.append(virtual_snapshot)
snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)
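
Editor's note: virtual target snapshots are planning objects only, so `force_exists=False` lets `exists()` answer False without spending a `zfs list` call on each of them. A hedged sketch of that idea (`get_dataset`, `filesystem_name`, `snapshot_name` and `snapshots` are the names seen in the diff; the helper function is illustrative):

```python
# Sketch of the virtual-snapshot construction above.
def add_virtual_snapshot(target_dataset, source_snapshot):
    name = target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name
    # force_exists=False: exists() returns False without running `zfs list`.
    virtual = target_dataset.zfs_node.get_dataset(name, force_exists=False)
    target_dataset.snapshots.append(virtual)
    return virtual
```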

@@ -1082,7 +1093,6 @@ class ZfsDataset:
if len(incompatible_target_snapshots) > 0:
self.rollback()


def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check):

@@ -1125,7 +1135,8 @@ class ZfsDataset:

(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots, guid_check=guid_check, raw=raw)
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw)

# NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
# Also usefull with no_send to still cleanup stuff.

@@ -1143,9 +1154,8 @@ class ZfsDataset:
# check if we can resume
resume_token = self._validate_resume_token(target_dataset, start_snapshot)



(active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties, set_properties)
(active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties,
set_properties)

# encrypt at target?
if encrypt and not raw:

@@ -1153,7 +1163,6 @@ class ZfsDataset:
active_filter_properties.extend(["keylocation", "pbkdf2iters", "keyformat", "encryption"])
write_embedded = False


# now actually transfer the snapshots
prev_source_snapshot = common_snapshot
source_snapshot = start_snapshot

@@ -1229,7 +1238,6 @@ class ZfsDataset:
"umount", self.name
]


self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

def clone(self, name):

@@ -1270,4 +1278,3 @@ class ZfsDataset:
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

self.invalidate()