various automount fixes
@@ -122,12 +122,12 @@ test_target1/test_source2/fs2/sub  encryptionroot  -
         self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

         with patch('time.strftime', return_value="test-20101111000000"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
-            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

         with patch('time.strftime', return_value="test-20101111000001"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
-            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --force".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

         r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
         self.assertEqual(r, """
@@ -158,16 +158,16 @@ test_target1/test_source2/fs2/sub  encryptionroot  -

         with patch('time.strftime', return_value="test-20101111000000"):
             self.assertFalse(ZfsAutobackup(
-                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
+                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
             self.assertFalse(ZfsAutobackup(
-                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --force".split(
+                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(
                     " ")).run())

         with patch('time.strftime', return_value="test-20101111000001"):
             self.assertFalse(ZfsAutobackup(
                 "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup(
-                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --force".split(
+                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
                     " ")).run())

         r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
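Note (not part of the diff): the encryption tests above now pass --clear-mountpoint instead of leaning on --force for the second run. As a rough usage sketch mirroring the test invocations, a run could look like the following; the import path and the assumption that --clear-mountpoint receives new target datasets with canmount=noauto (so they never mount over an existing directory) come from the project's option help, not from this commit:

    # Hedged sketch of a --clear-mountpoint run, modelled on the tests above.
    from unittest.mock import patch
    from zfs_autobackup.ZfsAutobackup import ZfsAutobackup  # assumed module path

    with patch('time.strftime', return_value="test-20101111000000"):
        args = "test test_target1 --verbose --encrypt --exclude-received --clear-mountpoint"
        # run() returns a falsy value on success, which is what the tests' assertFalse relies on
        failed = ZfsAutobackup(args.split(" ")).run()
    assert not failed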
@@ -63,7 +63,6 @@ test_target1/test_source2/fs2/sub@test-20101111000001

         #create 2 snapshots with the same name, which are invalid as common snapshot
         shelltest("zfs snapshot test_source1/fs1@invalid")
-        shelltest("zfs mount test_target1/test_source1/fs1")
         shelltest("touch /test_target1/test_source1/fs1/shouldnotbeHere")
         shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")

@@ -137,3 +136,19 @@ test_target1/test_source2/fs2/sub@test-20101111000001
 /test_target1
 """)

+
+    # def test_stuff(self):
+    #
+    #
+    #     shelltest("zfs set autobackup:test=true test_source2")
+    #     # shelltest("zfs set readonly=on test_target1")
+    #
+    #     with patch('time.strftime', return_value="test-20101111000000"):
+    #         self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --clear-mountpoint".split(" ")).run())
+    #
+    #     # shelltest("zfs mount test_target1/test_source2/fs2/sub" )
+    #
+    #     with patch('time.strftime', return_value="test-20101111000001"):
+    #         self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
+
+
@@ -10,7 +10,7 @@ class CliBase(object):
     Overridden in subclasses that add stuff for the specific programs."""

     # also used by setup.py
-    VERSION = "3.2.3-beta"
+    VERSION = "3.3-beta"
     HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)

     def __init__(self, argv, print_arguments=True):
@@ -352,6 +352,7 @@ class ZfsAutobackup(ZfsAuto):
                     and target_dataset.parent \
                     and target_dataset.parent not in target_datasets \
                     and not target_dataset.parent.exists:
+                target_dataset.debug("Creating unmountable parents")
                 target_dataset.parent.create_filesystem(parents=True)

             # determine common zpool features (cached, so no problem we call it often)
@@ -88,8 +88,8 @@ class ZfsDataset:
         Args:
             :type count: int
         """
-        components=self.split_path()
-        if count>len(components):
+        components = self.split_path()
+        if count > len(components):
             raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))

         return "/".join(components[count:])
@@ -196,8 +196,7 @@ class ZfsDataset:
             self.verbose("Selected")
             return True

-    @CachedProperty
+    @property
     def parent(self):
         """get zfs-parent of this dataset. for snapshots this means it will get
         the filesystem/volume that it belongs to. otherwise it will return the
@@ -211,7 +210,7 @@ class ZfsDataset:
         if self.is_snapshot:
             return self.zfs_node.get_dataset(self.filesystem_name)
         else:
-            stripped=self.rstrip_path(1)
+            stripped = self.rstrip_path(1)
             if stripped:
                 return self.zfs_node.get_dataset(stripped)
             else:
@@ -258,19 +257,27 @@ class ZfsDataset:
             return None

     @CachedProperty
+    def exists_check(self):
+        """check on disk if it exists"""
+        self.debug("Checking if dataset exists")
+        return (len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
+                                      valid_exitcodes=[0, 1],
+                                      hide_errors=True)) > 0)
+
+    @property
     def exists(self):
-        """check if dataset exists. Use force to force a specific value to be
-        cached, if you already know. Useful for performance reasons
+        """returns True if dataset should exist.
+        Use force_exists to force a specific value, if you already know. Useful for performance and test reasons
         """

         if self.force_exists is not None:
-            self.debug("Checking if dataset exists: was forced to {}".format(self.force_exists))
+            if self.force_exists:
+                self.debug("Dataset should exist")
+            else:
+                self.debug("Dataset should not exist")
             return self.force_exists
         else:
-            self.debug("Checking if dataset exists")
-
-            return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
-                                      hide_errors=True) and True)
+            return self.exists_check

     def create_filesystem(self, parents=False, unmountable=True):
         """create a filesystem
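The hunk above splits the expensive on-disk lookup into a cached exists_check and turns exists into a cheap property that honours force_exists first. A minimal standalone sketch of that pattern, using plain subprocess and functools instead of the project's ZfsNode.run and CachedProperty helpers (the names below are illustrative only):

    import subprocess
    from functools import cached_property

    class DatasetSketch:
        def __init__(self, name, force_exists=None):
            self.name = name
            self.force_exists = force_exists  # None = unknown, True/False = forced by the caller

        @cached_property
        def exists_check(self):
            """Ask ZFS once whether the dataset really exists on disk; the result is cached."""
            result = subprocess.run(["zfs", "list", self.name],
                                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            return result.returncode == 0

        @property
        def exists(self):
            """True if the dataset should exist; a forced value wins over the disk check."""
            if self.force_exists is not None:
                return self.force_exists
            return self.exists_check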
@@ -279,14 +286,14 @@ class ZfsDataset:
             :type parents: bool
         """

-        #recurse up
+        # recurse up
         if parents and self.parent and not self.parent.exists:
             self.parent.create_filesystem(parents, unmountable)

-        cmd=["zfs", "create"]
+        cmd = ["zfs", "create"]

         if unmountable:
-            cmd.extend( ["-o", "canmount=off"])
+            cmd.extend(["-o", "canmount=off"])

         cmd.append(self.name)
         self.zfs_node.run(cmd)
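For context on the "Creating unmountable parents" message added in the ZfsAutobackup hunk further up: with unmountable=True, create_filesystem ends up running roughly the following for each missing parent (a simplified sketch with an assumed helper name, not the project's ZfsNode.run call):

    import subprocess

    def create_unmountable(dataset_name):
        # canmount=off keeps the placeholder parent from ever being mounted,
        # so it cannot shadow an existing directory; real data lives in child datasets.
        subprocess.run(["zfs", "create", "-o", "canmount=off", dataset_name], check=True)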
@@ -339,7 +346,6 @@ class ZfsDataset:
             if len(pair) == 2:
                 ret[pair[0]] = pair[1]

-
         return ret

     def is_changed(self, min_changed_bytes=1):
@@ -353,7 +359,6 @@ class ZfsDataset:
         if min_changed_bytes == 0:
             return True

-
         if int(self.properties['written']) < min_changed_bytes:
             return False
         else:
@@ -444,6 +449,7 @@ class ZfsDataset:
         :rtype: ZfsDataset
         """

+        #FIXME: dont check for existance. (currenlty needed for _add_virtual_snapshots)
         if not self.exists:
             return []

@@ -570,7 +576,8 @@ class ZfsDataset:

         return self.from_names(names[1:], force_exists=True)

-    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
+    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded,
+                  send_pipes, zfs_compressed):
         """returns a pipe with zfs send output for this snapshot

         resume_token: resume sending from this token. (in that case we don't
@@ -631,7 +638,8 @@ class ZfsDataset:

         return output_pipe

-    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
+    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False,
+                  force=False):
         """starts a zfs recv for this snapshot and uses pipe as input

         note: you can it both on a snapshot or filesystem object. The
@@ -703,31 +711,33 @@ class ZfsDataset:
             self.error("error during transfer")
             raise (Exception("Target doesn't exist after transfer, something went wrong."))

+        # at this point we're sure the actual dataset exists
+        self.parent.force_exists = True
+
     def automount(self):
-        """mount the dataset as if one did a zfs mount -a, but only for this dataset"""
+        """Mount the dataset as if one did a zfs mount -a, but only for this dataset
+        Failure to mount doesnt result in an exception, but outputs errors to STDERR.
+
+        """
+
         self.debug("Auto mounting")

-        if self.properties['type']!="filesystem":
+        if self.properties['type'] != "filesystem":
             return

-        if self.properties['canmount']!='on':
+        if self.properties['canmount'] != 'on':
             return

-        if self.properties['mountpoint']=='legacy':
+        if self.properties['mountpoint'] == 'legacy':
             return

-        if self.properties['mountpoint']=='none':
+        if self.properties['mountpoint'] == 'none':
             return

-        if self.properties['encryption']!='off' and self.properties['keystatus']=='unavailable':
+        if self.properties['encryption'] != 'off' and self.properties['keystatus'] == 'unavailable':
             return

-        self.zfs_node.run(["zfs", "mount", self.name])
+        self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0,1])


     def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                           filter_properties, set_properties, ignore_recv_exit_code, resume_token,
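automount() now documents that a failed mount is not fatal and passes valid_exitcodes=[0, 1] to tolerate it. A self-contained sketch of the same guard logic, assuming the dataset's ZFS properties are available as a plain dict (function and argument names are illustrative):

    import subprocess

    def automount_sketch(name, properties):
        """Best-effort 'zfs mount' for one dataset, following the same rules as 'zfs mount -a'."""
        if properties.get('type') != 'filesystem':
            return  # volumes and snapshots are never mounted
        if properties.get('canmount') != 'on':
            return  # canmount=off/noauto datasets are skipped, just like by 'zfs mount -a'
        if properties.get('mountpoint') in ('legacy', 'none'):
            return  # legacy/none mountpoints are not managed by 'zfs mount'
        if properties.get('encryption', 'off') != 'off' and properties.get('keystatus') == 'unavailable':
            return  # cannot mount without a loaded key
        # exit code 1 (e.g. mountpoint not empty) is reported on stderr but treated as non-fatal,
        # matching valid_exitcodes=[0, 1] in the change above
        subprocess.run(["zfs", "mount", name], check=False)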
@@ -770,18 +780,18 @@ class ZfsDataset:

         # do it
         pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
-                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
+                              resume_token=resume_token, raw=raw, send_properties=send_properties,
+                              write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
         target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
-                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
+                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code,
+                                  recv_pipes=recv_pipes, force=force)

-        #try to automount it, if its the initial transfer
+        # try to automount it, if its the initial transfer
         if not prev_snapshot:
-            target_snapshot.parent.force_exists=True
-            #in test mode it doesnt actually exist, so dont try to mount it/read properties
+            # in test mode it doesnt actually exist, so dont try to mount it/read properties
             if not target_snapshot.zfs_node.readonly:
                 target_snapshot.parent.automount()

-
     def abort_resume(self):
         """abort current resume state"""
         self.debug("Aborting resume")
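The transfer_snapshot hunk above drops the force_exists bookkeeping (now done at the end of recv_pipe) and keeps the mount attempt limited to the initial transfer, skipping it on readonly (test mode) nodes where nothing was actually received. Schematically, reusing the attribute names from the diff:

    # Sketch only; target_snapshot is assumed to be the project's ZfsDataset snapshot object.
    def maybe_automount(target_snapshot, prev_snapshot):
        if prev_snapshot:
            return  # incremental send: the parent filesystem already existed before this run
        if target_snapshot.zfs_node.readonly:
            return  # test mode: the dataset was never really created, so there is nothing to mount
        target_snapshot.parent.automount()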
@@ -872,9 +882,9 @@ class ZfsDataset:
                 return None
         else:
             for source_snapshot in reversed(self.snapshots):
-                target_snapshot=target_dataset.find_snapshot(source_snapshot)
+                target_snapshot = target_dataset.find_snapshot(source_snapshot)
                 if target_snapshot:
-                    if guid_check and source_snapshot.properties['guid']!=target_snapshot.properties['guid']:
+                    if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']:
                         target_snapshot.warning("Common snapshot has invalid guid, ignoring.")
                     else:
                         target_snapshot.debug("common snapshot")
@@ -967,7 +977,8 @@ class ZfsDataset:
         while snapshot:
             # create virtual target snapsho
             # NOTE: with force_exist we're telling the dataset it doesnt exist yet. (e.g. its virtual)
-            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
+            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name,
+                                                         force_exists=False)
             self.snapshots.append(virtual_snapshot)
             snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)

@@ -1001,7 +1012,7 @@ class ZfsDataset:
         # on target: destroy everything thats obsolete, except common_snapshot
         for target_snapshot in target_dataset.snapshots:
             if (target_snapshot in target_obsoletes) \
-                    and ( not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
+                    and (not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
                 if target_snapshot.exists:
                     target_snapshot.destroy()

@@ -1014,7 +1025,7 @@ class ZfsDataset:
         """

         if target_dataset.exists and 'receive_resume_token' in target_dataset.properties:
-            if start_snapshot==None:
+            if start_snapshot == None:
                 target_dataset.verbose("Aborting resume, its obsolete.")
                 target_dataset.abort_resume()
             else:
@@ -1079,10 +1090,9 @@ class ZfsDataset:
             snapshot.destroy(fail_exception=True)
             self.snapshots.remove(snapshot)

-        if len(incompatible_target_snapshots)>0:
+        if len(incompatible_target_snapshots) > 0:
             self.rollback()

-
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
                        no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check):
@@ -1108,24 +1118,25 @@ class ZfsDataset:

         # self.verbose("-> {}".format(target_dataset))

-        #defaults for these settings if there is no encryption stuff going on:
+        # defaults for these settings if there is no encryption stuff going on:
         send_properties = True
         raw = False
         write_embedded = True

         # source dataset encrypted?
-        if self.properties.get('encryption', 'off')!='off':
+        if self.properties.get('encryption', 'off') != 'off':
             # user wants to send it over decrypted?
             if decrypt:
                 # when decrypting, zfs cant send properties
-                send_properties=False
+                send_properties = False
             else:
                 # keep data encrypted by sending it raw (including properties)
-                raw=True
+                raw = True

         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
          incompatible_target_snapshots) = \
-            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots, guid_check=guid_check, raw=raw)
+            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
+                            guid_check=guid_check, raw=raw)

         # NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
         # Also usefull with no_send to still cleanup stuff.
@@ -1143,28 +1154,26 @@ class ZfsDataset:
         # check if we can resume
         resume_token = self._validate_resume_token(target_dataset, start_snapshot)

-        (active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties, set_properties)
+        (active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties,
+                                                                                        set_properties)

         # encrypt at target?
         if encrypt and not raw:
             # filter out encryption properties to let encryption on the target take place
-            active_filter_properties.extend(["keylocation","pbkdf2iters","keyformat", "encryption"])
-            write_embedded=False
+            active_filter_properties.extend(["keylocation", "pbkdf2iters", "keyformat", "encryption"])
+            write_embedded = False

-
         # now actually transfer the snapshots
         prev_source_snapshot = common_snapshot
         source_snapshot = start_snapshot
-        do_rollback=rollback
+        do_rollback = rollback
         while source_snapshot:
             target_snapshot = target_dataset.find_snapshot(source_snapshot)  # still virtual

             # does target actually want it?
             if target_snapshot not in target_obsoletes:

-                #do the rollback, one time at first transfer
+                # do the rollback, one time at first transfer
                 if do_rollback:
                     target_dataset.rollback()
                     do_rollback = False
@@ -1229,7 +1238,6 @@ class ZfsDataset:
             "umount", self.name
         ]

-
         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

     def clone(self, name):
@@ -1270,4 +1278,3 @@ class ZfsDataset:
         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

         self.invalidate()
-