Compare commits


3 Commits

5 changed files with 65 additions and 19 deletions

View File

@ -227,11 +227,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
# incremental, doesnt want previous anymore
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-target=0 --debug --allow-empty".split(" ")).run())
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
print(buf.getvalue())
self.assertIn(": aborting resume, since", buf.getvalue())
self.assertIn("Aborting resume, we dont want that snapshot anymore.", buf.getvalue())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
@ -247,6 +247,34 @@ test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
""")
# test with empty snapshot list (this was a bug)
def test_abort_resume_emptysnapshotlist(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="20101111000001"):
self.generate_resume()
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesnt want previous anymore
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run())
print(buf.getvalue())
self.assertIn("Aborting resume, its obsolete", buf.getvalue())
def test_missing_common(self):
with patch('time.strftime', return_value="20101111000000"):

View File

@ -890,7 +890,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
n=ZfsNode("test",l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True)
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
with OutputIO() as buf:

View File

@ -71,4 +71,11 @@ test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")
""")
def test_zfs_compressed(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

View File

@ -16,7 +16,7 @@ from zfs_autobackup.ThinnerRule import ThinnerRule
class ZfsAutobackup:
"""main class"""
VERSION = "3.1-rc1"
VERSION = "3.1-rc2"
HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
def __init__(self, argv, print_arguments=True):
@ -99,6 +99,9 @@ class ZfsAutobackup:
parser.add_argument('--encrypt', action='store_true',
help='Encrypt data after receiving it.')
parser.add_argument('--zfs-compressed', action='store_true',
help='Transfer blocks that already have zfs-compression as-is.')
parser.add_argument('--test', action='store_true',
help='dont change anything, just show what would be done (still does all read-only '
'operations)')
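As a usage illustration of the new --zfs-compressed option (a sketch based on the invocation exercised in the test file above; "test" and "test_target1" are the test fixtures, not a recommended setup):

    zfs-autobackup test test_target1 --verbose --zfs-compressed

This asks zfs-autobackup to pass blocks that are already zfs-compressed through as-is instead of decompressing them for the transfer.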
@ -166,6 +169,9 @@ class ZfsAutobackup:
if args.compress and args.ssh_source is None and args.ssh_target is None:
self.warning("Using compression, but transfer is local.")
if args.compress and args.zfs_compressed:
self.warning("Using --compress with --zfs-compress, might be inefficient.")
def verbose(self, txt):
self.log.verbose(txt)
@ -381,7 +387,7 @@ class ZfsAutobackup:
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, )
decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed )
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))

View File

@ -503,7 +503,7 @@ class ZfsDataset:
return self.from_names(names[1:])
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes):
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
"""returns a pipe with zfs send output for this snapshot
resume_token: resume sending from this token. (in that case we don't
@ -530,7 +530,7 @@ class ZfsDataset:
if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
cmd.append("--embed") # WRITE_EMBEDDED, more compact stream
if "-c" in self.zfs_node.supported_send_options:
if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
cmd.append("--compressed") # use compressed WRITE records
# raw? (send over encrypted data in its original encrypted form without decrypting)
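To sketch what this change does to the generated command: with --zfs-compressed given and a zfs that advertises -c, the send command assembled here would gain the compressed-WRITE flag, roughly (snapshot name illustrative, remaining flags depend on features and other options):

    zfs send --embed --compressed test_source1/fs1@test-20101111000000

Without --zfs-compressed, --compressed is now omitted even when the local zfs supports it.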
@ -634,7 +634,7 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, send_pipes, recv_pipes):
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer
@ -673,12 +673,13 @@ class ZfsDataset:
# do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes)
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
def abort_resume(self):
"""abort current resume state"""
self.debug("Aborting resume")
self.zfs_node.run(["zfs", "recv", "-A", self.name])
def rollback(self):
@ -901,14 +902,18 @@ class ZfsDataset:
"""
if 'receive_resume_token' in target_dataset.properties:
resume_token = target_dataset.properties['receive_resume_token']
# not valid anymore?
resume_snapshot = self.get_resume_snapshot(resume_token)
if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
target_dataset.verbose("Cant resume, resume token no longer valid.")
if start_snapshot==None:
target_dataset.verbose("Aborting resume, its obsolete.")
target_dataset.abort_resume()
else:
return resume_token
resume_token = target_dataset.properties['receive_resume_token']
# not valid anymore
resume_snapshot = self.get_resume_snapshot(resume_token)
if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
target_dataset.verbose("Aborting resume, its no longer valid.")
target_dataset.abort_resume()
else:
return resume_token
def _plan_sync(self, target_dataset, also_other_snapshots):
"""plan where to start syncing and what to sync and what to keep
@ -963,7 +968,7 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes):
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
@ -1046,7 +1051,7 @@ class ZfsDataset:
filter_properties=active_filter_properties,
set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes)
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
resume_token = None
@ -1075,7 +1080,7 @@ class ZfsDataset:
source_snapshot.debug("skipped (target doesn't need it)")
# was it actually a resume?
if resume_token:
target_dataset.debug("aborting resume, since we don't want that snapshot anymore")
target_dataset.verbose("Aborting resume, we dont want that snapshot anymore.")
target_dataset.abort_resume()
resume_token = None