Compare commits


2 Commits

4 changed files with 22 additions and 9 deletions

View File

@@ -890,7 +890,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
         n=ZfsNode("test",l)
         d=ZfsDataset(n,"test_source1@test")
-        sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True)
+        sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
         with OutputIO() as buf:

View File

@@ -71,4 +71,11 @@ test_target1/b/test_source1/fs1/sub@test-20101111000000
 test_target1/b/test_source2/fs2/sub@test-20101111000000
 test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
 test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
 """)
+
+    def test_zfs_compressed(self):
+        with patch('time.strftime', return_value="20101111000000"):
+            self.assertFalse(
+                ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

View File

@@ -99,6 +99,9 @@ class ZfsAutobackup:
         parser.add_argument('--encrypt', action='store_true',
                             help='Encrypt data after receiving it.')
+        parser.add_argument('--zfs-compressed', action='store_true',
+                            help='Transfer blocks that already have zfs-compression as-is.')
         parser.add_argument('--test', action='store_true',
                             help='dont change anything, just show what would be done (still does all read-only '
                                  'operations)')
@@ -166,6 +169,9 @@ class ZfsAutobackup:
         if args.compress and args.ssh_source is None and args.ssh_target is None:
             self.warning("Using compression, but transfer is local.")

+        if args.compress and args.zfs_compressed:
+            self.warning("Using --compress with --zfs-compressed, might be inefficient.")
+
     def verbose(self, txt):
         self.log.verbose(txt)
@@ -381,7 +387,7 @@ class ZfsAutobackup:
                                               no_send=self.args.no_send,
                                               destroy_incompatible=self.args.destroy_incompatible,
                                               send_pipes=send_pipes, recv_pipes=recv_pipes,
-                                              decrypt=self.args.decrypt, encrypt=self.args.encrypt, )
+                                              decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed )
            except Exception as e:
                fail_count = fail_count + 1
                source_dataset.error("FAILED: " + str(e))

View File

@@ -503,7 +503,7 @@ class ZfsDataset:
         return self.from_names(names[1:])

-    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes):
+    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
         """returns a pipe with zfs send output for this snapshot

         resume_token: resume sending from this token. (in that case we don't
@@ -530,7 +530,7 @@
         if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
             cmd.append("--embed") # WRITE_EMBEDDED, more compact stream

-        if "-c" in self.zfs_node.supported_send_options:
+        if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
             cmd.append("--compressed") # use compressed WRITE records

         # raw? (send over encrypted data in its original encrypted form without decrypting)
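As an aside, here is a simplified sketch of how the changed condition affects the assembled send options. `build_send_flags` and the literal inputs below are illustrative only, not part of the real `send_pipe()` implementation:

```python
# Simplified sketch of the option assembly shown in the hunk above.
# `features` and `supported_send_options` are assumed inputs; the real method
# builds a much longer command (resume tokens, raw sends, properties, ...).
def build_send_flags(zfs_compressed, write_embedded, features, supported_send_options):
    cmd = ["zfs", "send"]
    if write_embedded and 'embedded_data' in features and "-e" in supported_send_options:
        cmd.append("--embed")  # WRITE_EMBEDDED, more compact stream
    # the change above makes --compressed opt-in instead of automatic:
    if zfs_compressed and "-c" in supported_send_options:
        cmd.append("--compressed")  # pass already-compressed blocks through as-is
    return cmd

print(build_send_flags(False, True, ['embedded_data'], ["-e", "-c"]))
# ['zfs', 'send', '--embed']
print(build_send_flags(True, True, ['embedded_data'], ["-e", "-c"]))
# ['zfs', 'send', '--embed', '--compressed']
```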
@@ -634,7 +634,7 @@
     def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                           filter_properties, set_properties, ignore_recv_exit_code, resume_token,
-                          raw, send_properties, write_embedded, send_pipes, recv_pipes):
+                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
         """transfer this snapshot to target_snapshot. specify prev_snapshot for
         incremental transfer
@@ -673,7 +673,7 @@
         # do it
         pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
-                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes)
+                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
         target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
                                   set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
@@ -968,7 +968,7 @@
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
@@ -1051,7 +1051,7 @@
                                                   filter_properties=active_filter_properties,
                                                   set_properties=active_set_properties,
                                                   ignore_recv_exit_code=ignore_recv_exit_code,
-                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes)
+                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)

                resume_token = None
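Overall, the new keyword is threaded unchanged from the CLI down to the send command. A condensed, hypothetical illustration of that pass-through (stub methods only, not the real `ZfsDataset` API; every other parameter is omitted):

```python
# Stub class showing only how `zfs_compressed` is forwarded end to end.
class DatasetSketch:
    def sync_snapshots(self, zfs_compressed, **kw):
        # ZfsAutobackup passes args.zfs_compressed in here (see the hunk at 387 above) ...
        return self.transfer_snapshot(zfs_compressed=zfs_compressed, **kw)

    def transfer_snapshot(self, zfs_compressed, **kw):
        # ... which hands it on to send_pipe() ...
        return self.send_pipe(zfs_compressed=zfs_compressed, **kw)

    def send_pipe(self, zfs_compressed, **kw):
        # ... where it finally gates the `--compressed` send option.
        return ["--compressed"] if zfs_compressed else []

print(DatasetSketch().sync_snapshots(zfs_compressed=True))   # ['--compressed']
print(DatasetSketch().sync_snapshots(zfs_compressed=False))  # []
```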