Compare commits

...

10 Commits

6 changed files with 121 additions and 15 deletions

View File

@@ -398,7 +398,14 @@ Note 1: The --encrypt option will rely on inheriting encryption parameters from
 Note 2: Decide what you want at an early stage: If you change the --encrypt or --decrypt parameter after the initial sync you might get weird and wonderful errors. (nothing dangerous)
-I'll add some tips when the issues start to get in on github. :)
+**Some common errors while using zfs encryption:**
+
+```
+cannot receive incremental stream: kernel modules must be upgraded to receive this stream.
+```
+
+This happens if you forget to use --encrypt while the target datasets are already encrypted. (A very strange error message indeed.)
 
 ## Transfer buffering, compression and rate limiting.
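
For concreteness, here is a minimal sketch of the two invocations, in the `ZfsAutobackup(argv).run()` style the test suite in this diff uses; the backup name, pool paths, and the absolute import path are hypothetical:

```
# a minimal sketch, assuming the package layout shown in this diff;
# backup name and pool paths are hypothetical
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# Without --encrypt, sending into an already-encrypted target dataset can
# fail with the "kernel modules must be upgraded" error documented above.
ZfsAutobackup("offsite backuppool/backups --verbose".split(" ")).run()

# With --encrypt, encryption parameters are inherited from the target,
# so the received stream is accepted.
ZfsAutobackup("offsite backuppool/backups --encrypt --verbose".split(" ")).run()
```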

View File

@@ -56,3 +56,50 @@ test_target1/test_source2/fs2/sub@test-20101111000000
 test_target1/test_source2/fs2/sub@test-20101111000001
 test_target1/test_source2/fs2/sub@test-20101111000002
 """)
+
+    def test_excludepaths(self):
+        """Test issue #103"""
+
+        shelltest("zfs create test_target1/target_shouldnotbeexcluded")
+        shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
+        shelltest("zfs create test_target1/target")
+
+        with patch('time.strftime', return_value="test-20101111000000"):
+            self.assertFalse(ZfsAutobackup(
+                "test test_target1/target --no-progress --verbose --allow-empty".split(
+                    " ")).run())
+
+        r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
+        self.assertMultiLineEqual(r, """
+test_source1
+test_source1/fs1
+test_source1/fs1@test-20101111000000
+test_source1/fs1/sub
+test_source1/fs1/sub@test-20101111000000
+test_source2
+test_source2/fs2
+test_source2/fs2/sub
+test_source2/fs2/sub@test-20101111000000
+test_source2/fs3
+test_source2/fs3/sub
+test_target1
+test_target1/target
+test_target1/target/test_source1
+test_target1/target/test_source1/fs1
+test_target1/target/test_source1/fs1@test-20101111000000
+test_target1/target/test_source1/fs1/sub
+test_target1/target/test_source1/fs1/sub@test-20101111000000
+test_target1/target/test_source2
+test_target1/target/test_source2/fs2
+test_target1/target/test_source2/fs2/sub
+test_target1/target/test_source2/fs2/sub@test-20101111000000
+test_target1/target/test_target1
+test_target1/target/test_target1/target_shouldnotbeexcluded
+test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
+test_target1/target_shouldnotbeexcluded
+test_target1/target_shouldnotbeexcluded@test-20101111000000
+""")

View File

@@ -418,6 +418,13 @@ test_target1/fs2/sub
 test_target1/fs2/sub@test-20101111000000
 """)
+
+    def test_strippath_collision(self):
+        with self.assertRaisesRegexp(Exception, "collision"):
+            ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()
+
+    def test_strippath_toomuch(self):
+        with self.assertRaisesRegexp(Exception, "too much"):
+            ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()
 
     def test_clearrefres(self):
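
To see why these two tests expect exceptions, here is a standalone sketch of the stripping logic (a simplified stand-in for the real `lstrip_path` method shown later in this diff), using the test pool's own dataset names:

```
# standalone sketch of the two failure modes the tests above expect
def lstrip_path(name, count):
    components = name.split("/")
    if count > len(components):
        raise Exception("Trying to strip too much from path")
    return "/".join(components[count:])

# --strip-path=2: two different sources map onto the same target name
print(lstrip_path("test_source1/fs1/sub", 2))  # 'sub'
print(lstrip_path("test_source2/fs2/sub", 2))  # 'sub' -> "collision"

# --strip-path=3: more components stripped than the dataset name has
lstrip_path("test_source1/fs1", 3)  # raises -> "too much"
```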

View File

@@ -9,12 +9,12 @@ from .ZfsDataset import ZfsDataset
 from .LogConsole import LogConsole
 from .ZfsNode import ZfsNode
 from .ThinnerRule import ThinnerRule
+import os.path
 
 class ZfsAutobackup:
     """main class"""
 
-    VERSION = "3.1.1-beta1"
+    VERSION = "3.1.3"
     HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
 
     def __init__(self, argv, print_arguments=True):
@@ -87,6 +87,8 @@ class ZfsAutobackup:
         parser.add_argument('--rollback', action='store_true',
                             help='Rollback changes to the latest target snapshot before starting. (normally you can '
                                  'prevent changes by setting the readonly property on the target_path to on)')
+        parser.add_argument('--force', '-F', action='store_true',
+                            help='Use zfs -F option to force overwrite/rollback. (Useful with --strip-path=1, but use with care)')
         parser.add_argument('--destroy-incompatible', action='store_true',
                             help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
         parser.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
@@ -364,6 +366,29 @@
 
         return ret
 
+    def make_target_name(self, source_dataset):
+        """make target_name from a source_dataset"""
+        stripped = source_dataset.lstrip_path(self.args.strip_path)
+        if stripped != "":
+            return self.args.target_path + "/" + stripped
+        else:
+            return self.args.target_path
+
+    def check_target_names(self, source_node, source_datasets, target_node):
+        """check all target names for collisions etc. due to strip-options"""
+
+        self.debug("Checking target names:")
+        target_datasets = {}
+        for source_dataset in source_datasets:
+
+            target_name = self.make_target_name(source_dataset)
+            source_dataset.debug("-> {}".format(target_name))
+
+            if target_name in target_datasets:
+                raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
+
+            target_datasets[target_name] = source_dataset
+
     # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
     def sync_datasets(self, source_node, source_datasets, target_node):
         """Sync datasets, or thin-only on both sides
@@ -387,13 +412,14 @@
 
             try:
                 # determine corresponding target_dataset
-                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+                target_name = self.make_target_name(source_dataset)
                 target_dataset = ZfsDataset(target_node, target_name)
                 target_datasets.append(target_dataset)
 
                 # ensure parents exist
                 # TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
                 if not self.args.no_send \
+                        and target_dataset.parent \
                         and target_dataset.parent not in target_datasets \
                         and not target_dataset.parent.exists:
                     target_dataset.parent.create_filesystem(parents=True)
@@ -414,7 +440,7 @@
                     destroy_incompatible=self.args.destroy_incompatible,
                     send_pipes=send_pipes, recv_pipes=recv_pipes,
                     decrypt=self.args.decrypt, encrypt=self.args.encrypt,
-                    zfs_compressed=self.args.zfs_compressed)
+                    zfs_compressed=self.args.zfs_compressed, force=self.args.force)
             except Exception as e:
                 fail_count = fail_count + 1
                 source_dataset.error("FAILED: " + str(e))
@@ -560,6 +586,9 @@
                 raise (Exception(
                     "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
 
+            # check for collisions due to strip-path
+            self.check_target_names(source_node, source_datasets, target_node)
+
         # do the actual sync
         # NOTE: even with no_send, no_thinning and no_snapshot it does a useful thing, because it checks the common snapshots and shows incompatible snapshots
         fail_count = self.sync_datasets(

View File

@@ -79,7 +79,11 @@ class ZfsDataset:
         Args:
             :type count: int
         """
-        return "/".join(self.split_path()[count:])
+        components = self.split_path()
+        if count > len(components):
+            raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))
+
+        return "/".join(components[count:])
 
     def rstrip_path(self, count):
         """return name with last count components stripped
@@ -158,7 +162,8 @@ class ZfsDataset:
 
         # our path starts with one of the excluded paths?
         for exclude_path in exclude_paths:
-            if self.name.startswith(exclude_path):
+            # if self.name.startswith(exclude_path):
+            if (self.name + "/").startswith(exclude_path + "/"):
                 # too noisy for verbose
                 self.debug("Excluded (path in exclude list)")
                 return False
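
The effect of the extra "/" is easiest to see with the dataset names from `test_excludepaths` earlier in this diff; a minimal before/after sketch:

```
# sketch: plain startswith() treats a sibling that merely shares a name
# prefix as a child; appending "/" to both sides restores the dataset
# boundary (names taken from test_excludepaths above)
name = "test_target1/target_shouldnotbeexcluded"
exclude_path = "test_target1/target"

print(name.startswith(exclude_path))                # True  -> wrongly excluded (old)
print((name + "/").startswith(exclude_path + "/"))  # False -> kept (new)

# a real child still matches under the new check
print(("test_target1/target/sub" + "/").startswith(exclude_path + "/"))  # True
```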
@@ -187,7 +192,11 @@ class ZfsDataset:
         if self.is_snapshot:
             return ZfsDataset(self.zfs_node, self.filesystem_name)
         else:
-            return ZfsDataset(self.zfs_node, self.rstrip_path(1))
+            stripped = self.rstrip_path(1)
+            if stripped:
+                return ZfsDataset(self.zfs_node, stripped)
+            else:
+                return None
 
     # NOTE: unused for now
     # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
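
The `None` return matters for the new `and target_dataset.parent` guard in `sync_datasets` above: with aggressive `--strip-path` values a target dataset can sit directly at the pool root, which has no parent components left. A standalone sketch of the idea:

```
# sketch: a pool-level dataset name has no parent component left,
# so parent should be None rather than a dataset with an empty name
def parent_name(name):
    stripped = "/".join(name.split("/")[:-1])  # what rstrip_path(1) yields
    return stripped if stripped else None

print(parent_name("test_target1/target"))  # 'test_target1'
print(parent_name("test_target1"))         # None -> caller must guard before .exists
```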
@@ -325,7 +334,6 @@ class ZfsDataset:
         try:
             test = self.timestamp
         except ValueError as e:
-            self.error(str(e))
            return False
 
         return True
@@ -577,7 +585,7 @@ class ZfsDataset:
 
         return output_pipe
 
-    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
+    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
         """starts a zfs recv for this snapshot and uses pipe as input
 
         note: you can call it both on a snapshot or filesystem object. The
@@ -618,6 +626,9 @@ class ZfsDataset:
         # verbose output
         cmd.append("-v")
 
+        if force:
+            cmd.append("-F")
+
         if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
             # support resuming
             self.debug("Enabled resume support")
@@ -648,7 +659,7 @@ class ZfsDataset:
 
     def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                           filter_properties, set_properties, ignore_recv_exit_code, resume_token,
-                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
+                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
         """transfer this snapshot to target_snapshot. specify prev_snapshot for
         incremental transfer
 
@@ -689,7 +700,7 @@ class ZfsDataset:
         pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
                               resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
         target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
-                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
+                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
 
     def abort_resume(self):
         """abort current resume state"""
@@ -986,7 +997,7 @@ class ZfsDataset:
 
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
 
@@ -1007,6 +1018,8 @@ class ZfsDataset:
             :type destroy_incompatible: bool
         """
 
+        self.verbose("sending to {}".format(target_dataset))
+
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
          incompatible_target_snapshots) = \
             self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
@@ -1069,7 +1082,9 @@ class ZfsDataset:
                                                   filter_properties=active_filter_properties,
                                                   set_properties=active_set_properties,
                                                   ignore_recv_exit_code=ignore_recv_exit_code,
-                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
+                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw,
+                                                  send_properties=send_properties, send_pipes=send_pipes,
+                                                  recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)
 
                 resume_token = None

View File

@@ -6,4 +6,5 @@ def cli():
     from .ZfsAutobackup import ZfsAutobackup
     zfs_autobackup = ZfsAutobackup(sys.argv[1:], False)
-    sys.exit(zfs_autobackup.run())
+    failed_datasets = zfs_autobackup.run()
+    sys.exit(min(failed_datasets, 255))
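
Capping at 255 matters because the shell only sees the low 8 bits of an exit status, so a large failure count could wrap around, even to 0 (success). A quick sketch:

```
# sketch: the shell only sees the low 8 bits of an exit status, so large
# failure counts would wrap around without the min() cap
failed_datasets = 300
print(failed_datasets % 256)       # 44  -> what sys.exit(300) would look like
print(min(failed_datasets, 255))   # 255 -> capped, still unambiguously nonzero

failed_datasets = 256
print(failed_datasets % 256)       # 0   -> would masquerade as success!
print(min(failed_datasets, 255))   # 255
```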