Compare commits

..

9 Commits

6 changed files with 298 additions and 191 deletions

View File

@ -669,4 +669,4 @@ This script will also send the backup status to Zabbix. (if you've installed my
This project was sponsored by: This project was sponsored by:
* (None so far) * JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )

View File

@ -96,7 +96,7 @@ class TestZfsNode(unittest2.TestCase):
#now tries to destroy our own last snapshot (before the final destroy of the dataset) #now tries to destroy our own last snapshot (before the final destroy of the dataset)
self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue()) self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
#but cant finish because still in use: #but cant finish because still in use:
self.assertIn("fs1: Error during destoy missing", buf.getvalue()) self.assertIn("fs1: Error during --destroy-missing", buf.getvalue())
shelltest("zfs destroy test_target1/clone1") shelltest("zfs destroy test_target1/clone1")

View File

@ -0,0 +1,49 @@
from basetest import *
import time


class TestZfsAutobackup31(unittest2.TestCase):
    """Tests for zfs-autobackup v3.1 features: the --no-thinning option."""

    def setUp(self):
        # recreate the test zpools so every test starts from a known state
        prepare_zpools()
        # show both the standard failure message and any custom message
        self.longMessage=True

    def test_no_thinning(self):
        """With --no-thinning, no snapshot may be destroyed even when the
        keep-rules (--keep-target=0 --keep-source=0) mark everything obsolete.
        """

        # first run: create and replicate the initial snapshots
        with patch('time.strftime', return_value="20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())

        # second run: keep-rules say destroy everything, but --no-thinning must prevent it
        with patch('time.strftime', return_value="20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())

        # both snapshots must still exist on source AND target
        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")

View File

@ -115,7 +115,7 @@ test_target1
def test_supportedrecvoptions(self): def test_supportedrecvoptions(self):
logger=LogStub() logger=LogStub()
description="[Source]" description="[Source]"
#NOTE: this couldnt hang via ssh if we dont close filehandles properly. (which was a previous bug) #NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
node=ZfsNode("test", logger, description=description, ssh_to='localhost') node=ZfsNode("test", logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list) self.assertIsInstance(node.supported_recv_options, list)

View File

@ -12,7 +12,7 @@ from zfs_autobackup.ThinnerRule import ThinnerRule
class ZfsAutobackup: class ZfsAutobackup:
"""main class""" """main class"""
VERSION = "3.0.1-beta8" VERSION = "3.1-beta1"
HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION) HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION)
def __init__(self, argv, print_arguments=True): def __init__(self, argv, print_arguments=True):
@ -47,7 +47,9 @@ class ZfsAutobackup:
help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)') help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
parser.add_argument('--no-send', action='store_true', parser.add_argument('--no-send', action='store_true',
help='Don\'t send snapshots (useful for cleanups, or if you want a serperate send-cronjob)') help='Don\'t send snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
# parser.add_argument('--no-thinning', action='store_true', help='Don\'t run the thinner.') parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
parser.add_argument('--no-holds', action='store_true',
help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
parser.add_argument('--min-change', type=int, default=1, parser.add_argument('--min-change', type=int, default=1,
help='Number of bytes written after which we consider a dataset changed (default %(' help='Number of bytes written after which we consider a dataset changed (default %('
'default)s)') 'default)s)')
@ -56,8 +58,6 @@ class ZfsAutobackup:
parser.add_argument('--ignore-replicated', action='store_true', parser.add_argument('--ignore-replicated', action='store_true',
help='Ignore datasets that seem to be replicated some other way. (No changes since ' help='Ignore datasets that seem to be replicated some other way. (No changes since '
'lastest snapshot. Useful for proxmox HA replication)') 'lastest snapshot. Useful for proxmox HA replication)')
parser.add_argument('--no-holds', action='store_true',
help='Don\'t hold snapshots. (Faster)')
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS) parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--strip-path', default=0, type=int, parser.add_argument('--strip-path', default=0, type=int,
@ -104,6 +104,7 @@ class ZfsAutobackup:
help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)') help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
# note args is the only global variable we use, since its a global readonly setting anyway # note args is the only global variable we use, since its a global readonly setting anyway
args = parser.parse_args(argv) args = parser.parse_args(argv)
@ -147,92 +148,9 @@ class ZfsAutobackup:
self.log.verbose("") self.log.verbose("")
self.log.verbose("#### " + title) self.log.verbose("#### " + title)
# sync datasets, or thin-only on both sides # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
# target is needed for this.
def sync_datasets(self, source_node, source_datasets):
description = "[Target]"
self.set_title("Target settings")
target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output, description=description,
thinner=target_thinner)
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
if self.args.no_send:
self.set_title("Thinning source and target")
else:
self.set_title("Sending and thinning")
# check if exists, to prevent vague errors
target_dataset = ZfsDataset(target_node, self.args.target_path)
if not target_dataset.exists:
self.error("Target path '{}' does not exist. Please create this dataset first.".format(target_dataset))
return 255
if self.args.filter_properties:
filter_properties = self.args.filter_properties.split(",")
else:
filter_properties = []
if self.args.set_properties:
set_properties = self.args.set_properties.split(",")
else:
set_properties = []
if self.args.clear_refreservation:
filter_properties.append("refreservation")
if self.args.clear_mountpoint:
set_properties.append("canmount=noauto")
# sync datasets
fail_count = 0
target_datasets = []
for source_dataset in source_datasets:
try:
# determine corresponding target_dataset
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
target_dataset = ZfsDataset(target_node, target_name)
target_datasets.append(target_dataset)
# ensure parents exists
# TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True)
# determine common zpool features
source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
common_features = source_features and target_features
# source_dataset.debug("Common features: {}".format(common_features))
source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
features=common_features, filter_properties=filter_properties,
set_properties=set_properties,
ignore_recv_exit_code=self.args.ignore_transfer_errors,
holds=not self.args.no_holds, rollback=self.args.rollback,
raw=self.args.raw, other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
if self.args.debug:
raise
# if not self.args.no_thinning:
self.thin_missing_targets(ZfsDataset(target_node, self.args.target_path), target_datasets)
return fail_count
def thin_missing_targets(self, target_dataset, used_target_datasets): def thin_missing_targets(self, target_dataset, used_target_datasets):
"""thin/destroy target datasets that are missing on the source.""" """thin target datasets that are missing on the source."""
self.debug("Thinning obsolete datasets") self.debug("Thinning obsolete datasets")
@ -242,12 +160,23 @@ class ZfsAutobackup:
dataset.debug("Missing on source, thinning") dataset.debug("Missing on source, thinning")
dataset.thin() dataset.thin()
# destroy_missing enabled? except Exception as e:
if self.args.destroy_missing is not None: dataset.error("Error during thinning of missing datasets ({})".format(str(e)))
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def destroy_missing_targets(self, target_dataset, used_target_datasets):
"""destroy target datasets that are missing on the source and that meet the requirements"""
self.debug("Destroying obsolete datasets")
for dataset in target_dataset.recursive_datasets:
try:
if dataset not in used_target_datasets:
# cant do anything without our own snapshots # cant do anything without our own snapshots
if not dataset.our_snapshots: if not dataset.our_snapshots:
if dataset.datasets: if dataset.datasets:
# its not a leaf, just ignore
dataset.debug("Destroy missing: ignoring") dataset.debug("Destroy missing: ignoring")
else: else:
dataset.verbose( dataset.verbose(
@ -284,7 +213,58 @@ class ZfsAutobackup:
dataset.destroy(fail_exception=True) dataset.destroy(fail_exception=True)
except Exception as e: except Exception as e:
dataset.error("Error during destoy missing ({})".format(str(e))) dataset.error("Error during --destroy-missing: {}".format(str(e)))
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides"""
fail_count = 0
target_datasets = []
for source_dataset in source_datasets:
try:
# determine corresponding target_dataset
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
target_dataset = ZfsDataset(target_node, target_name)
target_datasets.append(target_dataset)
# ensure parents exists
# TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True)
# determine common zpool features (cached, so no problem we call it often)
source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
common_features = source_features and target_features
# sync the snapshots of this dataset
source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
features=common_features, filter_properties=self.filter_properties_list(),
set_properties=self.set_properties_list(),
ignore_recv_exit_code=self.args.ignore_transfer_errors,
holds=not self.args.no_holds, rollback=self.args.rollback,
raw=self.args.raw, also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
no_thinning=self.args.no_thinning)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
if self.args.debug:
raise
target_path_dataset=ZfsDataset(target_node, self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
if self.args.destroy_missing is not None:
self.destroy_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
return fail_count
def thin_source(self, source_datasets): def thin_source(self, source_datasets):
@ -293,6 +273,44 @@ class ZfsAutobackup:
for source_dataset in source_datasets: for source_dataset in source_datasets:
source_dataset.thin(skip_holds=True) source_dataset.thin(skip_holds=True)
def filter_replicated(self, datasets):
if not self.args.ignore_replicated:
return datasets
else:
self.set_title("Filtering already replicated filesystems")
ret = []
for dataset in datasets:
if dataset.is_changed(self.args.min_change):
ret.append(dataset)
else:
dataset.verbose("Ignoring, already replicated")
return(ret)
def filter_properties_list(self):
if self.args.filter_properties:
filter_properties = self.args.filter_properties.split(",")
else:
filter_properties = []
if self.args.clear_refreservation:
filter_properties.append("refreservation")
return filter_properties
def set_properties_list(self):
if self.args.set_properties:
set_properties = self.args.set_properties.split(",")
else:
set_properties = []
if self.args.clear_mountpoint:
set_properties.append("canmount=noauto")
return set_properties
def run(self): def run(self):
try: try:
@ -322,18 +340,8 @@ class ZfsAutobackup:
self.args.backup_name)) self.args.backup_name))
return 255 return 255
source_datasets = []
# filter out already replicated stuff? # filter out already replicated stuff?
if not self.args.ignore_replicated: source_datasets = self.filter_replicated(selected_source_datasets)
source_datasets = selected_source_datasets
else:
self.set_title("Filtering already replicated filesystems")
for selected_source_dataset in selected_source_datasets:
if selected_source_dataset.is_changed(self.args.min_change):
source_datasets.append(selected_source_dataset)
else:
selected_source_dataset.verbose("Ignoring, already replicated")
if not self.args.no_snapshot: if not self.args.no_snapshot:
self.set_title("Snapshotting") self.set_title("Snapshotting")
@ -342,8 +350,36 @@ class ZfsAutobackup:
# if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode) # if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
if self.args.target_path: if self.args.target_path:
fail_count = self.sync_datasets(source_node, source_datasets)
# create target_node
self.set_title("Target settings")
target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
thinner=target_thinner)
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
if self.args.no_send:
self.set_title("Thinning source and target")
else: else:
self.set_title("Sending and thinning")
# check if exists, to prevent vague errors
target_dataset = ZfsDataset(target_node, self.args.target_path)
if not target_dataset.exists:
raise(Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# do the actual sync
fail_count = self.sync_datasets(
source_node=source_node,
source_datasets=source_datasets,
target_node=target_node)
else:
if not self.args.no_thinning:
self.thin_source(source_datasets) self.thin_source(source_datasets)
fail_count = 0 fail_count = 0

View File

@ -102,10 +102,10 @@ class ZfsDataset:
else: else:
return ZfsDataset(self.zfs_node, self.rstrip_path(1)) return ZfsDataset(self.zfs_node, self.rstrip_path(1))
def find_prev_snapshot(self, snapshot, other_snapshots=False): def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
"""find previous snapshot in this dataset. None if it doesn't exist. """find previous snapshot in this dataset. None if it doesn't exist.
other_snapshots: set to true to also return snapshots that were not created by us. (is_ours) also_other_snapshots: set to true to also return snapshots that were not created by us. (is_ours)
""" """
if self.is_snapshot: if self.is_snapshot:
@ -114,11 +114,11 @@ class ZfsDataset:
index = self.find_snapshot_index(snapshot) index = self.find_snapshot_index(snapshot)
while index: while index:
index = index - 1 index = index - 1
if other_snapshots or self.snapshots[index].is_ours(): if also_other_snapshots or self.snapshots[index].is_ours():
return self.snapshots[index] return self.snapshots[index]
return None return None
def find_next_snapshot(self, snapshot, other_snapshots=False): def find_next_snapshot(self, snapshot, also_other_snapshots=False):
"""find next snapshot in this dataset. None if it doesn't exist""" """find next snapshot in this dataset. None if it doesn't exist"""
if self.is_snapshot: if self.is_snapshot:
@ -127,7 +127,7 @@ class ZfsDataset:
index = self.find_snapshot_index(snapshot) index = self.find_snapshot_index(snapshot)
while index is not None and index < len(self.snapshots) - 1: while index is not None and index < len(self.snapshots) - 1:
index = index + 1 index = index + 1
if other_snapshots or self.snapshots[index].is_ours(): if also_other_snapshots or self.snapshots[index].is_ours():
return self.snapshots[index] return self.snapshots[index]
return None return None
@ -277,7 +277,6 @@ class ZfsDataset:
def snapshots(self): def snapshots(self):
"""get all snapshots of this dataset""" """get all snapshots of this dataset"""
if not self.exists: if not self.exists:
return [] return []
@ -414,7 +413,7 @@ class ZfsDataset:
# progress output # progress output
if show_progress: if show_progress:
cmd.append("-v") # cmd.append("-v")
cmd.append("-P") cmd.append("-P")
# resume a previous send? (don't need more parameters in that case) # resume a previous send? (don't need more parameters in that case)
@ -431,9 +430,6 @@ class ZfsDataset:
cmd.append(self.name) cmd.append(self.name)
# if args.buffer and args.ssh_source!="local":
# cmd.append("|mbuffer -m {}".format(args.buffer))
# NOTE: this doesn't start the send yet, it only returns a subprocess.Pipe # NOTE: this doesn't start the send yet, it only returns a subprocess.Pipe
return self.zfs_node.run(cmd, pipe=True) return self.zfs_node.run(cmd, pipe=True)
@ -495,9 +491,6 @@ class ZfsDataset:
self.error("error during transfer") self.error("error during transfer")
raise (Exception("Target doesn't exist after transfer, something went wrong.")) raise (Exception("Target doesn't exist after transfer, something went wrong."))
# if args.buffer and args.ssh_target!="local":
# cmd.append("|mbuffer -m {}".format(args.buffer))
def transfer_snapshot(self, target_snapshot, features, prev_snapshot=None, show_progress=False, def transfer_snapshot(self, target_snapshot, features, prev_snapshot=None, show_progress=False,
filter_properties=None, set_properties=None, ignore_recv_exit_code=False, resume_token=None, filter_properties=None, set_properties=None, ignore_recv_exit_code=False, resume_token=None,
raw=False): raw=False):
@ -609,21 +602,24 @@ class ZfsDataset:
target_dataset.error("Cant find common snapshot with source.") target_dataset.error("Cant find common snapshot with source.")
raise (Exception("You probably need to delete the target dataset to fix this.")) raise (Exception("You probably need to delete the target dataset to fix this."))
def find_start_snapshot(self, common_snapshot, other_snapshots): def find_start_snapshot(self, common_snapshot, also_other_snapshots):
"""finds first snapshot to send""" """finds first snapshot to send
:rtype: ZfsDataset or None if we cant find it.
"""
if not common_snapshot: if not common_snapshot:
if not self.snapshots: if not self.snapshots:
start_snapshot = None start_snapshot = None
else: else:
# start from beginning # no common snapshot, start from beginning
start_snapshot = self.snapshots[0] start_snapshot = self.snapshots[0]
if not start_snapshot.is_ours() and not other_snapshots: if not start_snapshot.is_ours() and not also_other_snapshots:
# try to start at a snapshot thats ours # try to start at a snapshot thats ours
start_snapshot = self.find_next_snapshot(start_snapshot, other_snapshots) start_snapshot = self.find_next_snapshot(start_snapshot, also_other_snapshots)
else: else:
start_snapshot = self.find_next_snapshot(common_snapshot, other_snapshots) # normal situation: start_snapshot is the one after the common snapshot
start_snapshot = self.find_next_snapshot(common_snapshot, also_other_snapshots)
return start_snapshot return start_snapshot
@ -659,50 +655,25 @@ class ZfsDataset:
return allowed_filter_properties, allowed_set_properties return allowed_filter_properties, allowed_set_properties
def sync_snapshots(self, target_dataset, features, show_progress=False, filter_properties=None, set_properties=None, def _add_virtual_snapshots(self, source_dataset, source_start_snapshot, also_other_snapshots):
ignore_recv_exit_code=False, holds=True, rollback=False, raw=False, other_snapshots=False, """add snapshots from source to our snapshot list. (just the in memory list, no disk operations)"""
no_send=False, destroy_incompatible=False):
"""sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""
if set_properties is None: self.debug("Creating virtual target snapshots")
set_properties = [] snapshot = source_start_snapshot
if filter_properties is None: while snapshot:
filter_properties = [] # create virtual target snapshot
# NOTE: with force_exist we're telling the dataset it doesnt exist yet. (e.g. its virtual)
# determine common and start snapshot virtual_snapshot = ZfsDataset(self.zfs_node,
target_dataset.debug("Determining start snapshot") self.filesystem_name + "@" + snapshot.snapshot_name,
common_snapshot = self.find_common_snapshot(target_dataset)
start_snapshot = self.find_start_snapshot(common_snapshot, other_snapshots)
# should be destroyed before attempting zfs recv:
incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot)
# make target snapshot list the same as source, by adding virtual non-existing ones to the list.
target_dataset.debug("Creating virtual target snapshots")
source_snapshot = start_snapshot
while source_snapshot:
# create virtual target snapshot
virtual_snapshot = ZfsDataset(target_dataset.zfs_node,
target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name,
force_exists=False) force_exists=False)
target_dataset.snapshots.append(virtual_snapshot) self.snapshots.append(virtual_snapshot)
source_snapshot = self.find_next_snapshot(source_snapshot, other_snapshots) snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)
# now let thinner decide what we want on both sides as final state (after all transfers are done) def _pre_clean(self, common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_keeps):
if self.our_snapshots: """cleanup old stuff before starting snapshot syncing"""
self.debug("Create thinning list")
(source_keeps, source_obsoletes) = self.thin_list(keeps=[self.our_snapshots[-1]])
else:
source_obsoletes = []
if target_dataset.our_snapshots: # on source: destroy all obsoletes before common.
(target_keeps, target_obsoletes) = target_dataset.thin_list(keeps=[target_dataset.our_snapshots[-1]], # But after common, only delete snapshots that target also doesn't want
ignores=incompatible_target_snapshots)
else:
target_keeps = []
target_obsoletes = []
# on source: destroy all obsoletes before common. but after common, only delete snapshots that target also
# doesn't want to explicitly keep
before_common = True before_common = True
for source_snapshot in self.snapshots: for source_snapshot in self.snapshots:
if common_snapshot and source_snapshot.snapshot_name == common_snapshot.snapshot_name: if common_snapshot and source_snapshot.snapshot_name == common_snapshot.snapshot_name:
@ -720,12 +691,9 @@ class ZfsDataset:
if target_snapshot.exists: if target_snapshot.exists:
target_snapshot.destroy() target_snapshot.destroy()
# now actually transfer the snapshots, if we want def _validate_resume_token(self, target_dataset, start_snapshot):
if no_send: """validate and get (or destroy) resume token"""
return
# resume?
resume_token = None
if 'receive_resume_token' in target_dataset.properties: if 'receive_resume_token' in target_dataset.properties:
resume_token = target_dataset.properties['receive_resume_token'] resume_token = target_dataset.properties['receive_resume_token']
# not valid anymore? # not valid anymore?
@ -733,9 +701,36 @@ class ZfsDataset:
if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name: if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
target_dataset.verbose("Cant resume, resume token no longer valid.") target_dataset.verbose("Cant resume, resume token no longer valid.")
target_dataset.abort_resume() target_dataset.abort_resume()
resume_token = None else:
return resume_token
def _plan_sync(self, target_dataset, also_other_snapshots):
"""plan where to start syncing and what to sync and what to keep"""
# determine common and start snapshot
target_dataset.debug("Determining start snapshot")
common_snapshot = self.find_common_snapshot(target_dataset)
start_snapshot = self.find_start_snapshot(common_snapshot, also_other_snapshots)
incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot)
# let thinner decide whats obsolete on source
source_obsoletes = []
if self.our_snapshots:
source_obsoletes = self.thin_list(keeps=[self.our_snapshots[-1]])[1]
# let thinner decide keeps/obsoletes on target, AFTER the transfer would be done (by using virtual snapshots)
target_dataset._add_virtual_snapshots(self, start_snapshot, also_other_snapshots)
target_keeps = []
target_obsoletes = []
if target_dataset.our_snapshots:
(target_keeps, target_obsoletes) = target_dataset.thin_list(keeps=[target_dataset.our_snapshots[-1]],
ignores=incompatible_target_snapshots)
return common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps, incompatible_target_snapshots
def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_incompatible):
"""destroy incompatbile snapshots on target before sync, or inform user what to do"""
# incompatible target snapshots?
if incompatible_target_snapshots: if incompatible_target_snapshots:
if not destroy_incompatible: if not destroy_incompatible:
for snapshot in incompatible_target_snapshots: for snapshot in incompatible_target_snapshots:
@ -745,7 +740,33 @@ class ZfsDataset:
for snapshot in incompatible_target_snapshots: for snapshot in incompatible_target_snapshots:
snapshot.verbose("Incompatible snapshot") snapshot.verbose("Incompatible snapshot")
snapshot.destroy() snapshot.destroy()
target_dataset.snapshots.remove(snapshot) self.snapshots.remove(snapshot)
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, raw, also_other_snapshots,
no_send, destroy_incompatible, no_thinning):
"""sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""
(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
# NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
# Also useful with no_send to still clean up stuff.
if not no_thinning:
self._pre_clean(
common_snapshot=common_snapshot, target_dataset=target_dataset,
target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
# now actually transfer the snapshots, if we want
if no_send:
return
# check if we can resume
resume_token = self._validate_resume_token(target_dataset, start_snapshot)
# handle incompatible stuff on target
target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)
# rollback target to latest? # rollback target to latest?
if rollback: if rollback:
@ -780,6 +801,7 @@ class ZfsDataset:
prev_source_snapshot.release() prev_source_snapshot.release()
target_dataset.find_snapshot(prev_source_snapshot).release() target_dataset.find_snapshot(prev_source_snapshot).release()
if not no_thinning:
# we may now destroy the previous source snapshot if its obsolete # we may now destroy the previous source snapshot if its obsolete
if prev_source_snapshot in source_obsoletes: if prev_source_snapshot in source_obsoletes:
prev_source_snapshot.destroy() prev_source_snapshot.destroy()
@ -799,4 +821,4 @@ class ZfsDataset:
target_dataset.abort_resume() target_dataset.abort_resume()
resume_token = None resume_token = None
source_snapshot = self.find_next_snapshot(source_snapshot, other_snapshots) source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)