allow disabling guid-checking as well, for performance
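In short: this commit adds a --no-guid-check option. When locating the latest common snapshot between source and target, zfs-autobackup normally also compares the snapshots' guid properties; with the new flag that comparison is skipped, which is faster but will trust a target snapshot that merely shares a name with the source. A minimal usage sketch, assuming the import path shown below; the "test test_target1" arguments mirror the test fixtures in this commit and are illustrative only:

    # Sketch only: import path and backup/target names are assumptions taken
    # from the test suite, not a prescribed invocation.
    from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

    # Default behaviour: a common snapshot must also match by guid, otherwise it
    # is reported as "Common snapshot has invalid guid, ignoring."
    ZfsAutobackup("test test_target1 --verbose".split(" ")).run()

    # With --no-guid-check the guid comparison is skipped (faster, per the new
    # help text), at the cost of trusting same-named snapshots.
    ZfsAutobackup("test test_target1 --verbose --no-guid-check".split(" ")).run()

As the updated tests assert, run() returns 1 on failure and a falsy value on success.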
@@ -38,7 +38,7 @@ class TestZfsScaling(unittest2.TestCase):
 
 
         #this triggers if you make a change with an impact of more than O(snapshot_count/2)
-        expected_runs=336
+        expected_runs=335
         print("EXPECTED RUNS: {}".format(expected_runs))
         print("ACTUAL RUNS : {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
@@ -52,7 +52,7 @@ class TestZfsScaling(unittest2.TestCase):
 
 
         #this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
-        expected_runs=42
+        expected_runs=47
         print("EXPECTED RUNS: {}".format(expected_runs))
         print("ACTUAL RUNS : {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
@@ -80,7 +80,7 @@ class TestZfsScaling(unittest2.TestCase):
 
 
         #this triggers if you make a change with an impact of more than O(snapshot_count/2)`
-        expected_runs=636
+        expected_runs=635
         print("EXPECTED RUNS: {}".format(expected_runs))
         print("ACTUAL RUNS: {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
@@ -95,7 +95,7 @@ class TestZfsScaling(unittest2.TestCase):
 
 
         #this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
-        expected_runs=842
+        expected_runs=1047
         print("EXPECTED RUNS: {}".format(expected_runs))
         print("ACTUAL RUNS: {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
@@ -18,7 +18,10 @@ class TestZfsAutobackup32(unittest2.TestCase):
         shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")
 
         with patch('time.strftime', return_value="test-20101111000001"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
+            #try the old way (without guid checking), and fail:
+            self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-guid-check".split(" ")).run(),1)
+            #new way should be ok:
+            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot".split(" ")).run())
 
         r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
         self.assertMultiLineEqual(r,"""
@@ -65,7 +68,10 @@ test_target1/test_source2/fs2/sub@test-20101111000001
         shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")
 
         with patch('time.strftime', return_value="test-20101111000001"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
+            #try the old way and fail:
+            self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --no-guid-check".split(" ")).run(),1)
+            #new way should be ok
+            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-incompatible".split(" ")).run())
 
         r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
         self.assertMultiLineEqual(r,"""
@@ -72,6 +72,8 @@ class ZfsAutobackup(ZfsAuto):
                            help='Send over other snapshots as well, not just the ones created by this tool.')
         group.add_argument('--set-snapshot-properties', metavar='PROPERTY=VALUE,...', type=str,
                            help='List of properties to set on the snapshot.')
+        group.add_argument('--no-guid-check', action='store_true',
+                           help='Dont check guid of common snapshots. (faster)')
 
 
         group = parser.add_argument_group("Transfer options")
@@ -360,7 +362,7 @@ class ZfsAutobackup(ZfsAuto):
                                               destroy_incompatible=self.args.destroy_incompatible,
                                               send_pipes=send_pipes, recv_pipes=recv_pipes,
                                               decrypt=self.args.decrypt, encrypt=self.args.encrypt,
-                                              zfs_compressed=self.args.zfs_compressed, force=self.args.force)
+                                              zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check)
             except Exception as e:
                 # if self.args.progress:
                 # self.clear_progress()
@@ -186,7 +186,7 @@ class ZfsAutoverify(ZfsAuto):
             target_dataset = target_node.get_dataset(target_name)
 
             # find common snapshots to verify
-            source_snapshot = source_dataset.find_common_snapshot(target_dataset)
+            source_snapshot = source_dataset.find_common_snapshot(target_dataset, True)
             target_snapshot = target_dataset.find_snapshot(source_snapshot)
 
             if source_snapshot is None or target_snapshot is None:
@@ -817,13 +817,15 @@ class ZfsDataset:
             obsolete.destroy()
             self.snapshots.remove(obsolete)
 
-    def find_common_snapshot(self, target_dataset):
+    def find_common_snapshot(self, target_dataset, guid_check):
         """find latest common snapshot between us and target returns None if its
         an initial transfer
 
         Args:
+            :type guid_check: bool
             :type target_dataset: ZfsDataset
         """
 
         if not target_dataset.snapshots:
             # target has nothing yet
             return None
@@ -831,7 +833,7 @@ class ZfsDataset:
         for source_snapshot in reversed(self.snapshots):
             target_snapshot=target_dataset.find_snapshot(source_snapshot)
             if target_snapshot:
-                if source_snapshot.properties['guid']!=target_snapshot.properties['guid']:
+                if guid_check and source_snapshot.properties['guid']!=target_snapshot.properties['guid']:
                     source_snapshot.warning("Common snapshot has invalid guid, ignoring.")
                 else:
                     source_snapshot.debug("common snapshot")
@@ -981,18 +983,19 @@ class ZfsDataset:
         else:
             return resume_token
 
-    def _plan_sync(self, target_dataset, also_other_snapshots):
+    def _plan_sync(self, target_dataset, also_other_snapshots, guid_check):
         """plan where to start syncing and what to sync and what to keep
 
         Args:
             :rtype: ( ZfsDataset, ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset )
             :type target_dataset: ZfsDataset
             :type also_other_snapshots: bool
+            :type guid_check: bool
         """
 
         # determine common and start snapshot
         target_dataset.debug("Determining start snapshot")
-        common_snapshot = self.find_common_snapshot(target_dataset)
+        common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check)
         start_snapshot = self.find_start_snapshot(common_snapshot, also_other_snapshots)
         incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot)
 
@@ -1034,7 +1037,7 @@ class ZfsDataset:
 
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
 
@@ -1052,14 +1055,14 @@ class ZfsDataset:
         :type decrypt: bool
         :type also_other_snapshots: bool
         :type no_send: bool
-        :type destroy_incompatible: bool
+        :type guid_check: bool
         """
 
         self.verbose("sending to {}".format(target_dataset))
 
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
          incompatible_target_snapshots) = \
-            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
+            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots, guid_check=guid_check)
 
         # NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
         # Also usefull with no_send to still cleanup stuff.