ready to implement zfs-autoverify
@@ -6,7 +6,7 @@ from .LogConsole import LogConsole
 
 
 class ZfsAuto(object):
-    """Common Base class for zfs-auto... tools """
+    """Common Base class, this class is always used subclassed. Look at ZfsAutobackup and ZfsAutoverify."""
 
     # also used by setup.py
     VERSION = "3.2-dev1"
@@ -120,6 +120,9 @@ class ZfsAuto(object):
                             help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
         parser.add_argument('--version', action='store_true',
                             help='Show version.')
+        parser.add_argument('--strip-path', metavar='N', default=0, type=int,
+                            help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
+                                 'SmartOS machines)')
 
 
         # SSH options
@@ -162,6 +165,22 @@ class ZfsAuto(object):
     def debug(self, txt):
         self.log.debug(txt)
 
+    def progress(self, txt):
+        self.log.progress(txt)
+
+    def clear_progress(self):
+        self.log.clear_progress()
+
     def set_title(self, title):
         self.log.verbose("")
         self.log.verbose("#### " + title)
+
+    def print_error_sources(self):
+        self.error(
+            "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
+            "you want to select.".format(
+                self.args.backup_name))
+
+    def make_target_name(self, source_dataset):
+        """make target_name from a source_dataset"""
+        return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
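The hunks above add the --strip-path option and a few shared helpers (progress, clear_progress, print_error_sources, make_target_name) to the ZfsAuto base class; the ZfsAutobackup hunks further down remove the now-duplicated copies. A minimal standalone sketch of the name mapping make_target_name() performs, assuming ZfsDataset.lstrip_path(N) simply drops the first N components of the dataset path (the helper functions below are hypothetical, for illustration only):

# Hypothetical stand-ins for ZfsDataset.lstrip_path() and ZfsAuto.make_target_name(),
# shown only to illustrate the intended mapping.
def lstrip_path(dataset_path, count):
    # assumption: drop the first `count` components of the dataset path
    return "/".join(dataset_path.split("/")[count:])

def make_target_name(target_path, source_dataset, strip_path=0):
    return target_path + "/" + lstrip_path(source_dataset, strip_path)

# With --strip-path 1, "zones/myzone/data" ends up under "backuppool/backups/myzone/data".
print(make_target_name("backuppool/backups", "zones/myzone/data", strip_path=1))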
@@ -73,9 +73,6 @@ class ZfsAutobackup(ZfsAuto):
                            help='Don\'t transfer snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
         group.add_argument('--no-holds', action='store_true',
                            help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
-        group.add_argument('--strip-path', metavar='N', default=0, type=int,
-                           help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
-                                'SmartOS machines)')
         group.add_argument('--clear-refreservation', action='store_true',
                            help='Filter "refreservation" property. (recommended, safes space. same as '
                                 '--filter-properties refreservation)')
@@ -135,12 +132,6 @@ class ZfsAutobackup(ZfsAuto):
 
         return parser
 
-    def progress(self, txt):
-        self.log.progress(txt)
-
-    def clear_progress(self):
-        self.log.clear_progress()
-
     # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
     def thin_missing_targets(self, target_dataset, used_target_datasets):
         """thin target datasets that are missing on the source."""
@@ -307,7 +298,7 @@ class ZfsAutobackup(ZfsAuto):
 
             try:
                 # determine corresponding target_dataset
-                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+                target_name = self.make_target_name(source_dataset)
                 target_dataset = ZfsDataset(target_node, target_name)
                 target_datasets.append(target_dataset)
 
@@ -409,10 +400,7 @@ class ZfsAutobackup(ZfsAuto):
                                                        exclude_unchanged=self.args.exclude_unchanged,
                                                        min_change=self.args.min_change)
         if not source_datasets:
-            self.error(
-                "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
-                "you want to select.".format(
-                    self.args.backup_name))
+            self.print_error_sources()
             return 255
 
         ################# snapshotting
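The ZfsAutobackup hunks above are the other half of that refactor: the duplicated --strip-path option, the progress helpers and the "no source filesystems selected" error message are removed from the subclass, which now relies on the shared ZfsAuto implementations. A simplified sketch of the resulting split, with illustrative class and method names that do not necessarily match the real ones:

import argparse

class ZfsAutoSketch:
    """Base: options and helpers shared by every zfs-auto... tool."""
    def get_parser(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--strip-path', metavar='N', default=0, type=int)
        return parser

class ZfsAutobackupSketch(ZfsAutoSketch):
    """Subclass: only adds its tool-specific options."""
    def get_parser(self):
        parser = super().get_parser()
        parser.add_argument('--no-holds', action='store_true')
        return parser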
@@ -1,8 +1,10 @@
 from .ZfsAuto import ZfsAuto
+from .ZfsDataset import ZfsDataset
+from .ZfsNode import ZfsNode
+import sys
 
 class ZfsAutoverify(ZfsAuto):
-    """The zfs-autoverify class"""
+    """The zfs-autoverify class, default arguments and behaviour come from ZfsAuto"""
 
     def __init__(self, argv, print_arguments=True):
 
@@ -14,6 +16,9 @@ class ZfsAutoverify(ZfsAuto):
 
         args=super(ZfsAutoverify, self).parse_args(argv)
 
+        if args.target_path == None:
+            self.log.error("Please specify TARGET-PATH")
+            sys.exit(255)
 
         return args
 
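With the check above, zfs-autoverify refuses to run without a target path. A hypothetical sketch of driving the class programmatically, assuming (as with ZfsAutobackup) that the constructor parses argv and that the positional arguments are the backup name followed by the target path; the values and the package import path are examples, not defaults:

from zfs_autobackup.ZfsAutoverify import ZfsAutoverify

def main():
    # "offsite1" selects datasets tagged autobackup:offsite1, and "backuppool/backups"
    # is the path the backups were received under; both are example values.
    verifier = ZfsAutoverify(["offsite1", "backuppool/backups"], print_arguments=False)
    return verifier.run()

if __name__ == "__main__":
    raise SystemExit(main())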
@@ -25,9 +30,78 @@ class ZfsAutoverify(ZfsAuto):
 
         return (parser)
 
+    def verify_datasets(self, source_node, source_datasets, target_node):
+
+        fail_count=0
+        count = 0
+        for source_dataset in source_datasets:
+
+            # stats
+            if self.args.progress:
+                count = count + 1
+                self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
+
+            try:
+                # determine corresponding target_dataset
+                target_name = self.make_target_name(source_dataset)
+                target_dataset = ZfsDataset(target_node, target_name)
+
+                XXX
+
+            except Exception as e:
+                fail_count = fail_count + 1
+                source_dataset.error("FAILED: " + str(e))
+                if self.args.debug:
+                    raise
+
+
+
     def run(self):
 
-        pass
+        try:
+
+            ################ create source zfsNode
+            self.set_title("Source settings")
+
+            description = "[Source]"
+            source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
+                                  ssh_config=self.args.ssh_config,
+                                  ssh_to=self.args.ssh_source, readonly=self.args.test,
+                                  debug_output=self.args.debug_output, description=description)
+
+            ################# select source datasets
+            self.set_title("Selecting")
+            source_datasets = source_node.selected_datasets(property_name=self.property_name,
+                                                            exclude_received=self.args.exclude_received,
+                                                            exclude_paths=self.exclude_paths,
+                                                            exclude_unchanged=self.args.exclude_unchanged,
+                                                            min_change=0)
+            if not source_datasets:
+                self.print_error_sources()
+                return 255
+
+            # create target_node
+            self.set_title("Target settings")
+            target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
+                                  logger=self, ssh_config=self.args.ssh_config,
+                                  ssh_to=self.args.ssh_target,
+                                  readonly=self.args.test, debug_output=self.args.debug_output,
+                                  description="[Target]")
+            target_node.verbose("Verify datasets under: {}".format(self.args.target_path))
+
+            fail_count = self.verify_datasets(
+                source_node=source_node,
+                source_datasets=source_datasets,
+                target_node=target_node)
+
+        except Exception as e:
+            self.error("Exception: " + str(e))
+            if self.args.debug:
+                raise
+            return 255
+        except KeyboardInterrupt:
+            self.error("Aborted")
+            return 255
 
 def cli():
     import sys
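The XXX placeholder in verify_datasets() marks where the actual per-dataset verification still has to be implemented (hence the commit title). Purely as an illustration of one possible check, and not necessarily what zfs-autoverify will end up doing, the sketch below compares the ZFS guid property of a same-named snapshot on source and target; the guid survives zfs send/receive, so a missing snapshot or a guid mismatch means the target snapshot was not received from that source snapshot. This is a consistency check rather than a full data compare, and the local-only zfs calls stand in for whatever the real tool would run through its ZfsNode/ZfsDataset abstractions, possibly over SSH:

# Illustrative sketch only; the function names and direct `zfs` calls are assumptions.
import subprocess

def snapshot_guid(dataset, snapshot):
    """Return the guid of dataset@snapshot, or None if the snapshot does not exist."""
    try:
        result = subprocess.run(
            ["zfs", "get", "-H", "-o", "value", "guid", "{}@{}".format(dataset, snapshot)],
            check=True, capture_output=True, text=True)
    except subprocess.CalledProcessError:
        return None
    return result.stdout.strip()

def verify_snapshot(source_dataset, target_dataset, snapshot):
    """Raise if the target does not hold a snapshot received from source_dataset@snapshot."""
    source_guid = snapshot_guid(source_dataset, snapshot)
    target_guid = snapshot_guid(target_dataset, snapshot)
    if source_guid is None or target_guid is None:
        raise Exception("snapshot {} missing on source or target".format(snapshot))
    if source_guid != target_guid:
        raise Exception("guid mismatch on snapshot {}".format(snapshot))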