Compare commits
13 Commits
| SHA1 |
|---|
| de877362c9 |
| 9b1254a6d9 |
| c110943f20 |
| e94eb11f63 |
| 0d498e3f44 |
| dd301dc422 |
| 9e6d90adfe |
| a6b688c976 |
| 10f1290ad9 |
| b51eefa139 |
| 805d7e3536 |
| 8f0472e8f5 |
| 002aa6a731 |
README.md (14 changed lines)
```diff
@@ -84,7 +84,7 @@ On older servers you might have to use easy_install
 
 Its also possible to just download <https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/bin/zfs-autobackup> and run it directly.
 
-The only requirement that is sometimes missing is the `argparse` python module. Optionally you can install `colorma` for colors.
+The only requirement that is sometimes missing is the `argparse` python module. Optionally you can install `colorama` for colors.
 
 It should work with python 2.7 and higher.
 
```
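The download-and-run option mentioned in this hunk amounts to something like the following sketch (working directory and use of wget are assumptions):

```sh
# fetch the script directly and run it, no package manager needed
wget https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/bin/zfs-autobackup
chmod +x zfs-autobackup
./zfs-autobackup --help
```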
````diff
@@ -324,7 +324,7 @@ Snapshots on the source that still have to be send to the target wont be destroy
 ## Tips
 
 * Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
-* You can split up the snapshotting and sending tasks by creating two cronjobs. Use ```--no-send``` for the snapshotter-cronjob and use ```--no-snapshot``` for the send-cronjob. This is usefull if you only want to send at night or if your send take too long.
+* You can split up the snapshotting and sending tasks by creating two cronjobs. Use ```--no-send``` for the snapshotter-cronjob and use ```--no-snapshot``` for the send-cronjob. This is useful if you only want to send at night or if your send take too long.
 * Set the ```readonly``` property of the target filesystem to ```on```. This prevents changes on the target side. (Normally, if there are changes the next backup will fail and will require a zfs rollback.) Note that readonly means you cant change the CONTENTS of the dataset directly. Its still possible to receive new datasets and manipulate properties etc.
 * Use ```--clear-refreservation``` to save space on your backup server.
 * Use ```--clear-mountpoint``` to prevent the target server from mounting the backupped filesystem in the wrong place during a reboot.
````
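The cronjob-splitting tip in this hunk could look like the following two /etc/crontab entries; the times, the backup name offsite1, and the pool paths are hypothetical:

```sh
# snapshotter job: take snapshots every hour, don't send anything
0 * * * *   root  zfs-autobackup offsite1 backuppool/backups --no-send
# sender job: send and thin at night, don't create new snapshots
30 2 * * *  root  zfs-autobackup offsite1 backuppool/backups --no-snapshot
```

And the readonly tip, applied once on the target side:

```sh
zfs set readonly=on backuppool/backups
```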
```diff
@@ -409,9 +409,9 @@ optional arguments:
                         10,1d1w,1w1m,1m1y
   --other-snapshots     Send over other snapshots as well, not just the ones
                         created by this tool.
-  --no-snapshot         Dont create new snapshots (usefull for finishing
+  --no-snapshot         Dont create new snapshots (useful for finishing
                         uncompleted backups, or cleanups)
-  --no-send             Dont send snapshots (usefull for cleanups, or if you
+  --no-send             Dont send snapshots (useful for cleanups, or if you
                         want a separate send-cronjob)
   --min-change MIN_CHANGE
                         Number of bytes written after which we consider a
```
```diff
@@ -419,9 +419,9 @@ optional arguments:
   --allow-empty         If nothing has changed, still create empty snapshots.
                         (same as --min-change=0)
   --ignore-replicated   Ignore datasets that seem to be replicated some other
-                        way. (No changes since lastest snapshot. Usefull for
+                        way. (No changes since lastest snapshot. Useful for
                         proxmox HA replication)
-  --no-holds            Dont lock snapshots on the source. (Usefull to allow
+  --no-holds            Dont lock snapshots on the source. (Useful to allow
                         proxmox HA replication to switches nodes)
   --resume              Support resuming of interrupted transfers by using the
                         zfs extensible_dataset feature (both zpools should
```
```diff
@@ -454,7 +454,7 @@ optional arguments:
                         care! (implies --rollback)
   --ignore-transfer-errors
                         Ignore transfer errors (still checks if received
-                        filesystem exists. usefull for acltype errors)
+                        filesystem exists. useful for acltype errors)
   --raw                 For encrypted datasets, send data exactly as it exists
                         on disk.
   --test                dont change anything, just show what would be done
```
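For orientation, a plausible invocation using the schedule string shown above; the host, backup name, and pool are hypothetical, and the schedule reads as: keep the last 10 snapshots, then one per day for a week, one per week for a month, one per month for a year:

```sh
zfs-autobackup --ssh-source pve01 offsite1 backuppool/backups \
    --keep-source=10,1d1w,1w1m,1m1y --keep-target=10,1d1w,1w1m,1m1y \
    --progress --verbose
```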
bin/zfs-autobackup
```diff
@@ -26,7 +26,7 @@ if sys.stdout.isatty():
     except ImportError:
         pass
 
-VERSION="3.0-rc9"
+VERSION="3.0-rc10"
 HEADER="zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)\n".format(VERSION)
 
 class Log:
```
```diff
@@ -40,6 +40,7 @@ class Log:
             print(colorama.Fore.RED+colorama.Style.BRIGHT+ "! "+txt+colorama.Style.RESET_ALL, file=sys.stderr)
         else:
             print("! "+txt, file=sys.stderr)
+        sys.stderr.flush()
 
     def verbose(self, txt):
         if self.show_verbose:
```
```diff
@@ -47,6 +48,7 @@ class Log:
             print(colorama.Style.NORMAL+ " "+txt+colorama.Style.RESET_ALL)
         else:
             print(" "+txt)
+        sys.stdout.flush()
 
     def debug(self, txt):
         if self.show_debug:
```
```diff
@@ -54,6 +56,7 @@ class Log:
             print(colorama.Fore.GREEN+ "# "+txt+colorama.Style.RESET_ALL)
         else:
             print("# "+txt)
+        sys.stdout.flush()
 
 
 
```
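The three flush() calls added above matter once output is piped instead of attached to a terminal, since Python may block-buffer stdout in that case; a sketch of the setup they improve (log path hypothetical):

```sh
# without explicit flushes, verbose/debug lines can lag far behind the actual work here
zfs-autobackup offsite1 backuppool/backups --verbose | tee /var/log/zfs-autobackup.log
```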
```diff
@@ -308,7 +311,7 @@ class ExecuteNode:
     def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
         """ssh_config: custom ssh config
         ssh_to: server you want to ssh to. none means local
-        readonly: only execute commands that don't make any changes (usefull for testing-runs)
+        readonly: only execute commands that don't make any changes (useful for testing-runs)
         debug_output: show output and exit codes of commands in debugging output.
         """
 
```
```diff
@@ -622,7 +625,7 @@ class ZfsDataset():
     @cached_property
     def exists(self):
         """check if dataset exists.
-        Use force to force a specific value to be cached, if you already know. Usefull for performance reasons"""
+        Use force to force a specific value to be cached, if you already know. Useful for performance reasons"""
 
 
         if self.force_exists!=None:
```
```diff
@@ -1024,9 +1027,7 @@ class ZfsDataset():
             return(None)
 
 
-
-
-    def thin(self, keeps=[], ignores=[]):
+    def thin_list(self, keeps=[], ignores=[]):
         """determines list of snapshots that should be kept or deleted based on the thinning schedule. cull the herd!
         keep: list of snapshots to always keep (usually the last)
         ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be destroyed anyway)
```
```diff
@@ -1039,6 +1040,15 @@ class ZfsDataset():
         return(self.zfs_node.thinner.thin(snapshots, keep_objects=keeps))
 
 
+    def thin(self):
+        """destroys snapshots according to thin_list, except last snapshot"""
+
+        (keeps, obsoletes)=self.thin_list(keeps=self.our_snapshots[-1:])
+        for obsolete in obsoletes:
+            obsolete.destroy()
+            self.snapshots.remove(obsolete)
+
+
     def find_common_snapshot(self, target_dataset):
         """find latest common snapshot between us and target
         returns None if its an initial transfer
```
```diff
@@ -1137,13 +1147,13 @@ class ZfsDataset():
         #now let thinner decide what we want on both sides as final state (after all transfers are done)
         self.debug("Create thinning list")
         if self.our_snapshots:
-            (source_keeps, source_obsoletes)=self.thin(keeps=[self.our_snapshots[-1]])
+            (source_keeps, source_obsoletes)=self.thin_list(keeps=[self.our_snapshots[-1]])
         else:
             source_keeps=[]
             source_obsoletes=[]
 
         if target_dataset.our_snapshots:
-            (target_keeps, target_obsoletes)=target_dataset.thin(keeps=[target_dataset.our_snapshots[-1]], ignores=incompatible_target_snapshots)
+            (target_keeps, target_obsoletes)=target_dataset.thin_list(keeps=[target_dataset.our_snapshots[-1]], ignores=incompatible_target_snapshots)
         else:
             target_keeps=[]
             target_obsoletes=[]
```
```diff
@@ -1226,10 +1236,10 @@ class ZfsDataset():
 
             # we may now destroy the previous source snapshot if its obsolete
             if prev_source_snapshot in source_obsoletes:
                 prev_source_snapshot.destroy()
 
             # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot, the rest was already destroyed or will not be send)
-            prev_target_snapshot=target_dataset.find_snapshot(common_snapshot)
+            prev_target_snapshot=target_dataset.find_snapshot(prev_source_snapshot)
             if prev_target_snapshot in target_obsoletes:
                 prev_target_snapshot.destroy()
 
```
```diff
@@ -1302,7 +1312,7 @@ class ZfsNode(ExecuteNode):
             #always output for debugging offcourse
             self.debug(prefix+line.rstrip())
 
-            #actual usefull info
+            #actual useful info
             if len(progress_fields)>=3:
                 if progress_fields[0]=='full' or progress_fields[0]=='size':
                     self._progress_total_bytes=int(progress_fields[2])
```
```diff
@@ -1317,8 +1327,8 @@ class ZfsNode(ExecuteNode):
                     bytes_left=self._progress_total_bytes-bytes
                     minutes_left=int((bytes_left/(bytes/(time.time()-self._progress_start_time)))/60)
 
-                    print(">>> {}% {}MB/s (total {}MB, {} minutes left) \r".format(percentage, speed, int(self._progress_total_bytes/(1024*1024)), minutes_left), end='')
-                    sys.stdout.flush()
+                    print(">>> {}% {}MB/s (total {}MB, {} minutes left) \r".format(percentage, speed, int(self._progress_total_bytes/(1024*1024)), minutes_left), end='', file=sys.stderr)
+                    sys.stderr.flush()
 
                 return
 
```
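With the progress line moved to stderr, capturing stdout no longer swallows it; for example (log path hypothetical):

```sh
# progress stays visible on the terminal while the verbose log goes to a file
zfs-autobackup offsite1 backuppool/backups --progress --verbose > /var/log/backup.log
```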
```diff
@@ -1370,7 +1380,7 @@ class ZfsNode(ExecuteNode):
 
                 pools[pool].append(snapshot)
 
-                #add snapshot to cache (also usefull in testmode)
+                #add snapshot to cache (also useful in testmode)
                 dataset.snapshots.append(snapshot) #NOTE: this will trigger zfs list
 
         if not pools:
```
```diff
@@ -1394,6 +1404,9 @@ class ZfsNode(ExecuteNode):
 
         returns: list of ZfsDataset
         """
+
+        self.debug("Getting selected datasets")
+
         #get all source filesystems that have the backup property
         lines=self.run(tab_split=True, readonly=True, cmd=[
             "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-s", "local,inherited", "-H", "autobackup:"+self.backup_name
```
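Dataset selection, as exercised by the command in this hunk; the backup name offsite1 is hypothetical, and the zfs invocation mirrors the code above:

```sh
# mark a dataset (and its children) for selection:
zfs set autobackup:offsite1=true rpool/data

# the selector then runs, roughly:
zfs get -t volume,filesystem -o name,value,source -s local,inherited -H autobackup:offsite1
# -H prints tab-separated name/value/source lines, matching the tab_split=True parsing
```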
```diff
@@ -1429,11 +1442,6 @@ class ZfsNode(ExecuteNode):
         return(selected_filesystems)
 
 
-
-
-
-
-
 class ZfsAutobackup:
     """main class"""
     def __init__(self):
```
```diff
@@ -1451,13 +1459,13 @@ class ZfsAutobackup:
         parser.add_argument('target_path', help='Target ZFS filesystem')
 
         parser.add_argument('--other-snapshots', action='store_true', help='Send over other snapshots as well, not just the ones created by this tool.')
-        parser.add_argument('--no-snapshot', action='store_true', help='Don\'t create new snapshots (usefull for finishing uncompleted backups, or cleanups)')
-        parser.add_argument('--no-send', action='store_true', help='Don\'t send snapshots (usefull for cleanups, or if you want a serperate send-cronjob)')
+        parser.add_argument('--no-snapshot', action='store_true', help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
+        parser.add_argument('--no-send', action='store_true', help='Don\'t send snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
         parser.add_argument('--min-change', type=int, default=1, help='Number of bytes written after which we consider a dataset changed (default %(default)s)')
         parser.add_argument('--allow-empty', action='store_true', help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
-        parser.add_argument('--ignore-replicated', action='store_true', help='Ignore datasets that seem to be replicated some other way. (No changes since lastest snapshot. Usefull for proxmox HA replication)')
-        parser.add_argument('--no-holds', action='store_true', help='Don\'t lock snapshots on the source. (Usefull to allow proxmox HA replication to switches nodes)')
-        #not sure if this ever was usefull:
+        parser.add_argument('--ignore-replicated', action='store_true', help='Ignore datasets that seem to be replicated some other way. (No changes since lastest snapshot. Useful for proxmox HA replication)')
+        parser.add_argument('--no-holds', action='store_true', help='Don\'t lock snapshots on the source. (Useful to allow proxmox HA replication to switches nodes)')
+        #not sure if this ever was useful:
         # parser.add_argument('--ignore-new', action='store_true', help='Ignore filesystem if there are already newer snapshots for it on the target (use with caution)')
 
         parser.add_argument('--resume', action='store_true', help='Support resuming of interrupted transfers by using the zfs extensible_dataset feature (both zpools should have it enabled) Disadvantage is that you need to use zfs recv -A if another snapshot is created on the target during a receive. Otherwise it will keep failing.')
```
```diff
@@ -1472,7 +1480,7 @@ class ZfsAutobackup:
         parser.add_argument('--set-properties', type=str, help='List of propererties to override when receiving filesystems. (you can still restore them with zfs inherit -S)')
         parser.add_argument('--rollback', action='store_true', help='Rollback changes to the latest target snapshot before starting. (normally you can prevent changes by setting the readonly property on the target_path to on)')
         parser.add_argument('--destroy-incompatible', action='store_true', help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
-        parser.add_argument('--ignore-transfer-errors', action='store_true', help='Ignore transfer errors (still checks if received filesystem exists. usefull for acltype errors)')
+        parser.add_argument('--ignore-transfer-errors', action='store_true', help='Ignore transfer errors (still checks if received filesystem exists. useful for acltype errors)')
         parser.add_argument('--raw', action='store_true', help='For encrypted datasets, send data exactly as it exists on disk.')
 
 
```
```diff
@@ -1480,13 +1488,16 @@ class ZfsAutobackup:
         parser.add_argument('--verbose', action='store_true', help='verbose output')
         parser.add_argument('--debug', action='store_true', help='Show zfs commands that are executed, stops after an exception.')
         parser.add_argument('--debug-output', action='store_true', help='Show zfs commands and their output/exit codes. (noisy)')
-        parser.add_argument('--progress', action='store_true', help='show zfs progress output (to stderr)')
+        parser.add_argument('--progress', action='store_true', help='show zfs progress output (to stderr). Enabled by default on ttys.')
 
         #note args is the only global variable we use, since its a global readonly setting anyway
         args = parser.parse_args()
 
         self.args=args
 
+        if sys.stderr.isatty():
+            args.progress=True
+
         if args.debug_output:
             args.debug=True
```
```diff
@@ -1517,105 +1528,126 @@ class ZfsAutobackup:
 
     def run(self):
 
-        self.verbose (HEADER)
-
-        if self.args.test:
-            self.verbose("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
-
-        self.set_title("Settings summary")
-
-        description="[Source]"
-        source_thinner=Thinner(self.args.keep_source)
-        source_node=ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_source, readonly=self.args.test, debug_output=self.args.debug_output, description=description, thinner=source_thinner)
-        source_node.verbose("Send all datasets that have 'autobackup:{}=true' or 'autobackup:{}=child'".format(self.args.backup_name, self.args.backup_name))
-
-        self.verbose("")
-
-        description="[Target]"
-        target_thinner=Thinner(self.args.keep_target)
-        target_node=ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_target, readonly=self.args.test, debug_output=self.args.debug_output, description=description, thinner=target_thinner)
-        target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
-
-        self.set_title("Selecting")
-        selected_source_datasets=source_node.selected_datasets
-        if not selected_source_datasets:
-            self.error("No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets you want to backup.".format(self.args.backup_name))
+        try:
+            self.verbose (HEADER)
+
+            if self.args.test:
+                self.verbose("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
+
+            self.set_title("Settings summary")
+
+            description="[Source]"
+            source_thinner=Thinner(self.args.keep_source)
+            source_node=ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_source, readonly=self.args.test, debug_output=self.args.debug_output, description=description, thinner=source_thinner)
+            source_node.verbose("Send all datasets that have 'autobackup:{}=true' or 'autobackup:{}=child'".format(self.args.backup_name, self.args.backup_name))
+
+            self.verbose("")
+
+            description="[Target]"
+            target_thinner=Thinner(self.args.keep_target)
+            target_node=ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_target, readonly=self.args.test, debug_output=self.args.debug_output, description=description, thinner=target_thinner)
+            target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
+
+            self.set_title("Selecting")
+            selected_source_datasets=source_node.selected_datasets
+            if not selected_source_datasets:
+                self.error("No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets you want to backup.".format(self.args.backup_name))
+                return(255)
+
+            source_datasets=[]
+
+            #filter out already replicated stuff?
+            if not self.args.ignore_replicated:
+                source_datasets=selected_source_datasets
+            else:
+                self.set_title("Filtering already replicated filesystems")
+                for selected_source_dataset in selected_source_datasets:
+                    if selected_source_dataset.is_changed(self.args.min_change):
+                        source_datasets.append(selected_source_dataset)
+                    else:
+                        selected_source_dataset.verbose("Ignoring, already replicated")
+
+            if not self.args.no_snapshot:
+                self.set_title("Snapshotting")
+                source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(), min_changed_bytes=self.args.min_change)
+
+            if self.args.no_send:
+                self.set_title("Thinning")
+            else:
+                self.set_title("Sending and thinning")
+
+            if self.args.filter_properties:
+                filter_properties=self.args.filter_properties.split(",")
+            else:
+                filter_properties=[]
+
+            if self.args.set_properties:
+                set_properties=self.args.set_properties.split(",")
+            else:
+                set_properties=[]
+
+            if self.args.clear_refreservation:
+                filter_properties.append("refreservation")
+
+            if self.args.clear_mountpoint:
+                set_properties.append("canmount=noauto")
+
+            #sync datasets
+            fail_count=0
+            target_datasets=[]
+            for source_dataset in source_datasets:
+
+                try:
+                    #determine corresponding target_dataset
+                    target_name=self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+                    target_dataset=ZfsDataset(target_node, target_name)
+                    target_datasets.append(target_dataset)
+
+                    #ensure parents exists
+                    if not self.args.no_send and not target_dataset.parent.exists:
+                        target_dataset.parent.create_filesystem(parents=True)
+
+                    source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress, resume=self.args.resume, filter_properties=filter_properties, set_properties=set_properties, ignore_recv_exit_code=self.args.ignore_transfer_errors, source_holds= not self.args.no_holds, rollback=self.args.rollback, raw=self.args.raw, other_snapshots=self.args.other_snapshots, no_send=self.args.no_send, destroy_incompatible=self.args.destroy_incompatible)
+                except Exception as e:
+                    fail_count=fail_count+1
+                    self.error("DATASET FAILED: "+str(e))
+                    if self.args.debug:
+                        raise
+
+            #also thin target_datasets that are not on the source any more
+            self.debug("Thinning obsolete datasets")
+            for dataset in ZfsDataset(target_node, self.args.target_path).recursive_datasets:
+                if dataset not in target_datasets:
+                    dataset.verbose("Missing on source")
+                    dataset.thin()
+
+            if not fail_count:
+                if self.args.test:
+                    self.set_title("All tests successfull.")
+                else:
+                    self.set_title("All backups completed successfully")
+            else:
+                self.error("{} datasets failed!".format(fail_count))
+
+            if self.args.test:
+                self.verbose("TEST MODE - DID NOT MAKE ANY BACKUPS!")
+
+            return(fail_count)
+
+        except Exception as e:
+            self.error("Exception: "+str(e))
+            if self.args.debug:
+                raise
+            return(255)
+        except KeyboardInterrupt as e:
+            self.error("Aborted")
             return(255)
-
-        source_datasets=[]
-
-        #filter out already replicated stuff?
-        if not self.args.ignore_replicated:
-            source_datasets=selected_source_datasets
-        else:
-            self.set_title("Filtering already replicated filesystems")
-            for selected_source_dataset in selected_source_datasets:
-                if selected_source_dataset.is_changed(self.args.min_change):
-                    source_datasets.append(selected_source_dataset)
-                else:
-                    selected_source_dataset.verbose("Ignoring, already replicated")
-
-        if not self.args.no_snapshot:
-            self.set_title("Snapshotting")
-            source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(), min_changed_bytes=self.args.min_change)
-
-        if self.args.no_send:
-            self.set_title("Thinning")
-        else:
-            self.set_title("Sending and thinning")
-
-        if self.args.filter_properties:
-            filter_properties=self.args.filter_properties.split(",")
-        else:
-            filter_properties=[]
-
-        if self.args.set_properties:
-            set_properties=self.args.set_properties.split(",")
-        else:
-            set_properties=[]
-
-        if self.args.clear_refreservation:
-            filter_properties.append("refreservation")
-
-        if self.args.clear_mountpoint:
-            set_properties.append("canmount=noauto")
-
-        fail_count=0
-        for source_dataset in source_datasets:
-
-            try:
-                #determine corresponding target_dataset
-                target_name=self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
-                target_dataset=ZfsDataset(target_node, target_name)
-
-                #ensure parents exists
-                if not self.args.no_send and not target_dataset.parent.exists:
-                    target_dataset.parent.create_filesystem(parents=True)
-
-                source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress, resume=self.args.resume, filter_properties=filter_properties, set_properties=set_properties, ignore_recv_exit_code=self.args.ignore_transfer_errors, source_holds= not self.args.no_holds, rollback=self.args.rollback, raw=self.args.raw, other_snapshots=self.args.other_snapshots, no_send=self.args.no_send, destroy_incompatible=self.args.destroy_incompatible)
-            except Exception as e:
-                fail_count=fail_count+1
-                self.error("DATASET FAILED: "+str(e))
-                if self.args.debug:
-                    raise
-
-        if not fail_count:
-            if self.args.test:
-                self.set_title("All tests successfull.")
-            else:
-                self.set_title("All backups completed successfully")
-        else:
-            self.error("{} datasets failed!".format(fail_count))
-
-        if self.args.test:
-            self.verbose("TEST MODE - DID NOT MAKE ANY BACKUPS!")
-
-        return(fail_count)
 
 
 if __name__ == "__main__":
     zfs_autobackup=ZfsAutobackup()
```
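The reworked run() above returns a status instead of letting exceptions escape, so the exit code can drive monitoring; a sketch (names hypothetical):

```sh
zfs-autobackup offsite1 backuppool/backups --verbose
# 0 = all datasets succeeded; otherwise the number of failed datasets,
# or 255 on a fatal error, empty selection, or keyboard interrupt
echo "exit status: $?"
```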