Compare commits

...

153 Commits

Author SHA1 Message Date
89ed1e012d cleanup 2022-01-24 17:22:44 +01:00
ff9beae427 create temporary clone to verify volumes 2022-01-24 16:55:20 +01:00
302a9ecd86 more consistent creation of ZfsDataset and ZfsPool via ZfsNode.get_dataset() and ZfsNode.get_pool() 2022-01-24 16:29:32 +01:00
c0086f8953 added tar-mode. moved static methods. more compatible /dev checking without udevadm 2022-01-24 13:53:32 +01:00
ddd82b935b show test output 2022-01-24 12:31:28 +01:00
51d6731aa8 settle udev devices when 2022-01-24 11:46:34 +01:00
36f2b672bd more zfs-verify tests 2022-01-24 11:41:51 +01:00
81a785b360 more zfs-verify tests 2022-01-24 11:37:42 +01:00
670532ef31 pythonversion agnostic 2022-01-24 11:02:56 +01:00
dd55ca4079 zfs-autoverify wip (basics start to function) 2022-01-24 00:18:27 +01:00
f66957d867 zfs-autoverify wip 2022-01-23 23:01:53 +01:00
69975b37fb zfs-autoverify wip 2022-01-23 21:36:56 +01:00
c299626d18 debug mode implies verbose mode now 2022-01-23 21:22:46 +01:00
7b4f10080f zfs-verify wip (not functional yet) 2022-01-19 00:11:27 +01:00
787e3dba9c zfs-verify stuff 2022-01-18 23:46:08 +01:00
86d504722c zfs-verify stuff 2022-01-18 20:54:19 +01:00
6791bc4abd ready to implement zfs-autoverify 2022-01-18 01:02:01 +01:00
db5186bf38 ready to implement zfs-autoverify 2022-01-18 00:11:52 +01:00
d2b183bb27 move more ulgy stuff to parse_args 2022-01-17 23:34:22 +01:00
033fcf68f7 move exclude_paths and exclude_received to common 2022-01-17 23:10:35 +01:00
14d45667de fixes 2022-01-17 22:54:27 +01:00
f2a3221911 fixes 2022-01-17 22:34:18 +01:00
8baee52ab1 greatly improved output of help (divided into sections) 2022-01-17 22:26:42 +01:00
d114f63f29 extract common stuff to prepare for zfs-autoverify 2022-01-17 21:19:40 +01:00
b36b64cc94 Update FUNDING.yml 2022-01-12 00:20:30 +01:00
5a70172a50 Update FUNDING.yml 2022-01-12 00:20:17 +01:00
f635e8cd67 Create FUNDING.yml 2022-01-12 00:16:01 +01:00
0e362e5d89 moved stuff to wiki 2022-01-07 11:56:32 +01:00
f2ab2938b0 moved stuff to wiki 2022-01-07 11:54:36 +01:00
2d96d13125 moved stuff to wiki 2022-01-07 11:53:29 +01:00
883984fda3 Revert "Initial ZFS clones support"
Woops accidently committed this, still need to review/change it before comitting.

This reverts commit e11c332808.
2022-01-04 22:48:25 +01:00
db2625b08c fix #101 2022-01-04 22:26:44 +01:00
e11c332808 Initial ZFS clones support 2021-12-21 20:09:41 +01:00
07cb7cfad4 version bump 2021-12-19 18:23:09 +01:00
7b4a986f13 fix #103 2021-12-19 18:16:54 +01:00
be2474bb1c error doc 2021-11-02 20:18:57 +01:00
81e7cd940c forgot to remove debugging print 2021-10-04 00:59:50 +02:00
0b4448798e out of range for python 2 2021-10-04 00:34:07 +02:00
b1689f5066 added --...-format options. closes #87 2021-10-04 00:14:40 +02:00
dcb9cdac44 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-10-03 21:50:08 +02:00
9dc280abad Merge pull request #98 from sbidoul/relative-imports
Use relative imports
2021-09-21 16:44:47 +02:00
6b8c683315 Merge branch 'relative-imports' of https://github.com/sbidoul/zfs_autobackup 2021-09-21 14:31:50 +02:00
66e123849b preparations for #87 2021-09-21 14:30:59 +02:00
7325e1e351 ignore coveralls submission errors 2021-09-21 14:21:32 +02:00
9f4ea51622 ignore coveralls submission errors 2021-09-21 14:14:58 +02:00
8c1058a808 Use relative imports 2021-09-21 14:05:33 +02:00
d9e759a3eb fix 2021-09-20 21:14:26 +02:00
46457b3aca added some common short options and changes to fix #88 2021-09-20 15:11:23 +02:00
59f7ccc352 default compression is now zstd-fast, fixes #97 2021-09-20 14:54:38 +02:00
578fb1be4b renamed --ignore-replicated to --exclude-unchanged. tidied up and removed seperate filter_replicated() step. #93, #95, #96 2021-09-20 14:52:20 +02:00
f9b16c050b Merge pull request #96 from xrobau/really-ignore-replicated
Fix #93, Fix #95 Re-Document --exclude-received
2021-09-16 12:14:01 +02:00
2ba6fe5235 Fix #93, Fix #95 Re-Document --exclude-received
If another zfs_autobackup session is running, there will be changes
on zvols that are replicated. This means that it is possible for
a pair of servers running replication between themselves to start
backing up the backups.  Turning on --exclude-received when backups
are running between an a <--> b pair of servers means that the vols
that are received will never be accidentally seleted.
2021-09-16 09:38:26 +10:00
8e2c91735a Merge remote-tracking branch 'origin/master' 2021-08-24 11:14:24 +02:00
d57e3922a0 added note, #91 2021-08-24 11:14:13 +02:00
4b25dd76f1 Merge pull request #90 from bagbag/master
Support for zstd-adapt
2021-08-19 15:45:17 +02:00
2843781aa6 Support for zstd-adapt 2021-08-18 23:08:00 +02:00
ce987328d9 update doc 2021-08-18 10:32:41 +02:00
9a902f0f38 final 3.1 release 2021-08-18 10:28:10 +02:00
ee2c074539 it seems the amount of changed bytes has become bigger on my prox. (approx 230k) 2021-07-07 13:20:04 +02:00
77f1c16414 fix #84 2021-07-03 14:31:34 +02:00
c5363a1538 pre/post cmd tests 2021-06-22 12:43:52 +02:00
119225ba5b bump 2021-06-18 17:27:41 +02:00
84437ee1d0 pre/post snapshot polishing. still needs test-scripts 2021-06-18 17:16:18 +02:00
1286bfafd0 Merge pull request #80 from tuffnatty/pre-post-snapshot-cmd
Add support for pre- and post-snapshot scripts (#39)
2021-06-17 11:48:09 +02:00
9fc2703638 Always call post-snapshot-cmd to clean up faster on snapshot failure 2021-06-17 12:38:16 +03:00
01dc65af96 Merge pull request #81 from tuffnatty/another-typo
Another typo fix
2021-06-17 11:21:08 +02:00
082153e0ce A more robust MySQL example 2021-06-17 09:52:18 +03:00
77f5474447 Fix tests. 2021-06-16 23:37:38 +03:00
55ff14f1d8 Allow multiple --pre/post-snapshot-cmd options. Add a usage example. 2021-06-16 23:19:29 +03:00
2acd26b304 Fix a typo 2021-06-16 19:52:06 +03:00
ec9459c1d2 Use shlex.split() for --pre-snapshot-cmd and --post-snapshot-cmd 2021-06-16 19:35:41 +03:00
233fd83ded Merge pull request #79 from tuffnatty/typos
Fix a couple of typos
2021-06-16 16:46:10 +02:00
37c24e092c Merge pull request #78 from tuffnatty/patch-1
Typo fix in argument name in warning message
2021-06-16 16:45:42 +02:00
b2bf11382c Add --pre-snapshot-cmd and --post-snapshot-cmd options 2021-06-16 16:12:20 +03:00
19b918044e Fix a couple of typos 2021-06-16 13:19:47 +03:00
67d9240e7b Typo fix in argument name in warning message 2021-06-16 00:32:31 +03:00
1a5e4a9cdd doc 2021-05-31 22:33:44 +02:00
31f8c359ff update version number 2021-05-31 22:19:28 +02:00
b50b7b7563 test 2021-05-31 22:10:56 +02:00
37f91e1e08 no longer use zfs send --compressed as default. uses --zfs-compressed to reenable it. fixes #77 . 2021-05-31 22:02:31 +02:00
a2f3aee5b1 test and fix resume edge-case 2021-05-26 23:06:19 +02:00
75d0a3cc7e rc1 2021-05-26 20:15:05 +02:00
98c55e2aa8 test fix 2021-05-26 18:07:17 +02:00
d478e22111 test rate limit 2021-05-26 18:04:05 +02:00
3a4953fbc5 doc 2021-05-26 17:57:38 +02:00
8d4e041a9c add mbuffer 2021-05-26 17:52:10 +02:00
8725d56bc9 also add buffer on receving side 2021-05-26 17:38:05 +02:00
ab0bfdbf4e update docs 2021-05-19 00:52:56 +02:00
ea9012e476 beta6 2021-05-18 23:42:13 +02:00
97e3c110b3 added bandwidth throttling. fixes #51 2021-05-18 19:56:33 +02:00
9264e8de6d more warnings 2021-05-18 19:36:33 +02:00
830ccf1bd4 added warnings in yellow 2021-05-18 19:22:46 +02:00
a389e4c81c fix 2021-05-18 18:18:54 +02:00
36a66fbafc fix 2021-05-18 18:10:34 +02:00
b70c9986c7 regression tests for all compressors 2021-05-18 18:04:47 +02:00
664ea32c96 doc 2021-05-15 16:18:34 +02:00
30f30babea added compression, fixes #40 2021-05-15 16:18:02 +02:00
5e04aabf37 show pipes in verbose 2021-05-15 12:34:21 +02:00
59d53e9664 --recv-pipe and --send-pipe implemented. Added CmdItem to make CmdPipe more consitent 2021-05-11 00:59:26 +02:00
171f0ac5ad as final step we now can do system piping. fixes #50 2021-05-09 14:03:57 +02:00
0ce3bf1297 python 2 compat 2021-05-09 13:04:22 +02:00
c682665888 python 2 compat 2021-05-09 11:09:55 +02:00
086cfe570b run everything in either local shell (shell=true), or remote shell (ssh). this it to allow external shell piping 2021-05-09 10:56:30 +02:00
521d1078bd working on send pipe 2021-05-03 20:25:49 +02:00
8ea178af1f test re-replication 2021-05-03 00:03:22 +02:00
3e39e1553e allow re-replication of a backup with the same name. (now filters on target_path instead of received-status when selecting when appropriate. also shows notes about this) 2021-05-02 22:51:20 +02:00
f0cc2bca2a improved progress reporting. improved no_thinning performance 2021-04-23 20:31:37 +02:00
59b0c23a20 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-22 01:16:53 +02:00
401a3f73cc better handling of piped exit codes 2021-04-22 01:12:41 +02:00
8ec5ed2f4f extra test 2021-04-22 00:14:14 +02:00
8318b2f9bf Update README.md 2021-04-21 00:19:21 +02:00
72b97ab2e8 doc. bump version 2021-04-21 00:04:58 +02:00
a16a038f0e doc 2021-04-20 23:43:20 +02:00
fc0da9d380 skip encryption if not supported 2021-04-20 23:34:41 +02:00
31be12c0bf doc fix 2021-04-20 23:24:59 +02:00
176f04b302 proper encryption/decryption support. also fixes #60 2021-04-20 23:20:54 +02:00
7696d8c16d working on encryption 2021-04-20 21:22:27 +02:00
190a73ec10 merge 2021-04-20 21:06:46 +02:00
2bf015e127 still working on encryption 2021-04-20 21:05:44 +02:00
671eda7386 working on proper encryption support 2021-04-20 18:39:57 +02:00
3d4b26cec3 fix test 2021-04-19 10:54:55 +02:00
c0ea311e18 fix 2021-04-18 22:57:03 +02:00
b7b2723b2e coverage 2021-04-18 22:47:53 +02:00
ec1d3ff93e fix #74.
also changed internal logic: thinner now isnt actually created when --no-thinning is active.

When using --no-thinning --no-snapshot --no-send it still does usefull stuff like checking common snapshots and showing incompatible snapshots
2021-04-18 14:30:23 +02:00
352d5e6094 coverage 2021-04-17 22:33:12 +02:00
488ff6f551 coverage 2021-04-17 22:29:47 +02:00
f52b8bbf58 coverage 2021-04-17 21:53:27 +02:00
e47d461999 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-17 21:08:34 +02:00
a920744b1e allow regression to run from pycharm by using sudo with passwd file. you also need to suid root zfs and zpool 2021-04-15 13:56:24 +02:00
63f423a201 Update README.md 2021-04-14 09:58:20 +02:00
db6523f3c0 remove unused code 2021-04-13 21:21:35 +02:00
6b172dce2d fix 2021-04-13 17:13:13 +02:00
85d493469d fix 2021-04-13 17:07:01 +02:00
bef3be4955 tests, nicer error message for invalid schedule str 2021-04-13 16:42:55 +02:00
f9719ba87e tests 2021-04-13 16:32:43 +02:00
4b97f789df run() now uses CmdPipe for better pipe handling and cleaner code 2021-04-12 18:16:42 +02:00
ed7cd41ad7 accidently broke testing 2021-04-12 13:56:06 +02:00
62e19d97c2 fix 2021-04-10 14:56:25 +02:00
594a2664c4 more tests 2021-04-10 14:52:52 +02:00
d8fbc96be6 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-10 14:34:42 +02:00
61bb590112 also enable branch-coverage 2021-04-10 14:29:27 +02:00
86ea5e49f4 working on better piping system 2021-04-07 23:58:41 +02:00
01642365c7 Update README.md 2021-04-06 23:51:40 +02:00
4910b1dfb5 seperated piping 2021-04-05 22:18:14 +02:00
966df73d2f added doctypes 2021-04-01 22:48:17 +02:00
69ed827c0d doc 2021-03-30 11:53:11 +02:00
e79f6ac157 speedup testing 2021-03-27 21:07:31 +01:00
59efd070a1 autoruntests 2021-03-27 20:08:05 +01:00
80c1bdad1c update usage text as requested in #54 2021-03-17 00:11:52 +01:00
cf72de7c28 cleanedup and improved select-code 2021-03-16 23:40:31 +01:00
686bb48bda restore progres. verify --destroy-incompabible output. 2021-03-11 11:57:51 +01:00
6a48b8a2a9 nicer help 2021-03-03 15:48:44 +01:00
477b66c342 split encoder 2021-03-03 11:50:11 +01:00
39 changed files with 3026 additions and 1490 deletions

.github/FUNDING.yml (vendored, new file, 5 lines changed)

@ -0,0 +1,5 @@
# These are supported funding model platforms
github: psy0rz
ko_fi: psy0rz
custom: https://paypal.me/psy0rz


@ -17,7 +17,7 @@ jobs:
- name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
- name: Regression test
@ -27,7 +27,7 @@ jobs:
- name: Coveralls
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true
ubuntu18:
runs-on: ubuntu-18.04
@ -39,7 +39,7 @@ jobs:
- name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
- name: Regression test
@ -49,7 +49,7 @@ jobs:
- name: Coveralls
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true
ubuntu18_python2:
runs-on: ubuntu-18.04
@ -64,7 +64,7 @@ jobs:
python-version: '2.x'
- name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux python-setuptools && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls
run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
- name: Regression test
run: sudo -E ./tests/run_tests
@ -73,4 +73,4 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true

.gitignore (vendored, 1 line changed)

@ -11,3 +11,4 @@ __pycache__
python2.env
venv
.idea
password.sh

README.md (653 lines changed)

@ -5,7 +5,7 @@
## Introduction
This is a tool I wrote to make replicating ZFS datasets easy and reliable.
ZFS-autobackup tries to be the most reliable and easiest to use tool, while having all the features.
You can either use it as a **backup** tool, **replication** tool or **snapshot** tool.
@ -13,657 +13,46 @@ You can select what to backup by setting a custom `ZFS property`. This makes it
Other settings are just specified on the commandline: Simply set up and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
Since its using ZFS commands, you can see what its actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
Since its using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
An important feature that's missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything except make changes to your system.
zfs-autobackup tries to be the easiest to use backup tool for zfs.
## Features
* Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page)
* Plays nicely with existing replication systems. (Like Proxmox HA)
* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
* Automatically selects filesystems to backup by looking at a simple ZFS property.
* Creates consistent snapshots. (takes all snapshots at once, atomically.)
* Multiple backup modes:
* Back up local data on the same server.
* "push" local data to a backup-server via SSH.
* "pull" remote data from a server via SSH and back it up locally.
* Or even pull data from a server while pushing the backup to another server. (Zero trust between source and target server)
* Can be scheduled via a simple cronjob or run directly from commandline.
* Supports resuming of interrupted transfers.
* "pull+push": Zero trust between source and target.
* Can be scheduled via simple cronjob or run directly from commandline.
* ZFS encryption support: Can decrypt / encrypt or even re-encrypt datasets during transfer.
* Supports sending with compression. (Using pigz, zstd etc)
* IO buffering to speed up transfer.
* Bandwidth rate limiting.
* Multiple backups from and to the same datasets are no problem.
* Creates the snapshot before doing anything else. (ensuring you at least have a snapshot if all else fails)
* Checks everything but tries to continue on non-fatal errors when possible. (Reports the error count when done.)
* Resilient to errors.
* Ability to manually 'finish' failed backups to see what's going on.
* Easy to debug and has a test-mode. Actual unix commands are printed.
* Uses **progressive thinning** for older snapshots.
* Uses zfs-holds on important snapshots so they can't be accidentally destroyed.
* Uses progressive thinning for older snapshots.
* Uses zfs-holds on important snapshots to prevent accidental deletion.
* Automatic resuming of failed transfers.
* Can continue from existing common snapshots. (e.g. easy migration)
* Gracefully handles destroyed datasets on source.
* Easy migration from existing zfs backups.
* Gracefully handles datasets that no longer exist on source.
* Complete and clean logging.
* Easy installation:
* Just install zfs-autobackup via pip, or download it manually.
* Written in python and uses zfs-commands, no 3rd-party dependencies or libraries needed.
* No separate config files or properties. Just one zfs-autobackup command you can copy/paste in your backup script.
* Just install zfs-autobackup via pip.
* Only needs to be installed on one side.
* Written in python and uses zfs-commands, no special 3rd-party dependencies or compiled libraries needed.
* No annoying config files or properties.
## Installation
## Getting started
### Using pip
The recommended way on most servers is to use [pip](https://pypi.org/project/zfs-autobackup/):
```console
[root@server ~]# pip install --upgrade zfs-autobackup
```
This can also be used to upgrade zfs-autobackup to the newest stable version.
### Using easy_install
On older servers you might have to use easy_install
```console
[root@server ~]# easy_install zfs-autobackup
```
## Example
In this example we're going to back up a machine called `server1` to a machine called `backup`.
### Setup SSH login
zfs-autobackup needs passwordless login via ssh. This means generating an ssh key and copying it to the remote server.
#### Generate SSH key on `backup`
On the backup-server that runs zfs-autobackup you need to create an SSH key. You only need to do this once.
Use the `ssh-keygen` command and leave the passphrase empty:
```console
root@backup:~# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:McJhCxvaxvFhO/3e8Lf5gzSrlTWew7/bwrd2U2EHymE root@backup
The key's randomart image is:
+---[RSA 2048]----+
| + = |
| + X * E . |
| . = B + o o . |
| . o + o o.|
| S o .oo|
| . + o= +|
| . ++==.|
| .+o**|
| .. +B@|
+----[SHA256]-----+
root@backup:~#
```
#### Copy SSH key to `server1`
Now you need to copy the public part of the key to `server1`.
The `ssh-copy-id` command is a handy tool to automate this. It will just ask for your password.
```console
root@backup:~# ssh-copy-id root@server1.server.com
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@server1.server.com'"
and check to make sure that only the key(s) you wanted were added.
root@backup:~#
```
This allows the backup-server to log in to `server1` as root without a password.
### Select filesystems to backup
It's important to choose a unique and consistent backup name. In this case we name our backup: `offsite1`.
On the source ZFS system, set the ```autobackup:offsite1``` zfs property to true:
```console
[root@server1 ~]# zfs set autobackup:offsite1=true rpool
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 true inherited from rpool
...
```
ZFS properties are ```inherited``` by child datasets. Since we've set the property on the highest dataset, we're essentially backing up the whole pool.
Because we don't want to back up everything, we can exclude certain filesystems by setting the property to false:
```console
[root@server1 ~]# zfs set autobackup:offsite1=false rpool/swap
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 false local
...
```
The autobackup-property can have 3 values:
* ```true```: Back up the dataset and all its children
* ```false```: Don't back up the dataset or any of its children. (used to exclude certain datasets)
* ```child```: Only back up the children of the dataset, not the dataset itself.
Only use the zfs-command to set these properties, not the zpool command.
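For example, to select only the datasets below `rpool/data` without selecting `rpool/data` itself, you could use the `child` value (a sketch using the dataset names from this example):
```console
[root@server1 ~]# zfs set autobackup:offsite1=child rpool/data
```
With this, `rpool/data/vm-100-disk-0` would still be selected, but `rpool/data` itself would be skipped.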
### Running zfs-autobackup
Run the script on the backup server and pull the data from the server specified by --ssh-source.
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --progress --verbose
#### Settings summary
[Source] Datasets on: server1.server.com
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Send all datasets that have 'autobackup:offsite1=true' or 'autobackup:offsite1=child'
[Target] Datasets are local
[Target] Keep the last 10 snapshots.
[Target] Keep every 1 day, delete after 1 week.
[Target] Keep every 1 week, delete after 1 month.
[Target] Keep every 1 month, delete after 1 year.
[Target] Receive datasets under: backup/server1
#### Selecting
[Source] rpool: Selected (direct selection)
[Source] rpool/ROOT: Selected (inherited selection)
[Source] rpool/ROOT/server1-1: Selected (inherited selection)
[Source] rpool/data: Selected (inherited selection)
[Source] rpool/data/vm-100-disk-0: Selected (inherited selection)
[Source] rpool/swap: Ignored (disabled)
#### Snapshotting
[Source] rpool: No changes since offsite1-20200218175435
[Source] rpool/ROOT: No changes since offsite1-20200218175435
[Source] rpool/data: No changes since offsite1-20200218175435
[Source] Creating snapshot offsite1-20200218180123
#### Sending and thinning
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175547: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175706: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180049: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180123: receiving incremental
[Target] backup/server1/rpool/data@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/data/vm-100-disk-0@offsite1-20200218175435: receiving full
...
```
Note that this is called a "pull" backup: The backup server pulls the backup from the server. This is usually the preferred way.
It's also possible to let a server push its backup to the backup-server. However, this has security implications. In that case you would set up the SSH keys the other way around and use the --ssh-target parameter on the server.
### Automatic backups
Now every time you run the command, zfs-autobackup will create a new snapshot and replicate your data.
Older snapshots will eventually be deleted, depending on the `--keep-source` and `--keep-target` settings. (The defaults are shown above under the 'Settings summary')
Once you've got the correct settings for your situation, you can just store the command in a cronjob.
Or just create a script and run it manually when you need it.
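For example, a crontab entry on the backup server could look like this (a sketch; the schedule and log path are hypothetical):
```console
# /etc/crontab on the backup server: nightly pull backup at 03:00
0 3 * * * root zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 >>/var/log/zfs-autobackup.log 2>&1
```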
## Use as snapshot tool
You can use zfs-autobackup to only make snapshots.
Just don't specify the target-path:
```console
root@ws1:~# zfs-autobackup test --verbose
zfs-autobackup v3.0 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
#### Source settings
[Source] Datasets are local
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Selects all datasets that have property 'autobackup:test=true' (or childs of datasets that have 'autobackup:test=child')
#### Selecting
[Source] test_source1/fs1: Selected (direct selection)
[Source] test_source1/fs1/sub: Selected (inherited selection)
[Source] test_source2/fs2: Ignored (only childs)
[Source] test_source2/fs2/sub: Selected (inherited selection)
#### Snapshotting
[Source] Creating snapshots test-20200710125958 in pool test_source1
[Source] Creating snapshots test-20200710125958 in pool test_source2
#### Thinning source
[Source] test_source1/fs1@test-20200710125948: Destroying
[Source] test_source1/fs1/sub@test-20200710125948: Destroying
[Source] test_source2/fs2/sub@test-20200710125948: Destroying
#### All operations completed successfully
(No target_path specified, only operated as snapshot tool.)
```
This also allows you to make several snapshots during the day, but only back up the data at night when the server is not busy.
## Thinning out obsolete snapshots
The thinner is the mechanism that destroys old snapshots on the source and target.
The thinner operates "stateless": There is nothing in the name or properties of a snapshot that indicates how long it will be kept. Every time zfs-autobackup runs, it will look at the timestamp of all the existing snapshots. From there it will determine which snapshots are obsolete according to your schedule. The advantage of this stateless system is that you can always change the schedule.
Note that the thinner will ONLY destroy snapshots that match the naming pattern of zfs-autobackup. If you use `--other-snapshots`, it won't destroy those snapshots after replicating them to the target.
### Destroying missing datasets
When a dataset has been destroyed or deselected on the source but still exists on the target, we call it a missing dataset. Missing datasets will still be thinned out according to the schedule.
The final snapshot will never be destroyed, unless you specify a **deadline** with the `--destroy-missing` option:
In that case it will look at the last snapshot we took and determine if it is older than the deadline you specified. E.g. `--destroy-missing 30d` will start destroying things 30 days after the last snapshot.
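For example, to let zfs-autobackup clean up backups of datasets that were removed from the source more than 30 days ago (a sketch based on the pull example above):
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --destroy-missing 30d --verbose
```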
#### After the deadline
When the deadline has passed, all our snapshots except the last one will be destroyed, regardless of the normal thinning schedule.
The dataset has to meet the following conditions before it is finally destroyed:
* The dataset has no direct child-filesystems or volumes.
* The only snapshot left is the last one created by zfs-autobackup.
* The remaining snapshot has no clones.
### Thinning schedule
The default thinning schedule is: `10,1d1w,1w1m,1m1y`.
The schedule consists of multiple rules separated by a `,`.
A plain number specifies how many snapshots you want to always keep, regardless of time or interval.
The format of the other rules is: `<Interval><TTL>`.
* Interval: The minimum interval between the snapshots. Snapshots with intervals smaller than this will be destroyed.
* TTL: The maximum time-to-live of a snapshot; after that it will be destroyed.
* These are the time units you can use for interval and TTL:
* `y`: Years
* `m`: Months
* `d`: Days
* `h`: Hours
* `min`: Minutes
* `s`: Seconds
Since this might sound very complicated, the `--verbose` option will show you what it all means:
```console
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
```
A snapshot will only be destroyed if it is not needed anymore by ANY of the rules.
You can specify as many rules as you need. The order of the rules doesn't matter.
Keep in mind it's up to you to actually run zfs-autobackup often enough: If you want to keep hourly snapshots, you have to make sure you run it at least every hour.
However, it's no problem if you run it more or less often than that: The thinner will still keep an optimal set of snapshots to match your schedule as well as possible.
If you want to keep as few snapshots as possible, just specify 0. (`--keep-source=0` for example)
If you want to keep ALL the snapshots, just specify a very high number.
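As an illustration, a custom schedule that keeps the last 5 snapshots, hourly snapshots for a day and daily snapshots for a month would look like this (hypothetical values, based on the pull example above):
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --keep-source=5,1h1d,1d1m --keep-target=5,1h1d,1d1m --verbose
```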
### More details about the Thinner
We will give a practical example of how the thinner operates.
Say we have 3 thinner rules:
* We want to keep daily snapshots for 7 days.
* We want to keep weekly snapshots for 4 weeks.
* We want to keep monthly snapshots for 12 months.
So far we have taken 4 snapshots at random moments:
![thinner example](https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/doc/thinner.png)
For every rule, the thinner will divide the timeline in blocks and assign each snapshot to a block.
A block can only be assigned one snapshot: If multiple snapshots fall into the same block, only the oldest one we want to keep is assigned to it.
The colors show to which block a snapshot belongs:
* Snapshot 1: This snapshot belongs to daily block 1, weekly block 0 and monthly block 0. However the daily block is too old.
* Snapshot 2: Since weekly block 0 and monthly block 0 already have a snapshot, it only belongs to daily block 4.
* Snapshot 3: This snapshot belongs to daily block 8 and weekly block 1.
* Snapshot 4: Since daily block 8 already has a snapshot, this one doesn't belong to anything and can be deleted right away. (it will be kept for now since it's the last snapshot)
zfs-autobackup will re-evaluate this on every run: As soon as a snapshot doesn't belong to any block anymore it will be destroyed.
Snapshots on the source that still have to be sent to the target won't be destroyed, of course. (If the target still wants them, according to the target schedule.)
## Tips
* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
* You can split up the snapshotting and sending tasks by creating two cronjobs. Create a separate snapshotter-cronjob by just omitting target-path.
* Set the ```readonly``` property of the target filesystem to ```on```. This prevents changes on the target side. (Normally, if there are changes the next backup will fail and will require a zfs rollback.) Note that readonly means you can't change the CONTENTS of the dataset directly. It's still possible to receive new datasets and manipulate properties etc. (See the example after this list.)
* Use ```--clear-refreservation``` to save space on your backup server.
* Use ```--clear-mountpoint``` to prevent the target server from mounting the backed-up filesystem in the wrong place during a reboot.
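For example, protecting the target path from the pull example against accidental local changes is a single ZFS command (using the example paths from above):
```console
[root@backup ~]# zfs set readonly=on backup/server1
```
Child datasets inherit the property, so received backups stay protected while receiving new snapshots keeps working.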
### Performance tips
If you have a large number of datasets, it's important to keep the following tips in mind.
#### Some statistics
To get some idea of how fast zfs-autobackup is, I did some tests on my laptop, with a SKHynix_HFS512GD9TNI-L2B0B disk. I'm using zfs 2.0.2.
I created 100 empty datasets and measured the total runtime of zfs-autobackup. I used all the performance tips below. (--no-holds, --allow-empty, ssh ControlMaster)
* without ssh: 15 seconds. (>6 datasets/s)
* either ssh-target or ssh-source=localhost: 20 seconds (5 datasets/s)
* both ssh-target and ssh-source=localhost: 24 seconds (4 datasets/s)
To be bold I created 2500 datasets, but that also was no problem. So it seems it should be possible to use zfs-autobackup with thousands of datasets.
If you need more performance let me know.
NOTE: There is actually a performance regression in ZFS version 2: https://github.com/openzfs/zfs/issues/11560 Use --no-progress as workaround.
#### Less work
You can make zfs-autobackup generate less work by using --no-holds and --allow-empty.
This saves a lot of extra zfs-commands per dataset.
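Combined with the earlier pull example, that would look something like this (a sketch):
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --no-holds --allow-empty
```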
#### Speeding up SSH
You can make your ssh connections persistent and greatly speed up zfs-autobackup:
On the backup-server add this to your ~/.ssh/config:
```console
Host *
ControlPath ~/.ssh/control-master-%r@%h:%p
ControlMaster auto
ControlPersist 3600
```
Thanks @mariusvw :)
### Specifying ssh port or options
The correct way to do this is by creating ~/.ssh/config:
```console
Host smartos04
Hostname 1.2.3.4
Port 1234
user root
Compression yes
```
This way you can just specify "smartos04" as host.
It also enables compression, which is useful on slow links.
Look in man ssh_config for many more options.
## Usage
Here you find all the options:
```console
[root@server ~]# zfs-autobackup --help
usage: zfs-autobackup [-h] [--ssh-config SSH_CONFIG] [--ssh-source SSH_SOURCE]
[--ssh-target SSH_TARGET] [--keep-source KEEP_SOURCE]
[--keep-target KEEP_TARGET] [--other-snapshots]
[--no-snapshot] [--no-send] [--min-change MIN_CHANGE]
[--allow-empty] [--ignore-replicated] [--no-holds]
[--strip-path STRIP_PATH] [--clear-refreservation]
[--clear-mountpoint]
[--filter-properties FILTER_PROPERTIES]
[--set-properties SET_PROPERTIES] [--rollback]
[--destroy-incompatible] [--ignore-transfer-errors]
[--raw] [--test] [--verbose] [--debug] [--debug-output]
[--progress]
backup-name [target-path]
zfs-autobackup v3.0-rc12 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
positional arguments:
backup-name Name of the backup (you should set the zfs property
"autobackup:backup-name" to true on filesystems you
want to backup
target-path Target ZFS filesystem (optional: if not specified,
zfs-autobackup will only operate as snapshot-tool on
source)
optional arguments:
-h, --help show this help message and exit
--ssh-config SSH_CONFIG
Custom ssh client config
--ssh-source SSH_SOURCE
Source host to get backup from. (user@hostname)
Default None.
--ssh-target SSH_TARGET
Target host to push backup to. (user@hostname) Default
None.
--keep-source KEEP_SOURCE
Thinning schedule for old source snapshots. Default:
10,1d1w,1w1m,1m1y
--keep-target KEEP_TARGET
Thinning schedule for old target snapshots. Default:
10,1d1w,1w1m,1m1y
--other-snapshots Send over other snapshots as well, not just the ones
created by this tool.
--no-snapshot Don't create new snapshots (useful for finishing
uncompleted backups, or cleanups)
--no-send Don't send snapshots (useful for cleanups, or if you
want a serperate send-cronjob)
--min-change MIN_CHANGE
Number of bytes written after which we consider a
dataset changed (default 1)
--allow-empty If nothing has changed, still create empty snapshots.
(same as --min-change=0)
--ignore-replicated Ignore datasets that seem to be replicated some other
way. (No changes since lastest snapshot. Useful for
proxmox HA replication)
--no-holds Don't lock snapshots on the source. (Useful to allow
proxmox HA replication to switches nodes)
--strip-path STRIP_PATH
Number of directories to strip from target path (use 1
when cloning zones between 2 SmartOS machines)
--clear-refreservation
Filter "refreservation" property. (recommended, safes
space. same as --filter-properties refreservation)
--clear-mountpoint Set property canmount=noauto for new datasets.
(recommended, prevents mount conflicts. same as --set-
properties canmount=noauto)
--filter-properties FILTER_PROPERTIES
List of properties to "filter" when receiving
filesystems. (you can still restore them with zfs
inherit -S)
--set-properties SET_PROPERTIES
List of propererties to override when receiving
filesystems. (you can still restore them with zfs
inherit -S)
--rollback Rollback changes to the latest target snapshot before
starting. (normally you can prevent changes by setting
the readonly property on the target_path to on)
--destroy-incompatible
Destroy incompatible snapshots on target. Use with
care! (implies --rollback)
--ignore-transfer-errors
Ignore transfer errors (still checks if received
filesystem exists. useful for acltype errors)
--raw For encrypted datasets, send data exactly as it exists
on disk.
--test dont change anything, just show what would be done
(still does all read-only operations)
--verbose verbose output
--debug Show zfs commands that are executed, stops after an
exception.
--debug-output Show zfs commands and their output/exit codes. (noisy)
--progress show zfs progress output (to stderr). Enabled by
default on ttys.
When a filesystem fails, zfs_backup will continue and report the number of
failures at that end. Also the exit code will indicate the number of failures.
```
## Troubleshooting
### It keeps asking for my SSH password
You forgot to set up automatic login via SSH keys; look at the example above to see how to do this.
### It says 'cannot receive incremental stream: invalid backup stream'
This usually means you've created a new snapshot on the target side during a backup. If you restart zfs-autobackup, it will automatically abort the invalid partially received snapshot and start over.
### It says 'cannot receive incremental stream: destination has been modified since most recent snapshot'
This means files have been modified on the target side somehow.
You can use --rollback to automatically roll back such changes.
Note: This usually happens if the source-side has a non-standard mountpoint for a dataset, and you're using --clear-mountpoint. In this case the target side creates a mountpoint in the parent dataset, causing the change.
### It says 'internal error: Invalid argument'
In some cases (Linux -> FreeBSD) this means certain properties are not fully supported on the target system.
Try using something like: --filter-properties xattr
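For example (a sketch based on the pull example above):
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --filter-properties xattr
```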
## Restore example
Restoring can be done with simple zfs commands. For example, use this to restore a specific SmartOS disk image to a temporary restore location:
```console
root@fs1:/home/psy# zfs send fs1/zones/backup/zfsbackups/smartos01.server.com/zones/a3abd6c8-24c6-4125-9e35-192e2eca5908-disk0@smartos01_fs1-20160110000003 | ssh root@2.2.2.2 "zfs recv zones/restore"
```
After that you can rename the disk image from the temporary location to the location of a new SmartOS machine you've created.
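For example, moving the restored image into place could be a plain `zfs rename` (the target name here is a hypothetical placeholder; use the disk dataset of the machine you created):
```console
root@2.2.2.2:~# zfs rename zones/restore zones/<new-machine-uuid>-disk0
```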
## Monitoring with Zabbix-jobs
You can monitor backups by using my zabbix-jobs script. (<https://github.com/psy0rz/stuff/tree/master/zabbix-jobs>)
Put this command directly after the zfs_backup command in your cronjob:
```console
zabbix-job-status backup_smartos01_fs1 daily $?
```
This will update the zabbix server with the exit code and will also alert you if the job didn't run for more than 2 days.
## Backup a proxmox cluster with HA replication
Due to the nature of proxmox we had to make a few enhancements to zfs-autobackup. This will probably also benefit other systems that use their own replication in combination with zfs-autobackup.
All data under rpool/data can be on multiple nodes of the cluster. The naming of those filesystems is unique over the whole cluster. Because of this we should back up rpool/data of all nodes to the same destination. This way we won't have duplicate backups of the filesystems that are replicated. Because of various options, you can even migrate hosts and zfs-autobackup will be fine. (and it will get the next backup from the new node automatically)
In the example below we have 3 nodes, named pve1, pve2 and pve3.
### Preparing the proxmox nodes
No preparation is needed, the script will take care of everything. You only need to set up the SSH keys, so that the backup server can access the proxmox server.
TIP: make sure your backup server is firewalled and cannot be reached from any production machine.
### SSH config on backup server
I use ~/.ssh/config to specify how to reach the various hosts.
In this example we are making an offsite copy and use port forwarding to reach the proxmox machines:
```
Host *
ControlPath ~/.ssh/control-master-%r@%h:%p
ControlMaster auto
ControlPersist 3600
Compression yes
Host pve1
Hostname some.host.com
Port 10001
Host pve2
Hostname some.host.com
Port 10002
Host pve3
Hostname some.host.com
Port 10003
```
### Backup script
I use the following backup script on the backup server.
Adjust the variables HOSTS, TARGET and NAME to your needs.
```shell
#!/bin/bash
HOSTS="pve1 pve2 pve3"
TARGET=rpool/pvebackups
NAME=prox
zfs create -p $TARGET/data &>/dev/null
for HOST in $HOSTS; do
echo "################################### RPOOL $HOST"
# enable backup
ssh $HOST "zfs set autobackup:rpool_$NAME=child rpool/ROOT"
#backup rpool to specific directory per host
zfs create -p $TARGET/rpools/$HOST &>/dev/null
zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST rpool_$NAME $TARGET/rpools/$HOST --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --no-holds $@
zabbix-job-status backup_$HOST""_rpool_$NAME daily $? >/dev/null 2>/dev/null
echo "################################### DATA $HOST"
# enable backup
ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data"
#backup data filesystems to a common directory
zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 200000 --no-holds $@
zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null
done
```
This script will also send the backup status to Zabbix. (if you've installed my zabbix-job-status script https://github.com/psy0rz/stuff/tree/master/zabbix-jobs)
Please look at our wiki to [Get started](https://github.com/psy0rz/zfs_autobackup/wiki).
# Sponsor list


@ -1,6 +1,6 @@
colorama
argparse
coverage==4.5.4
coverage
python-coveralls
unittest2
mock


@ -18,7 +18,8 @@ setuptools.setup(
entry_points={
'console_scripts':
[
'zfs-autobackup = zfs_autobackup:cli',
'zfs-autobackup = zfs_autobackup.ZfsAutobackup:cli',
'zfs-autoverify = zfs_autobackup.ZfsAutoverify:cli',
]
},
packages=setuptools.find_packages(),

tests/autoruntests (new executable file, 6 lines changed)

@ -0,0 +1,6 @@
#!/bin/bash
#NOTE: run from top directory
find tests/*.py zfs_autobackup/*.py| entr -r ./tests/run_tests $@


@ -1,4 +1,6 @@
# To run tests as non-root, use this hack:
# chmod 4755 /usr/sbin/zpool /usr/sbin/zfs
import subprocess
import random
@ -9,6 +11,7 @@ import subprocess
import time
from pprint import *
from zfs_autobackup.ZfsAutobackup import *
from zfs_autobackup.ZfsAutoverify import *
from mock import *
import contextlib
import sys
@ -58,7 +61,9 @@ def redirect_stderr(target):
def shelltest(cmd):
"""execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
ret=(subprocess.check_output(cmd , shell=True).decode('utf-8'))
ret=(subprocess.check_output("SUDO_ASKPASS=./password.sh sudo -A "+cmd , shell=True).decode('utf-8'))
print("######### result of: {}".format(cmd))
print(ret)
print("#########")

tests/run_test (new executable file, 5 lines changed)

@ -0,0 +1,5 @@
#!/bin/bash
#run one test. start from main directory
python -m unittest discover tests $@ -vvvf


@ -19,7 +19,7 @@ if ! [ -e /root/.ssh/id_rsa ]; then
fi
coverage run --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
coverage run --branch --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
EXIT=$?
echo

tests/test_cmdpipe.py (new file, 123 lines changed)

@ -0,0 +1,123 @@
from basetest import *
from zfs_autobackup.CmdPipe import CmdPipe,CmdItem
class TestCmdPipe(unittest2.TestCase):
def test_single(self):
"""single process stdout and stderr"""
p=CmdPipe(readonly=False, inp=None)
err=[]
out=[]
p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
self.assertEqual(out, ["/","/"])
self.assertIsNone(executed)
def test_input(self):
"""test stdinput"""
p=CmdPipe(readonly=False, inp="test")
err=[]
out=[]
p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, [])
self.assertEqual(out, ["test"])
self.assertIsNone(executed)
def test_pipe(self):
"""test piped"""
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(err3, [])
self.assertEqual(out, ["TEsT"])
self.assertIsNone(executed)
#test str representation as well
self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)")
def test_pipeerrors(self):
"""test piped stderrs """
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
self.assertEqual(out, [])
self.assertIsNone(executed)
def test_exitcode(self):
"""test piped exitcodes """
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(err3, [])
self.assertEqual(out, [])
self.assertIsNone(executed)
def test_readonly_execute(self):
"""everything readonly, just should execute"""
p=CmdPipe(readonly=True)
err1=[]
err2=[]
out=[]
def true_exit(exit_code):
return True
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(out, ["test2"])
self.assertTrue(executed)
def test_readonly_skip(self):
"""one command not readonly, skip"""
p=CmdPipe(readonly=True)
err1=[]
err2=[]
out=[]
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(out, [])
self.assertTrue(executed)


@ -13,17 +13,17 @@ class TestZfsNode(unittest2.TestCase):
def test_destroymissing(self):
#initial backup
with patch('time.strftime', return_value="10101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
with self.subTest("Should do nothing yet"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
self.assertNotIn(": Destroy missing", buf.getvalue())
@ -36,11 +36,11 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#should have done the snapshot cleanup for destroy missing:
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue())
self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
@ -54,7 +54,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf):
#100y: latest should not be old enough, while second to latest snapshot IS old enough:
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
print(buf.getvalue())
self.assertIn(": Waiting for deadline", buf.getvalue())
@ -62,7 +62,7 @@ class TestZfsNode(unittest2.TestCase):
#past deadline, destroy
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
print(buf.getvalue())
self.assertIn("sub: Destroying", buf.getvalue())
@ -75,7 +75,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
@ -90,7 +90,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#now tries to destroy our own last snapshot (before the final destroy of the dataset)
@ -105,7 +105,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#should have done the snapshot cleanup for destroy missing:
@ -113,7 +113,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#on second run it sees the dangling ex-parent but doesn't know what to do with it (since it has no own snapshot)
@ -130,6 +130,6 @@ test_target1/test_source1
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-10101111000000
test_target1/test_source2/fs2/sub@test-19101111000000
test_target1/test_source2/fs2/sub@test-20101111000000
""")

tests/test_encryption.py (new file, 193 lines changed)

@ -0,0 +1,193 @@
from zfs_autobackup.CmdPipe import CmdPipe
from basetest import *
import time
# We have to do a LOT to properly test encryption/decryption/raw transfers
#
# For every scenario we need at least:
# - plain source dataset
# - encrypted source dataset
# - plain target path
# - encrypted target path
# - do a full transfer
# - do an incremental transfer
# Scenarios:
# - Raw transfer
# - Decryption transfer (--decrypt)
# - Encryption transfer (--encrypt)
# - Re-encryption transfer (--decrypt --encrypt)
class TestZfsEncryption(unittest2.TestCase):
def setUp(self):
prepare_zpools()
try:
shelltest("zfs get encryption test_source1")
except:
self.skipTest("Encryption not supported on this ZFS version.")
def prepare_encrypted_dataset(self, key, path, unload_key=False):
# create encrypted source dataset
shelltest("rm /tmp/zfstest.key 2>/dev/null;true")
shelltest("echo {} > /tmp/zfstest.key".format(key))
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on {}".format(path))
if unload_key:
shelltest("zfs unmount {}".format(path))
shelltest("zfs unload-key {}".format(path))
# r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
def test_raw(self):
"""send encrypted data unaltered (standard operation)"""
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/test_source1/fs1/encryptedsourcekeyless -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_decrypt(self):
"""decrypt data and store unencrypted (--decrypt)"""
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_encrypt(self):
"""send normal data set and store encrypted on the other side (--encrypt) issue #60 """
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_reencrypt(self):
"""reencrypt data (--decrypt --encrypt) """
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")


@ -1,5 +1,5 @@
from basetest import *
from zfs_autobackup.ExecuteNode import ExecuteNode
from zfs_autobackup.ExecuteNode import *
print("THIS TEST REQUIRES SSH TO LOCALHOST")
@ -15,7 +15,7 @@ class TestExecuteNode(unittest2.TestCase):
self.assertEqual(node.run(["echo","test"]), ["test"])
with self.subTest("error exit code"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
node.run(["false"])
#
@ -26,9 +26,9 @@ class TestExecuteNode(unittest2.TestCase):
with self.subTest("multiline tabsplit"):
self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])
#escaping test (shouldnt be a problem locally, single quotes can be a problem remote via ssh)
#escaping test
with self.subTest("escape test"):
s="><`'\"@&$()$bla\\/.*!#test _+-={}[]|"
s="><`'\"@&$()$bla\\/.* !#test _+-={}[]|${bla} $bla"
self.assertEqual(node.run(["echo",s]), [s])
#return stderr as well; trigger stderr by listing something nonexistent
@ -51,6 +51,15 @@ class TestExecuteNode(unittest2.TestCase):
with self.subTest("stdin process with inp=None (shouldn't hang)"):
self.assertEqual(node.run(["cat"]), [])
# let the system do the piping with an unescaped |:
with self.subTest("system piping test"):
#first make sure the actual | character is still properly escaped:
self.assertEqual(node.run(["echo","|"]), ["|"])
#now pipe
self.assertEqual(node.run(["echo", "abc", node.PIPE, "tr", "a", "A" ]), ["Abc"])
def test_basics_local(self):
node=ExecuteNode(debug_output=True)
self.basics(node)
@ -64,7 +73,7 @@ class TestExecuteNode(unittest2.TestCase):
def test_readonly(self):
node=ExecuteNode(debug_output=True, readonly=True)
self.assertEqual(node.run(["echo","test"], readonly=False), None)
self.assertEqual(node.run(["echo","test"], readonly=False), [])
self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])
@ -73,7 +82,7 @@ class TestExecuteNode(unittest2.TestCase):
def pipe(self, nodea, nodeb):
with self.subTest("pipe data"):
output=nodea.run(["dd", "if=/dev/zero", "count=1000"], pipe=True)
output=nodea.run(["dd", "if=/dev/zero", "count=1000"],pipe=True)
self.assertEqual(nodeb.run(["md5sum"], inp=output), ["816df6f64deba63b029ca19d880ee10a -"])
with self.subTest("exit code both ends of pipe ok"):
@ -81,31 +90,35 @@ class TestExecuteNode(unittest2.TestCase):
nodeb.run(["true"], inp=output)
with self.subTest("error on pipe input side"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["false"], pipe=True)
nodeb.run(["true"], inp=output)
with self.subTest("error on both sides, ignore exit codes"):
output=nodea.run(["false"], pipe=True, valid_exitcodes=[])
nodeb.run(["false"], inp=output, valid_exitcodes=[])
with self.subTest("error on pipe output side "):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["true"], pipe=True)
nodeb.run(["false"], inp=output)
with self.subTest("error on both sides of pipe"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["false"], pipe=True)
nodeb.run(["false"], inp=output)
with self.subTest("check stderr on pipe output side"):
output=nodea.run(["true"], pipe=True)
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
output=nodea.run(["true"], pipe=True, valid_exitcodes=[0])
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[2])
self.assertEqual(stdout,[])
self.assertRegex(stderr[0], "nonexistingfile" )
with self.subTest("check stderr on pipe input side (should be only printed)"):
output=nodea.run(["ls", "nonexistingfile"], pipe=True)
(stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
output=nodea.run(["ls", "nonexistingfile"], pipe=True, valid_exitcodes=[2])
(stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0])
self.assertEqual(stdout,[])
self.assertEqual(stderr,[] )
self.assertEqual(stderr,[])


@ -20,7 +20,7 @@ class TestExternalFailures(unittest2.TestCase):
r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
# should fail and leave resume token (if supported)
self.assertTrue(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# free up space
r = shelltest("rm /test_target1/waste")
@ -32,13 +32,13 @@ class TestExternalFailures(unittest2.TestCase):
def test_initial_resume(self):
# initial backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# --test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
print(buf.getvalue())
@ -52,7 +52,7 @@ class TestExternalFailures(unittest2.TestCase):
# should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
print(buf.getvalue())
@ -81,17 +81,17 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_incremental_resume(self):
# initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup leaves resume token
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# --test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
print(buf.getvalue())
@ -105,7 +105,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
# should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
print(buf.getvalue())
@ -138,7 +138,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
@ -148,12 +148,12 @@ test_target1/test_source2/fs2/sub@test-20101111000000
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
# --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
@ -176,23 +176,23 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup, leaves resume token
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
# --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
@ -215,23 +215,23 @@ test_target1/test_source2/fs2/sub@test-20101111000000
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --keep-target=0 --debug --allow-empty".split(" ")).run())
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
print(buf.getvalue())
self.assertIn(": aborting resume, since", buf.getvalue())
self.assertIn("Aborting resume, we dont want that snapshot anymore.", buf.getvalue())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
@ -247,17 +247,45 @@ test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
""")
# test with empty snapshot list (this was a bug)
def test_abort_resume_emptysnapshotlist(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run())
print(buf.getvalue())
self.assertIn("Aborting resume, its obsolete", buf.getvalue())
def test_missing_common(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
with patch('time.strftime', return_value="20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#UPDATE: of course the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
def test_ignoretransfererrors(self):
@ -267,7 +295,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
# #recreate target pool without any features
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
#
# with patch('time.strftime', return_value="20101111000000"):
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all test_target1")

tests/test_log.py (new file, 52 lines)

@ -0,0 +1,52 @@
from zfs_autobackup.LogConsole import LogConsole
from basetest import *
class TestLog(unittest2.TestCase):
def test_colored(self):
"""test with color output"""
with OutputIO() as buf:
with redirect_stdout(buf):
l= LogConsole(show_verbose=False, show_debug=False, color=True)
l.verbose("verbose")
l.debug("debug")
with redirect_stdout(buf):
l=LogConsole(show_verbose=True, show_debug=True, color=True)
l.verbose("verbose")
l.debug("debug")
with redirect_stderr(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=True)
l.error("error")
print(list(buf.getvalue()))
self.assertEqual(list(buf.getvalue()), ['\x1b', '[', '2', '2', 'm', ' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '2', 'm', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '1', 'm', '\x1b', '[', '1', 'm', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\x1b', '[', '0', 'm', '\n'])
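For readers decoding that byte sequence: these are plain ANSI SGR escape codes, exactly as asserted above. A tiny sketch that produces the same three styled lines:
RESET = "\x1b[0m"
print("\x1b[22m" + "  verbose" + RESET)            # normal intensity
print("\x1b[32m" + "# debug" + RESET)              # green
print("\x1b[31m" + "\x1b[1m" + "! error" + RESET)  # red + bold ("bright red")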
def test_nocolor(self):
"""test without color output"""
with OutputIO() as buf:
with redirect_stdout(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=False)
l.verbose("verbose")
l.debug("debug")
with redirect_stdout(buf):
l=LogConsole(show_verbose=True, show_debug=True, color=False)
l.verbose("verbose")
l.debug("debug")
with redirect_stderr(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=False)
l.error("error")
print(list(buf.getvalue()))
self.assertEqual(list(buf.getvalue()), [' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\n', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\n', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\n'])
# zfs_autobackup.LogConsole.colorama=False


@ -8,12 +8,98 @@ class TestZfsNode(unittest2.TestCase):
prepare_zpools()
self.longMessage=True
# #resume initial backup
# def test_keepsource0(self):
def test_keepsource0target10queuedsend(self):
"""Test if thinner doesnt destroy too much early on if there are no common snapshots YET. Issue #84"""
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")
def test_excludepaths(self):
"""Test issue #103"""
shelltest("zfs create test_target1/target_shouldnotbeexcluded")
shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
shelltest("zfs create test_target1/target")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1/target --no-progress --verbose --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/target
test_target1/target/test_source1
test_target1/target/test_source1/fs1
test_target1/target/test_source1/fs1@test-20101111000000
test_target1/target/test_source1/fs1/sub
test_target1/target/test_source1/fs1/sub@test-20101111000000
test_target1/target/test_source2
test_target1/target/test_source2/fs2
test_target1/target/test_source2/fs2/sub
test_target1/target/test_source2/fs2/sub@test-20101111000000
test_target1/target/test_target1
test_target1/target/test_target1/target_shouldnotbeexcluded
test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
test_target1/target_shouldnotbeexcluded
test_target1/target_shouldnotbeexcluded@test-20101111000000
""")
# #somehow only specifying --allow-empty --keep-source 0 failed:
# with patch('time.strftime', return_value="20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
# with patch('time.strftime', return_value="20101111000001"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())


@ -33,8 +33,8 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
@ -46,8 +46,8 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
@ -69,11 +69,12 @@ class TestZfsScaling(unittest2.TestCase):
global run_counter
#first run
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
@ -82,11 +83,12 @@ class TestZfsScaling(unittest2.TestCase):
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
#second run, should have higher number of expected_runs
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)


@ -0,0 +1,88 @@
import zfs_autobackup.compressors
from basetest import *
import time
class TestSendRecvPipes(unittest2.TestCase):
"""test input/output pipes for zfs send and recv"""
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_send_basics(self):
"""send basics (remote/local send pipe)"""
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
def test_compress(self):
"""send basics (remote/local send pipe)"""
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
with self.subTest("compress "+compress):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
def test_buffer(self):
"""test different buffer configurations"""
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
def test_rate(self):
"""test rate limit"""
start=time.time()
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
#timing is not a great way of verifying, but it works (see the arithmetic below).
self.assertGreater(time.time()-start, 5)
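The 5-second bound is simple arithmetic: --rate=50k caps throughput at roughly 50 kB/s, so a few hundred kB of initial send data cannot finish sooner, while an unthrottled local run would complete almost instantly (the stream size here is an assumed ballpark, not measured):
rate = 50 * 1024          # bytes/second, from --rate=50k
stream = 300 * 1024       # assumed rough size of the initial send
assert stream / rate > 5  # hence assertGreater(time.time() - start, 5)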


@ -3,7 +3,7 @@ import pprint
from zfs_autobackup.Thinner import Thinner
#randint is different in python 2 vs 3
# randint is different in python 2 vs 3
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))
@ -23,6 +23,23 @@ class TestThinner(unittest2.TestCase):
# return super().setUp()
def test_exceptions(self):
with self.assertRaisesRegexp(Exception, "^Invalid period"):
ThinnerRule("12X12m")
with self.assertRaisesRegexp(Exception, "^Invalid ttl"):
ThinnerRule("12d12X")
with self.assertRaisesRegexp(Exception, "^Period cant be"):
ThinnerRule("12d1d")
with self.assertRaisesRegexp(Exception, "^Invalid schedule"):
ThinnerRule("XXX")
with self.assertRaisesRegexp(Exception, "^Number of"):
Thinner("-1")
def test_incremental(self):
ok=['2023-01-03 10:53:16',
'2024-01-02 15:43:29',
@ -138,5 +155,5 @@ class TestThinner(unittest2.TestCase):
self.assertEqual(result, ok)
if __name__ == '__main__':
unittest.main()
# if __name__ == '__main__':
# unittest.main()

tests/test_verify.py (new file, 99 lines)

@ -0,0 +1,99 @@
from basetest import *
# test zfs-verify:
# - when there is no common snapshot at all
# - when encryption key not loaded
# - --test mode
# - --fs-compare methods
# - on snapshots of datasets:
# - that are correct
# - that are different
# - on snapshots of zvols
# - that are correct
# - that are different
# - test all directions (local, remote/local, local/remote, remote/remote)
#
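A minimal sketch of what one fs-compare pass boils down to (an assumption about the approach, not ZfsAutoverify's actual code): compare the common snapshot's contents on both sides, via the auto-mounted .zfs/snapshot directory for filesystems and, for zvols, the snapshot's block device (which requires snapdev=visible):
# filesystems: checksum dry-run; every output line marks a differing file
shelltest("rsync -rcn --itemize-changes "
          "/test_source1/fs1/ok_filesystem/.zfs/snapshot/test-20101111000000/ "
          "/test_target1/test_source1/fs1/ok_filesystem/.zfs/snapshot/test-20101111000000/")
# zvols: hash both snapshot devices and compare the digests
shelltest("md5sum /dev/zvol/test_source1/fs1/ok_zvol@test-20101111000000")
shelltest("md5sum /dev/zvol/test_target1/test_source1/fs1/ok_zvol@test-20101111000000")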
class TestZfsEncryption(unittest2.TestCase):
def setUp(self):
prepare_zpools()
#create actual test files and data
shelltest("zfs create test_source1/fs1/ok_filesystem")
shelltest("cp tests/*.py /test_source1/fs1/ok_filesystem")
shelltest("zfs create test_source1/fs1/bad_filesystem")
shelltest("cp tests/*.py /test_source1/fs1/bad_filesystem")
shelltest("zfs create -V 1M test_source1/fs1/ok_zvol")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
shelltest("zfs create -V 1M test_source1/fs1/bad_zvol")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/bad_zvol count=1 bs=512k")
#create backup
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --no-holds".split(" ")).run())
#Do an ugly hack to create a fault in the bad filesystem
#In zfs-autoverify it doesn't matter that the snapshot isn't actually the same snapshot, so this hack works
shelltest("zfs destroy test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
shelltest("zfs mount test_target1/test_source1/fs1/bad_filesystem")
shelltest("echo >> /test_target1/test_source1/fs1/bad_filesystem/test_verify.py")
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
#do the same hack for the bad zvol
shelltest("zfs destroy test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_target1/test_source1/fs1/bad_zvol count=1 bs=1")
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
# make sure we can't accidentally compare current data
shelltest("zfs mount test_target1/test_source1/fs1/ok_filesystem")
shelltest("rm /test_source1/fs1/ok_filesystem/*")
shelltest("rm /test_source1/fs1/bad_filesystem/*")
shelltest("dd if=/dev/zero of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
def test_verify(self):
with self.subTest("default --test"):
self.assertFalse(ZfsAutoverify("test test_target1 --verbose --test".split(" ")).run())
with self.subTest("rsync, remote source and target. (not supported, all 6 fail)"):
self.assertEqual(6, ZfsAutoverify("test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync".split(" ")).run())
def runchecked(testname, command):
with self.subTest(testname):
with OutputIO() as buf:
result=None
with redirect_stderr(buf):
result=ZfsAutoverify(command.split(" ")).run()
print(buf.getvalue())
self.assertEqual(2,result)
self.assertRegex(buf.getvalue(), "bad_filesystem: FAILED:")
self.assertRegex(buf.getvalue(), "bad_zvol: FAILED:")
runchecked("rsync, remote source", "test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=rsync")
runchecked("rsync, remote target", "test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync")
runchecked("rsync, local", "test test_target1 --verbose --exclude-received --fs-compare=rsync")
runchecked("tar, remote source and remote target",
"test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, remote source",
"test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, remote target",
"test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, local", "test test_target1 --verbose --exclude-received --fs-compare=tar")
with self.subTest("no common snapshot"):
#destroy common snapshot, now 3 should fail
shelltest("zfs destroy test_source1/fs1/ok_zvol@test-20101111000000")
self.assertEqual(3, ZfsAutoverify("test test_target1 --verbose --exclude-received".split(" ")).run())


@ -1,6 +1,9 @@
from zfs_autobackup.CmdPipe import CmdPipe
from basetest import *
import time
from zfs_autobackup.LogConsole import LogConsole
class TestZfsAutobackup(unittest2.TestCase):
@ -11,13 +14,29 @@ class TestZfsAutobackup(unittest2.TestCase):
def test_invalidpars(self):
self.assertEqual(ZfsAutobackup("test test_target1 --keep-source -1".split(" ")).run(), 255)
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --keep-source -1".split(" ")).run(), 255)
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --resume --verbose --no-snapshot".split(" ")).run(), 0)
print(buf.getvalue())
self.assertIn("The --resume", buf.getvalue())
with OutputIO() as buf:
with redirect_stderr(buf):
self.assertEqual(ZfsAutobackup("test test_target_nonexisting --no-progress".split(" ")).run(), 255)
print(buf.getvalue())
# correct message?
self.assertIn("Please create this dataset", buf.getvalue())
def test_snapshotmode(self):
"""test snapshot tool mode"""
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -35,15 +54,13 @@ test_source2/fs3/sub
test_target1
""")
def test_defaults(self):
with self.subTest("no datasets selected"):
with OutputIO() as buf:
with redirect_stderr(buf):
with patch('time.strftime', return_value="20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())
print(buf.getvalue())
#correct message?
@ -52,8 +69,8 @@ test_target1
with self.subTest("defaults with full verbose and debug"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -81,8 +98,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")
with self.subTest("bare defaults, allow empty"):
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -152,15 +169,15 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
#make sure time handling is correct: make snapshots a year apart and verify that only snapshots roughly 1y old are kept
with self.subTest("test time checking"):
with patch('time.strftime', return_value="20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
time_str="20111112000000" #month in the "future"
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
with patch('time.time', return_value=future_timestamp):
with patch('time.strftime', return_value="20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y".split(" ")).run())
with patch('time.strftime', return_value="test-20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -194,14 +211,13 @@ test_target1/test_source2/fs2/sub@test-20111111000000
test_target1/test_source2/fs2/sub@test-20111111000001
""")
def test_ignore_othersnaphots(self):
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -235,8 +251,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --other-snapshots".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -270,8 +286,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_nosnapshot(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
@ -294,12 +310,10 @@ test_target1/test_source2/fs2
def test_nosend(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
@ -319,12 +333,10 @@ test_target1
def test_ignorereplicated(self):
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --ignore-replicated".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
@ -350,8 +362,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_noholds(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@ -382,8 +394,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
def test_strippath(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -408,6 +420,33 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000
""")
# def test_strippath_toomuch(self):
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(
# ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
# self.assertMultiLineEqual(r, """
# test_source1
# test_source1/fs1
# test_source1/fs1@test-20101111000000
# test_source1/fs1/sub
# test_source1/fs1/sub@test-20101111000000
# test_source2
# test_source2/fs2
# test_source2/fs2/sub
# test_source2/fs2/sub@test-20101111000000
# test_source2/fs3
# test_source2/fs3/sub
# test_target1
# test_target1/fs1
# test_target1/fs1@test-20101111000000
# test_target1/fs1/sub
# test_target1/fs1/sub@test-20101111000000
# test_target1/fs2
# test_target1/fs2/sub
# test_target1/fs2/sub@test-20101111000000
# """)
def test_clearrefres(self):
@ -418,8 +457,8 @@ test_target1/fs2/sub@test-20101111000000
r=shelltest("zfs set refreservation=1M test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-refreservation".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@ -456,8 +495,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
self.skipTest("This zfs-userspace version doesnt support -o")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-mountpoint --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@ -489,35 +528,35 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_rollback(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#make change
r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should fail (busy)
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#rollback, should succeed
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --rollback".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
def test_destroyincompat(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#add multiple compatible snapshots (written is still 0)
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should be ok, is compatible
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#add incompatible snapshot by changing and snapshotting
r=shelltest("zfs mount test_target1/test_source1/fs1")
@ -525,22 +564,46 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#--test should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty --test".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#--test should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@compatible1
test_target1/test_source1/fs1@compatible2
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
test_target1/test_source2/fs2/sub@test-20101111000003
""")
@ -551,14 +614,14 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
#test all ssh directions
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-target localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -602,8 +665,8 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_minchange(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make small change, use umount to reflect the changes immediately
r=shelltest("zfs set compress=off test_source1")
@ -612,16 +675,16 @@ test_target1/test_source2/fs2/sub@test-20101111000002
#too small change, takes no snapshots
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make big change
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
#bigger change, should take snapshot
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@ -653,8 +716,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_test(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -670,13 +733,13 @@ test_target1
""")
#actual make initial backup
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#test incremental
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -711,8 +774,8 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/test_source1")
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -744,16 +807,16 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_keep0(self):
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=0 --keep-target=0".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())
#make snapshot, shouldnt delete 0
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
#make snapshot 2, shouldnt delete 0 since it has holds, but will delete 1 since it has no holds
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
@@ -784,8 +847,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")
#make another backup but with no-holds. we should naturally end up with only number 3
with patch('time.strftime', return_value="20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
@@ -813,9 +876,9 @@ test_target1/test_source2/fs2/sub@test-20101111000003
""")
# make snapshot 4, since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="20101111000004"):
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
# run with snapshot-only for 4, since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="test-20101111000004"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
@@ -842,9 +905,26 @@ test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000003
""")
###########################
# TODO:
def test_raw(self):
def test_progress(self):
self.skipTest("todo: later when travis supports zfs 0.8")
r=shelltest("dd if=/dev/zero of=/test_source1/data.txt bs=200000 count=1")
r = shelltest("zfs snapshot test_source1@test")
l=LogConsole(show_verbose=True, show_debug=False, color=False)
n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
with OutputIO() as buf:
with redirect_stderr(buf):
try:
n.run(["sleep", "2"], inp=sp)
except:
pass
print(buf.getvalue())
# correct message?
self.assertRegex(buf.getvalue(),".*>>> .*minutes left.*")


@@ -2,6 +2,7 @@ from basetest import *
import time
class TestZfsAutobackup31(unittest2.TestCase):
"""various new 3.1 features"""
def setUp(self):
prepare_zpools()
@@ -9,11 +10,11 @@ class TestZfsAutobackup31(unittest2.TestCase):
def test_no_thinning(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -47,3 +48,34 @@ test_target1/test_source2/fs2/sub@test-20101111000001
""")
def test_re_replication(self):
"""test re-replication of something thats already a backup (new in v3.1-beta5)"""
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
#NOTE: it wont backup test_target1/a/test_source2/fs2/sub to test_target1/b since it doesnt have the zfs_autobackup property anymore.
self.assertMultiLineEqual(r,"""
test_target1/a/test_source1/fs1@test-20101111000000
test_target1/a/test_source1/fs1/sub@test-20101111000000
test_target1/a/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_source1/fs1@test-20101111000000
test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")
def test_zfs_compressed(self):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())


@@ -1,6 +1,6 @@
from basetest import *
from zfs_autobackup.LogStub import LogStub
from zfs_autobackup.ExecuteNode import ExecuteError
class TestZfsNode(unittest2.TestCase):
@@ -9,95 +9,148 @@ class TestZfsNode(unittest2.TestCase):
prepare_zpools()
# return super().setUp()
def test_consistent_snapshot(self):
logger=LogStub()
description="[Source]"
node=ZfsNode("test", logger, description=description)
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-1",100000)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",1)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",0)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-2
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-2
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-2
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
def test_consistent_snapshot_prepostcmds(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
with self.subTest("Test if all cmds are executed correctly (no failures)"):
with OutputIO() as buf:
with redirect_stdout(buf):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
)
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
with self.subTest("Failure in the middle, only pre1 and both post1 and post2 should be executed, no snapshot should be attempted"):
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
post_snapshot_cmds=["echo post1", "false", "echo post2"]
)
print(buf.getvalue())
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertNotIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
with self.subTest("Snapshot fails"):
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
#same snapshot name as before so it fails
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1", "echo post2"]
)
print(buf.getvalue())
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
def test_getselected(self):
logger=LogStub()
description="[Source]"
node=ZfsNode("test", logger, description=description)
s=pformat(node.selected_datasets)
# should be excluded by property
shelltest("zfs create test_source1/fs1/subexcluded")
shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
# should be excluded by being unchanged
shelltest("zfs create test_source1/fs1/unchanged")
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
print(s)
#basics
self.assertEqual (s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
#caching, so expect same result after changing it
subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
self.assertEqual (s, """[(local): test_source1/fs1,
# basics
self.assertEqual(s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
def test_validcommand(self):
logger=LogStub()
description="[Source]"
node=ZfsNode("test", logger, description=description)
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
@@ -105,21 +158,19 @@ test_target1
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
def test_supportedsendoptions(self):
logger=LogStub()
description="[Source]"
node=ZfsNode("test", logger, description=description)
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
# -D probably always supported
self.assertGreater(len(node.supported_send_options),0)
self.assertGreater(len(node.supported_send_options), 0)
def test_supportedrecvoptions(self):
logger=LogStub()
description="[Source]"
#NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
node=ZfsNode("test", logger, description=description, ssh_to='localhost')
logger = LogStub()
description = "[Source]"
# NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)
if __name__ == '__main__':
unittest.main()

zfs_autobackup/CmdPipe.py (new file, 160 lines)

@@ -0,0 +1,160 @@
import subprocess
import os
import select
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class CmdItem:
"""one command item, to be added to a CmdPipe"""
def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
"""create item. caller has to make sure cmd is properly escaped when using shell.
:type cmd: list of str
"""
self.cmd = cmd
self.readonly = readonly
self.stderr_handler = stderr_handler
self.exit_handler = exit_handler
self.shell = shell
self.process = None
def __str__(self):
"""return copy-pastable version of command."""
if self.shell:
# its already copy pastable for a shell:
return " ".join(self.cmd)
else:
# make it copy-pastable, will make a mess of quotes sometimes, but is correct
return " ".join(map(cmd_quote, self.cmd))
def create(self, stdin):
"""actually create the subprocess (called by CmdPipe)"""
# make sure the command gets all the data in utf8 format:
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
encoded_cmd = []
for arg in self.cmd:
encoded_cmd.append(arg.encode('utf-8'))
self.process = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
stderr=subprocess.PIPE, shell=self.shell)
class CmdPipe:
"""a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing"""
def __init__(self, readonly=False, inp=None):
"""
:param inp: input string for stdin
:param readonly: Only execute if the entire pipe consists of readonly commands
"""
# list of commands + error handlers to execute
self.items = []
self.inp = inp
self.readonly = readonly
self._should_execute = True
def add(self, cmd_item):
"""adds a CmdItem to pipe.
:type cmd_item: CmdItem
"""
self.items.append(cmd_item)
if not cmd_item.readonly and self.readonly:
self._should_execute = False
def __str__(self):
"""transform whole pipe into oneliner for debugging and testing. this should generate a copy-pastable string for in a console """
ret = ""
for item in self.items:
if ret:
ret = ret + " | "
ret = ret + "({})".format(item) # this will do proper escaping to make it copypastable
return ret
def should_execute(self):
return self._should_execute
def execute(self, stdout_handler):
"""run the pipe. returns True all exit handlers returned true"""
if not self._should_execute:
return True
# first process should have actual user input as stdin:
selectors = []
# create processes
last_stdout = None
stdin = subprocess.PIPE
for item in self.items:
item.create(stdin)
selectors.append(item.process.stderr)
if last_stdout is None:
# we're the first process in the pipe, do we have some input?
if self.inp is not None:
# TODO: make streaming to support big inputs?
item.process.stdin.write(self.inp.encode('utf-8'))
item.process.stdin.close()
else:
# last stdout was piped to this stdin already, so close it because we dont need it anymore
last_stdout.close()
last_stdout = item.process.stdout
stdin = last_stdout
# monitor last stdout as well
selectors.append(last_stdout)
while True:
# wait for output on one of the stderrs or last_stdout
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
eof_count = 0
done_count = 0
# read line and call appropriate handlers
if last_stdout in read_ready:
line = last_stdout.readline().decode('utf-8').rstrip()
if line != "":
stdout_handler(line)
else:
eof_count = eof_count + 1
for item in self.items:
if item.process.stderr in read_ready:
line = item.process.stderr.readline().decode('utf-8').rstrip()
if line != "":
item.stderr_handler(line)
else:
eof_count = eof_count + 1
if item.process.poll() is not None:
done_count = done_count + 1
# all filehandles are eof and all processes are done (poll() is not None)
if eof_count == len(selectors) and done_count == len(self.items):
break
# close filehandles
last_stdout.close()
for item in self.items:
item.process.stderr.close()
# call exit handlers
success = True
for item in self.items:
if item.exit_handler is not None:
success=item.exit_handler(item.process.returncode) and success
return success
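
A rough usage sketch of the CmdPipe API introduced above; the grep command and the lambda handlers here are made-up stand-ins:

from zfs_autobackup.CmdPipe import CmdPipe, CmdItem

# build the equivalent of: printf "one\ntwo\n" | grep two
pipe = CmdPipe(readonly=False, inp="one\ntwo\n")
pipe.add(CmdItem(cmd=["grep", "two"],
                 stderr_handler=lambda line: print("STDERR: " + line),
                 exit_handler=lambda code: code == 0))  # grep exits 0 on a match

print(pipe)  # __str__ gives the copy-pastable oneliner: (grep two)
ok = pipe.execute(stdout_handler=lambda line: print("STDOUT: " + line))
print(ok)    # True only if every exit handler returned True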


@@ -1,13 +1,24 @@
import os
import select
import subprocess
from .CmdPipe import CmdPipe, CmdItem
from .LogStub import LogStub
from zfs_autobackup.LogStub import LogStub
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class ExecuteError(Exception):
pass
class ExecuteNode(LogStub):
"""an endpoint to execute local or remote commands via ssh"""
PIPE=1
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
"""ssh_config: custom ssh config
ssh_to: server you want to ssh to. none means local
@@ -38,169 +49,118 @@ class ExecuteNode(LogStub):
else:
self.error("STDERR > " + line.rstrip())
def _parse_stderr_pipe(self, line, hide_errors):
"""parse stderr from pipe input process. can be overridden in subclass"""
if hide_errors:
self.debug("STDERR|> " + line.rstrip())
def _quote(self, cmd):
"""return quoted version of command. if it has value PIPE it will add an actual | """
if cmd==self.PIPE:
return('|')
else:
self.error("STDERR|> " + line.rstrip())
return(cmd_quote(cmd))
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False,
return_stderr=False):
"""run a command on the node.
def _shell_cmd(self, cmd):
"""prefix specified ssh shell to command and escape shell characters"""
ret=[]
#add remote shell
if not self.is_local():
ret=["ssh"]
if self.ssh_config is not None:
ret.extend(["-F", self.ssh_config])
ret.append(self.ssh_to)
ret.append(" ".join(map(self._quote, cmd)))
return ret
def is_local(self):
return self.ssh_to is None
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
return_stderr=False, pipe=False, return_all=False):
"""run a command on the node , checks output and parses/handle output and returns it
Either uses a local shell (sh -c) or remote shell (ssh) to execute the command.
Therefore the command can contain actual shell pipes, if you don't want to use pipe=True for python-side piping.
:param cmd: the actual command, should be a list, where the first item is the command
and the rest are parameters.
:param inp: Can be None, a string or a pipe-handle you got from another run()
and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
(if you want to use system piping instead of python piping)
:param pipe: return CmdPipe instead of executing it.
:param inp: Can be None, a string or a CmdPipe that was previously returned.
:param tab_split: split tabbed files in output into a list
:param valid_exitcodes: list of valid exit codes for this command (checks exit code of both sides of a pipe)
Use [] to accept all exit codes.
Use [] to accept all exit codes. Default [0]
:param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
:param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
:param pipe: Instead of executing, return a pipe-handle to be used to
input to another run() command. (just like a | in linux)
:param return_stderr: return both stdout and stderr as a tuple. (normally only returns stdout)
:param return_all: return both stdout and stderr and exit_code as a tuple. (normally only returns stdout)
"""
# create new pipe?
if not isinstance(inp, CmdPipe):
cmd_pipe = CmdPipe(self.readonly, inp)
else:
# add stuff to existing pipe
cmd_pipe = inp
# stderr parser
error_lines = []
returned_exit_code=None
def stderr_handler(line):
if tab_split:
error_lines.append(line.rstrip().split('\t'))
else:
error_lines.append(line.rstrip())
self._parse_stderr(line, hide_errors)
# exit code handler
if valid_exitcodes is None:
valid_exitcodes = [0]
encoded_cmd = []
# use ssh?
if self.ssh_to is not None:
encoded_cmd.append("ssh".encode('utf-8'))
if self.ssh_config is not None:
encoded_cmd.extend(["-F".encode('utf-8'), self.ssh_config.encode('utf-8')])
encoded_cmd.append(self.ssh_to.encode('utf-8'))
# make sure the command gets all the data in utf8 format:
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
for arg in cmd:
# add single quotes for remote commands to support spaces and other weird stuff (remote commands are
# executed in a shell) and escape existing single quotes (bash needs ' to end the quoted string,
# then a \' for the actual quote and then another ' to start a new quoted string) (and then python
# needs the double \ to get a single \)
encoded_cmd.append(("'" + arg.replace("'", "'\\''") + "'").encode('utf-8'))
else:
for arg in cmd:
encoded_cmd.append(arg.encode('utf-8'))
# debug and test stuff
debug_txt = ""
for c in encoded_cmd:
debug_txt = debug_txt + " " + c.decode()
if pipe:
debug_txt = debug_txt + " |"
if self.readonly and not readonly:
self.debug("SKIP > " + debug_txt)
else:
if pipe:
self.debug("PIPE > " + debug_txt)
else:
self.debug("RUN > " + debug_txt)
# determine stdin
if inp is None:
# NOTE: Not None, otherwise it reads stdin from terminal!
stdin = subprocess.PIPE
elif isinstance(inp, str) or type(inp) == 'unicode':
self.debug("INPUT > \n" + inp.rstrip())
stdin = subprocess.PIPE
elif isinstance(inp, subprocess.Popen):
self.debug("Piping input")
stdin = inp.stdout
else:
raise (Exception("Program error: Incompatible input"))
if self.readonly and not readonly:
# todo: what happens if input is piped?
return
# execute and parse/return results
p = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin, stderr=subprocess.PIPE)
# Note: make streaming?
if isinstance(inp, str) or type(inp) == 'unicode':
p.stdin.write(inp.encode('utf-8'))
if p.stdin:
p.stdin.close()
# return pipe
if pipe:
return p
# handle all outputs
if isinstance(inp, subprocess.Popen):
selectors = [p.stdout, p.stderr, inp.stderr]
inp.stdout.close() # otherwise inputprocess wont exit when ours does
else:
selectors = [p.stdout, p.stderr]
output_lines = []
error_lines = []
while True:
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
eof_count = 0
if p.stdout in read_ready:
line = p.stdout.readline().decode('utf-8')
if line != "":
if tab_split:
output_lines.append(line.rstrip().split('\t'))
else:
output_lines.append(line.rstrip())
self._parse_stdout(line)
else:
eof_count = eof_count + 1
if p.stderr in read_ready:
line = p.stderr.readline().decode('utf-8')
if line != "":
if tab_split:
error_lines.append(line.rstrip().split('\t'))
else:
error_lines.append(line.rstrip())
self._parse_stderr(line, hide_errors)
else:
eof_count = eof_count + 1
if isinstance(inp, subprocess.Popen) and (inp.stderr in read_ready):
line = inp.stderr.readline().decode('utf-8')
if line != "":
self._parse_stderr_pipe(line, hide_errors)
else:
eof_count = eof_count + 1
# stop if both processes are done and all filehandles are EOF:
if (p.poll() is not None) and (
(not isinstance(inp, subprocess.Popen)) or inp.poll() is not None) and eof_count == len(selectors):
break
p.stderr.close()
p.stdout.close()
if self.debug_output:
self.debug("EXIT > {}".format(p.returncode))
# handle piped process error output and exit codes
if isinstance(inp, subprocess.Popen):
inp.stderr.close()
inp.stdout.close()
def exit_handler(exit_code):
if self.debug_output:
self.debug("EXIT |> {}".format(inp.returncode))
if valid_exitcodes and inp.returncode not in valid_exitcodes:
raise (subprocess.CalledProcessError(inp.returncode, "(pipe)"))
self.debug("EXIT > {}".format(exit_code))
if valid_exitcodes and p.returncode not in valid_exitcodes:
raise (subprocess.CalledProcessError(p.returncode, encoded_cmd))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
return False
if return_stderr:
return True
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
cmd_pipe.add(cmd_item)
# return pipe instead of executing?
if pipe:
return cmd_pipe
# stdout parser
output_lines = []
def stdout_handler(line):
if tab_split:
output_lines.append(line.rstrip().split('\t'))
else:
output_lines.append(line.rstrip())
self._parse_stdout(line)
if cmd_pipe.should_execute():
self.debug("CMD > {}".format(cmd_pipe))
else:
self.debug("CMDSKIP> {}".format(cmd_pipe))
# execute and calls handlers in CmdPipe
if not cmd_pipe.execute(stdout_handler=stdout_handler):
raise(ExecuteError("Last command returned error"))
if return_all:
return output_lines, error_lines, cmd_item.process and cmd_item.process.returncode
elif return_stderr:
return output_lines, error_lines
else:
return output_lines
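
A hedged sketch of how the reworked run() chains commands: with pipe=True nothing executes yet, and the returned CmdPipe is passed as inp to the next run(). The dataset names are invented:

from zfs_autobackup.ExecuteNode import ExecuteNode

source = ExecuteNode()                    # local, commands go through a local shell
target = ExecuteNode(ssh_to="localhost")  # remote, commands go through ssh

# build the send side only; returns the CmdPipe instead of executing it:
send_pipe = source.run(["zfs", "send", "rpool/data@snap"], pipe=True, readonly=True)

# append the recv side to the same pipe and execute the whole thing:
target.run(["zfs", "recv", "rpool/backup"], inp=send_pipe)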


@@ -3,35 +3,44 @@ from __future__ import print_function
import sys
colorama = False
if sys.stdout.isatty():
try:
import colorama
except ImportError:
colorama = False
pass
class LogConsole:
"""Log-class that outputs to console, adding colors if needed"""
def __init__(self, show_debug=False, show_verbose=False):
def __init__(self, show_debug, show_verbose, color):
self.last_log = ""
self.show_debug = show_debug
self.show_verbose = show_verbose
@staticmethod
def error(txt):
if colorama:
if color:
# try to use color, fall back if colorama is not available
self.colorama=False
try:
import colorama
global colorama
self.colorama = True
except ImportError:
pass
else:
self.colorama=False
def error(self, txt):
if self.colorama:
print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
else:
print("! " + txt, file=sys.stderr)
sys.stderr.flush()
def warning(self, txt):
if self.colorama:
print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
else:
print(" NOTE: " + txt)
sys.stdout.flush()
def verbose(self, txt):
if self.show_verbose:
if colorama:
if self.colorama:
print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
else:
print(" " + txt)
@@ -39,8 +48,19 @@ class LogConsole:
def debug(self, txt):
if self.show_debug:
if colorama:
if self.colorama:
print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
else:
print("# " + txt)
sys.stdout.flush()
def progress(self, txt):
"""print progress output to stderr (stays on same line)"""
self.clear_progress()
print(">>> {}\r".format(txt), end='', file=sys.stderr)
sys.stderr.flush()
def clear_progress(self):
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
sys.stderr.flush()
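
A minimal sketch of the reworked LogConsole, matching the constructor the tests above already use (note that progress() and clear_progress() import colorama directly, so both assume it is installed):

from zfs_autobackup.LogConsole import LogConsole

log = LogConsole(show_debug=False, show_verbose=True, color=False)
log.verbose("shown")     # printed, since show_verbose is True
log.debug("hidden")      # suppressed, since show_debug is False
log.warning("heads up")  # always printed, as " NOTE: heads up"
log.progress("50%...")   # single updating line on stderr (needs colorama)
log.clear_progress()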


@@ -11,5 +11,8 @@ class LogStub:
def verbose(self, txt):
print("VERBOSE: " + txt)
def warning(self, txt):
print("WARNING: " + txt)
def error(self, txt):
print("ERROR : " + txt)


@@ -1,14 +1,15 @@
import time
from zfs_autobackup.ThinnerRule import ThinnerRule
from .ThinnerRule import ThinnerRule
class Thinner:
"""progressive thinner (universal, used for cleaning up snapshots)"""
def __init__(self, schedule_str=""):
"""schedule_str: comma seperated list of ThinnerRules. A plain number specifies how many snapshots to always
keep.
"""
Args:
schedule_str: comma separated list of ThinnerRules. A plain number specifies how many snapshots to always keep.
"""
self.rules = []
@@ -19,7 +20,7 @@ class Thinner:
rule_strs = schedule_str.split(",")
for rule_str in rule_strs:
if rule_str.isdigit():
if rule_str.lstrip('-').isdigit():
self.always_keep = int(rule_str)
if self.always_keep < 0:
raise (Exception("Number of snapshots to keep cant be negative: {}".format(self.always_keep)))
@@ -37,11 +38,15 @@
return ret
def thin(self, objects, keep_objects=None, now=None):
"""thin list of objects with current schedule rules. objects: list of objects to thin. every object should
have timestamp attribute. keep_objects: objects to always keep (these should also be in normal objects list,
so we can use them to perhaps delete other obsolete objects)
"""thin list of objects with current schedule rules. objects: list of
objects to thin. every object should have timestamp attribute.
return( keeps, removes )
Args:
objects: list of objects to check (should have a timestamp attribute)
keep_objects: objects to always keep (if they are also in the normal objects list)
now: if specified, use this time as current time
"""
if not keep_objects:

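Reading the two docstrings together, a schedule like "5,1d1w" should mean: always keep the newest 5 objects, plus one per day, each kept for a week. A hedged sketch of a thin() call; the Snap class is a made-up stand-in for anything with a timestamp attribute:

import time
from zfs_autobackup.Thinner import Thinner

class Snap:
    def __init__(self, timestamp):
        self.timestamp = timestamp

now = int(time.time())
# one fake snapshot per hour, going two weeks back:
snapshots = [Snap(now - age) for age in range(0, 14 * 24 * 3600, 3600)]

thinner = Thinner("5,1d1w")
keeps, removes = thinner.thin(snapshots, now=now)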

@@ -39,6 +39,9 @@ class ThinnerRule:
rule_str = rule_str.lower()
matches = re.findall("([0-9]*)([a-z]*)([0-9]*)([a-z]*)", rule_str)[0]
if '' in matches:
raise (Exception("Invalid schedule string: '{}'".format(rule_str)))
period_amount = int(matches[0])
period_unit = matches[1]
ttl_amount = int(matches[2])

zfs_autobackup/ZfsAuto.py (new file, 191 lines)

@@ -0,0 +1,191 @@
import argparse
import os.path
import sys
from .LogConsole import LogConsole
class ZfsAuto(object):
"""Common Base class, this class is always used subclassed. Look at ZfsAutobackup and ZfsAutoverify ."""
# also used by setup.py
VERSION = "3.2-alpha1"
HEADER = "{} v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
def __init__(self, argv, print_arguments=True):
self.hold_name = None
self.snapshot_time_format = None
self.property_name = None
self.exclude_paths = None
# helps with investigating failed regression tests:
if print_arguments:
print("ARGUMENTS: " + " ".join(argv))
self.args = self.parse_args(argv)
def parse_args(self, argv):
"""parse common arguments, setup logging, check and adjust parameters"""
parser=self.get_parser()
args = parser.parse_args(argv)
if args.help:
parser.print_help()
sys.exit(255)
if args.version:
print(self.HEADER)
sys.exit(255)
# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True
if args.debug_output:
args.debug = True
if args.test:
args.verbose = True
if args.debug:
args.verbose = True
self.log = LogConsole(show_debug=args.debug, show_verbose=args.verbose, color=sys.stdout.isatty())
self.verbose(self.HEADER)
self.verbose("")
if args.backup_name == None:
parser.print_usage()
self.log.error("Please specify BACKUP-NAME")
sys.exit(255)
if args.target_path is not None and args.target_path[0] == "/":
self.log.error("Target should not start with a /")
sys.exit(255)
if args.ignore_replicated:
self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
args.exclude_unchanged = True
# Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanted to
# replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
# may still need to be used to explicitly exclude a backup with the 'received' source property to avoid accidental
# recursive replication of a zvol that is currently being received in another session (as it will have changes).
self.exclude_paths = []
if args.ssh_source == args.ssh_target:
if args.target_path:
# target and source are the same, make sure to exclude target_path
self.verbose("NOTE: Source and target are on the same host, excluding target-path from selection.")
self.exclude_paths.append(args.target_path)
else:
self.verbose("NOTE: Source and target are on the same host, excluding received datasets from selection.")
args.exclude_received = True
if args.test:
self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
#format all the names
self.property_name = args.property_format.format(args.backup_name)
self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
self.hold_name = args.hold_format.format(args.backup_name)
self.verbose("")
self.verbose("Selecting dataset property : {}".format(self.property_name))
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
return args
def get_parser(self):
parser = argparse.ArgumentParser(description=self.HEADER, add_help=False,
epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')
#positional arguments
parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
help='Name of the backup to select')
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional)')
# Basic options
group=parser.add_argument_group("Basic options")
group.add_argument('--help', '-h', action='store_true', help='show help')
group.add_argument('--test', '--dry-run', '-n', action='store_true',
help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
group.add_argument('--debug', '-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
group.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
group.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
group.add_argument('--no-progress', action='store_true',
help=argparse.SUPPRESS) # needed to work around a zfs recv -v bug
group.add_argument('--version', action='store_true',
help='Show version.')
group.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)')
# SSH options
group=parser.add_argument_group("SSH options")
group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
group.add_argument('--ssh-source', metavar='USER@HOST', default=None,
help='Source host to get backup from.')
group.add_argument('--ssh-target', metavar='USER@HOST', default=None,
help='Target host to push backup to.')
group=parser.add_argument_group("String formatting options")
group.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
help='Dataset selection string format. Default: %(default)s')
group.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
help='ZFS Snapshot string format. Default: %(default)s')
group.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
help='ZFS hold string format. Default: %(default)s')
group=parser.add_argument_group("Selection options")
group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
group.add_argument('--exclude-unchanged', action='store_true',
help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
group.add_argument('--exclude-received', action='store_true',
help='Exclude datasets that have the origin of their autobackup: property as "received". '
'This can avoid recursive replication between two backup partners.')
return parser
def verbose(self, txt):
self.log.verbose(txt)
def warning(self, txt):
self.log.warning(txt)
def error(self, txt):
self.log.error(txt)
def debug(self, txt):
self.log.debug(txt)
def progress(self, txt):
self.log.progress(txt)
def clear_progress(self):
self.log.clear_progress()
def set_title(self, title):
self.log.verbose("")
self.log.verbose("#### " + title)
def print_error_sources(self):
self.error(
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
"you want to select.".format(
self.args.backup_name))
def make_target_name(self, source_dataset):
"""make target_name from a source_dataset"""
return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
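
Worked example of the name formatting done in parse_args() above, using the defaults from get_parser() and a hypothetical BACKUP-NAME of offsite1:

backup_name = "offsite1"  # hypothetical BACKUP-NAME

property_name = "autobackup:{}".format(backup_name)           # -> autobackup:offsite1
snapshot_time_format = "{}-%Y%m%d%H%M%S".format(backup_name)  # -> offsite1-%Y%m%d%H%M%S
hold_name = "zfs_autobackup:{}".format(backup_name)           # -> zfs_autobackup:offsite1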


@@ -1,124 +1,31 @@
import argparse
import sys
import time
from zfs_autobackup.Thinner import Thinner
from zfs_autobackup.ZfsDataset import ZfsDataset
from zfs_autobackup.LogConsole import LogConsole
from zfs_autobackup.ZfsNode import ZfsNode
from zfs_autobackup.ThinnerRule import ThinnerRule
import argparse
from .ZfsAuto import ZfsAuto
from . import compressors
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
class ZfsAutobackup:
"""main class"""
VERSION = "3.1-beta1"
HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION)
class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)"""
def __init__(self, argv, print_arguments=True):
# helps with investigating failed regression tests:
if print_arguments:
print("ARGUMENTS: " + " ".join(argv))
# NOTE: common options and parameters are in ZfsAuto
super(ZfsAutobackup, self).__init__(argv, print_arguments)
parser = argparse.ArgumentParser(
description=self.HEADER,
epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')
parser.add_argument('--ssh-config', default=None, help='Custom ssh client config')
parser.add_argument('--ssh-source', default=None,
help='Source host to get backup from. (user@hostname) Default %(default)s.')
parser.add_argument('--ssh-target', default=None,
help='Target host to push backup to. (user@hostname) Default %(default)s.')
parser.add_argument('--keep-source', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old source snapshots. Default: %(default)s')
parser.add_argument('--keep-target', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old target snapshots. Default: %(default)s')
def parse_args(self, argv):
"""do extra checks on common args"""
parser.add_argument('backup_name', metavar='backup-name',
help='Name of the backup (you should set the zfs property "autobackup:backup-name" to '
'true on filesystems you want to backup')
parser.add_argument('target_path', metavar='target-path', default=None, nargs='?',
help='Target ZFS filesystem (optional: if not specified, zfs-autobackup will only operate '
'as snapshot-tool on source)')
args = super(ZfsAutobackup, self).parse_args(argv)
parser.add_argument('--other-snapshots', action='store_true',
help='Send over other snapshots as well, not just the ones created by this tool.')
parser.add_argument('--no-snapshot', action='store_true',
help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
parser.add_argument('--no-send', action='store_true',
help='Don\'t send snapshots (useful for cleanups, or if you want a separate send-cronjob)')
parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
parser.add_argument('--no-holds', action='store_true',
help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
parser.add_argument('--min-change', type=int, default=1,
help='Number of bytes written after which we consider a dataset changed (default %('
'default)s)')
parser.add_argument('--allow-empty', action='store_true',
help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
parser.add_argument('--ignore-replicated', action='store_true',
help='Ignore datasets that seem to be replicated some other way. (No changes since '
'latest snapshot. Useful for proxmox HA replication)')
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--strip-path', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)')
# parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer.
# (e.g. --buffer 1G) Will also show nice progress output.')
parser.add_argument('--clear-refreservation', action='store_true',
help='Filter "refreservation" property. (recommended, saves space. same as '
'--filter-properties refreservation)')
parser.add_argument('--clear-mountpoint', action='store_true',
help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
'conflicts. same as --set-properties canmount=noauto)')
parser.add_argument('--filter-properties', type=str,
help='List of properties to "filter" when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
parser.add_argument('--set-properties', type=str,
help='List of properties to override when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
parser.add_argument('--rollback', action='store_true',
help='Rollback changes to the latest target snapshot before starting. (normally you can '
'prevent changes by setting the readonly property on the target_path to on)')
parser.add_argument('--destroy-incompatible', action='store_true',
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
parser.add_argument('--destroy-missing', type=str, default=None,
help='Destroy datasets on target that are missing on the source. Specify the time since '
'the last snapshot, e.g: --destroy-missing 30d')
parser.add_argument('--ignore-transfer-errors', action='store_true',
help='Ignore transfer errors (still checks if received filesystem exists. useful for '
'acltype errors)')
parser.add_argument('--raw', action='store_true',
help='For encrypted datasets, send data exactly as it exists on disk.')
parser.add_argument('--test', action='store_true',
help='dont change anything, just show what would be done (still does all read-only '
'operations)')
parser.add_argument('--verbose', action='store_true', help='verbose output')
parser.add_argument('--debug', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
parser.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
parser.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) # needed to work around a zfs recv -v bug
# note args is the only global variable we use, since its a global readonly setting anyway
args = parser.parse_args(argv)
self.args = args
# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True
if args.debug_output:
args.debug = True
if self.args.test:
self.args.verbose = True
if not args.no_holds:
self.verbose("Hold name : {}".format(self.hold_name))
if args.allow_empty:
args.min_change = 0
@@ -126,107 +33,276 @@ class ZfsAutobackup:
if args.destroy_incompatible:
args.rollback = True
self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose)
if args.resume:
self.verbose("NOTE: The --resume option isn't needed anymore (its autodetected now)")
self.warning("The --resume option isn't needed anymore (its autodetected now)")
if args.target_path is not None and args.target_path[0] == "/":
self.log.error("Target should not start with a /")
sys.exit(255)
if args.raw:
self.warning(
"The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")
def verbose(self, txt):
self.log.verbose(txt)
if args.compress and args.ssh_source is None and args.ssh_target is None:
self.warning("Using compression, but transfer is local.")
def error(self, txt):
self.log.error(txt)
if args.compress and args.zfs_compressed:
self.warning("Using --compress with --zfs-compressed, might be inefficient.")
def debug(self, txt):
self.log.debug(txt)
return args
def set_title(self, title):
self.log.verbose("")
self.log.verbose("#### " + title)
def get_parser(self):
"""extend common parser with extra stuff needed for zfs-autobackup"""
parser = super(ZfsAutobackup, self).get_parser()
group = parser.add_argument_group("Snapshot options")
group.add_argument('--no-snapshot', action='store_true',
help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
group.add_argument('--pre-snapshot-cmd', metavar="COMMAND", default=[], action='append',
help='Run COMMAND before snapshotting (can be used multiple times).')
group.add_argument('--post-snapshot-cmd', metavar="COMMAND", default=[], action='append',
help='Run COMMAND after snapshotting (can be used multiple times).')
group.add_argument('--min-change', metavar='BYTES', type=int, default=1,
help='Only create snapshot if enough bytes are changed. (default %('
'default)s)')
group.add_argument('--allow-empty', action='store_true',
help='If nothing has changed, still create empty snapshots. (Faster. Same as --min-change=0)')
group.add_argument('--other-snapshots', action='store_true',
help='Send over other snapshots as well, not just the ones created by this tool.')
group = parser.add_argument_group("Transfer options")
group.add_argument('--no-send', action='store_true',
help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
group.add_argument('--no-holds', action='store_true',
help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
group.add_argument('--clear-refreservation', action='store_true',
help='Filter "refreservation" property. (recommended, saves space. same as '
'--filter-properties refreservation)')
group.add_argument('--clear-mountpoint', action='store_true',
help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
'conflicts. same as --set-properties canmount=noauto)')
group.add_argument('--filter-properties', metavar='PROPERTY,...', type=str,
help='List of properties to "filter" when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
group.add_argument('--set-properties', metavar='PROPERTY=VALUE,...', type=str,
help='List of properties to override when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
group.add_argument('--rollback', action='store_true',
help='Rollback changes to the latest target snapshot before starting. (normally you can '
'prevent changes by setting the readonly property on the target_path to on)')
group.add_argument('--destroy-incompatible', action='store_true',
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
group.add_argument('--ignore-transfer-errors', action='store_true',
help='Ignore transfer errors (still checks if received filesystem exists. useful for '
'acltype errors)')
group.add_argument('--decrypt', action='store_true',
help='Decrypt data before sending it over.')
group.add_argument('--encrypt', action='store_true',
help='Encrypt data after receiving it.')
group.add_argument('--zfs-compressed', action='store_true',
help='Transfer blocks that already have zfs-compression as-is.')
group = parser.add_argument_group("ZFS send/recv pipes")
group.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
choices=compressors.choices(),
help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
", ".join(compressors.choices())))
group.add_argument('--rate', metavar='DATARATE', default=None,
help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
group.add_argument('--buffer', metavar='SIZE', default=None,
help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND (can be used multiple times)')
group.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs recv input through COMMAND (can be used multiple times)')
group = parser.add_argument_group("Thinner options")
group.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
group.add_argument('--keep-source', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old source snapshots. Default: %(default)s')
group.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old target snapshots. Default: %(default)s')
group.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
help='Destroy datasets on target that are missing on the source. Specify the time since '
'the last snapshot, e.g: --destroy-missing 30d')
# obsolete
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)
return parser
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def thin_missing_targets(self, target_dataset, used_target_datasets):
"""thin target datasets that are missing on the source."""
self.debug("Thinning obsolete datasets")
missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
dataset not in used_target_datasets]
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing missing {}/{}".format(count, len(missing_datasets)))
for dataset in target_dataset.recursive_datasets:
try:
if dataset not in used_target_datasets:
dataset.debug("Missing on source, thinning")
dataset.thin()
dataset.debug("Missing on source, thinning")
dataset.thin()
except Exception as e:
dataset.error("Error during thinning of missing datasets ({})".format(str(e)))
if self.args.progress:
self.clear_progress()
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def destroy_missing_targets(self, target_dataset, used_target_datasets):
"""destroy target datasets that are missing on the source and that meet the requirements"""
self.debug("Destroying obsolete datasets")
for dataset in target_dataset.recursive_datasets:
missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
dataset not in used_target_datasets]
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing destroy missing {}/{}".format(count, len(missing_datasets)))
try:
if dataset not in used_target_datasets:
# cant do anything without our own snapshots
if not dataset.our_snapshots:
if dataset.datasets:
# its not a leaf, just ignore
dataset.debug("Destroy missing: ignoring")
else:
dataset.verbose(
"Destroy missing: has no snapshots made by us. (please destroy manually)")
# cant do anything without our own snapshots
if not dataset.our_snapshots:
if dataset.datasets:
# its not a leaf, just ignore
dataset.debug("Destroy missing: ignoring")
else:
# past the deadline?
deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
now = int(time.time())
if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
dataset.verbose("Destroy missing: Waiting for deadline.")
dataset.verbose(
"Destroy missing: has no snapshots made by us. (please destroy manually)")
else:
# past the deadline?
deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
now = int(time.time())
if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
dataset.verbose("Destroy missing: Waiting for deadline.")
else:
dataset.debug("Destroy missing: Removing our snapshots.")
# remove all our snapshots, except the last, to save space in case we fail later on
for snapshot in dataset.our_snapshots[:-1]:
snapshot.destroy(fail_exception=True)
# does it have other snapshots?
has_others = False
for snapshot in dataset.snapshots:
if not snapshot.is_ours():
has_others = True
break
if has_others:
dataset.verbose("Destroy missing: Still in use by other snapshots")
else:
dataset.debug("Destroy missing: Removing our snapshots.")
# remove all our snapshots, except the last, to save space in case we fail later on
for snapshot in dataset.our_snapshots[:-1]:
snapshot.destroy(fail_exception=True)
# does it have other snapshots?
has_others = False
for snapshot in dataset.snapshots:
if not snapshot.is_ours():
has_others = True
break
if has_others:
dataset.verbose("Destroy missing: Still in use by other snapshots")
if dataset.datasets:
dataset.verbose("Destroy missing: Still has children here.")
else:
if dataset.datasets:
dataset.verbose("Destroy missing: Still has children here.")
else:
dataset.verbose("Destroy missing.")
dataset.our_snapshots[-1].destroy(fail_exception=True)
dataset.destroy(fail_exception=True)
dataset.verbose("Destroy missing.")
dataset.our_snapshots[-1].destroy(fail_exception=True)
dataset.destroy(fail_exception=True)
except Exception as e:
if self.args.progress:
self.clear_progress()
dataset.error("Error during --destroy-missing: {}".format(str(e)))
if self.args.progress:
self.clear_progress()
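
The deadline check above reuses ThinnerRule to parse the --destroy-missing value: prefixing '0s' turns e.g. '30d' into a rule whose ttl is 30 days in seconds. The same check in isolation, with an invented snapshot age:

import time
from zfs_autobackup.ThinnerRule import ThinnerRule

deadline_ttl = ThinnerRule("0s" + "30d").ttl        # as if --destroy-missing 30d
last_snapshot = int(time.time()) - 40 * 24 * 3600   # hypothetical: 40 days old
if last_snapshot + deadline_ttl > int(time.time()):
    print("Destroy missing: Waiting for deadline.")
else:
    print("past the deadline, our snapshots may be destroyed")
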
def get_send_pipes(self, logger):
"""determine the zfs send pipe"""
ret = []
# IO buffer
if self.args.buffer:
logger("zfs send buffer : {}".format(self.args.buffer))
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])
# custom pipes
for send_pipe in self.args.send_pipe:
ret.append(ExecuteNode.PIPE)
ret.extend(send_pipe.split(" "))
logger("zfs send custom pipe : {}".format(send_pipe))
# compression
if self.args.compress is not None:
ret.append(ExecuteNode.PIPE)
cmd = compressors.compress_cmd(self.args.compress)
ret.extend(cmd)
logger("zfs send compression : {}".format(" ".join(cmd)))
# transfer rate
if self.args.rate:
logger("zfs send transfer rate : {}".format(self.args.rate))
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])
return ret
def get_recv_pipes(self, logger):
    """determine the zfs recv pipe"""
ret = []
# decompression
if self.args.compress is not None:
cmd = compressors.decompress_cmd(self.args.compress)
ret.extend(cmd)
ret.append(ExecuteNode.PIPE)
logger("zfs recv decompression : {}".format(" ".join(cmd)))
# custom pipes
for recv_pipe in self.args.recv_pipe:
ret.extend(recv_pipe.split(" "))
ret.append(ExecuteNode.PIPE)
logger("zfs recv custom pipe : {}".format(recv_pipe))
# IO buffer
if self.args.buffer:
# only add a second buffer if it's useful (e.g. non-local transfer or other pipes active)
if self.args.ssh_source is not None or self.args.ssh_target is not None or self.args.recv_pipe or self.args.send_pipe or self.args.compress is not None:
logger("zfs recv buffer : {}".format(self.args.buffer))
ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])
return ret
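# NOTE: a sketch of what the send/recv arrays compose to for a run with
# --buffer 1G --compress zstd-fast (the exact compressor commands come from
# compressors.compress_cmd()/decompress_cmd() and are only assumed here):
#   source: zfs send ... | mbuffer -q -s128k -m1G | zstd ...
#   target: ... zstd -d | mbuffer -q -s128k -m1G | zfs recv ...
# the recv side mirrors the send side: decompress first, buffer last, so the
# buffer sits closest to the transport.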
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides"""
"""Sync datasets, or thin-only on both sides
:type target_node: ZfsNode
:type source_datasets: list of ZfsDataset
:type source_node: ZfsNode
"""
send_pipes = self.get_send_pipes(source_node.verbose)
recv_pipes = self.get_recv_pipes(target_node.verbose)
fail_count = 0
count = 0
target_datasets = []
for source_dataset in source_datasets:
# stats
if self.args.progress:
count = count + 1
self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
try:
# determine corresponding target_dataset
target_name = self.make_target_name(source_dataset)
target_dataset = target_node.get_dataset(target_name)
target_datasets.append(target_dataset)
# ensure parents exists
@ -237,8 +313,8 @@ class ZfsAutobackup:
target_dataset.parent.create_filesystem(parents=True)
# determine common zpool features (cached, so no problem we call it often)
source_features = source_node.get_pool(source_dataset).features
target_features = target_node.get_pool(target_dataset).features
common_features = source_features and target_features
# sync the snapshots of this dataset
@ -247,17 +323,26 @@ class ZfsAutobackup:
set_properties=self.set_properties_list(),
ignore_recv_exit_code=self.args.ignore_transfer_errors,
holds=not self.args.no_holds, rollback=self.args.rollback,
also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
no_thinning=self.args.no_thinning,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed)
except Exception as e:
if self.args.progress:
self.clear_progress()
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
if self.args.debug:
self.verbose("Debug mode, aborting on first error")
raise
if self.args.progress:
self.clear_progress()
target_path_dataset = target_node.get_dataset(self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
@ -273,20 +358,6 @@ class ZfsAutobackup:
for source_dataset in source_datasets:
source_dataset.thin(skip_holds=True)
def filter_properties_list(self):
if self.args.filter_properties:
@ -314,70 +385,74 @@ class ZfsAutobackup:
def run(self):
try:
self.verbose(self.HEADER)
if self.args.test:
self.verbose("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
################ create source zfsNode
self.set_title("Source settings")
description = "[Source]"
if self.args.no_thinning:
source_thinner = None
else:
source_thinner = Thinner(self.args.keep_source)
source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description, thinner=source_thinner)
source_node.verbose(
"Selects all datasets that have property 'autobackup:{}=true' (or childs of datasets that have "
"'autobackup:{}=child')".format(
self.args.backup_name, self.args.backup_name))
################# select source datasets
self.set_title("Selecting")
source_datasets = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged,
min_change=self.args.min_change)
if not source_datasets:
self.print_error_sources()
return 255
################# snapshotting
if not self.args.no_snapshot:
self.set_title("Snapshotting")
snapshot_name = time.strftime(self.snapshot_time_format)
source_node.consistent_snapshot(source_datasets, snapshot_name,
min_changed_bytes=self.args.min_change,
pre_snapshot_cmds=self.args.pre_snapshot_cmd,
post_snapshot_cmds=self.args.post_snapshot_cmd)
################# sync
# if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
if self.args.target_path:
# create target_node
self.set_title("Target settings")
if self.args.no_thinning:
target_thinner = None
else:
target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
thinner=target_thinner)
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
self.set_title("Synchronising")
# check if exists, to prevent vague errors
target_dataset = target_node.get_dataset(self.args.target_path)
if not target_dataset.exists:
raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# do the actual sync
# NOTE: even with no_send, no_thinning and no_snapshot this still does something useful: it checks the common snapshots and shows incompatible snapshots
fail_count = self.sync_datasets(
source_node=source_node,
source_datasets=source_datasets,
target_node=target_node)
# no target specified, run in snapshot-only mode
else:
if not self.args.no_thinning:
self.thin_source(source_datasets)
@ -385,7 +460,7 @@ class ZfsAutobackup:
if not fail_count:
if self.args.test:
self.set_title("All tests successfull.")
self.set_title("All tests successful.")
else:
self.set_title("All operations completed successfully")
if not self.args.target_path:
@ -393,11 +468,11 @@ class ZfsAutobackup:
else:
if fail_count != 255:
self.error("{} failures!".format(fail_count))
self.error("{} dataset(s) failed!".format(fail_count))
if self.args.test:
self.verbose("")
self.verbose("TEST MODE - DID NOT MAKE ANY CHANGES!")
self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")
return fail_count
@ -409,3 +484,13 @@ class ZfsAutobackup:
except KeyboardInterrupt:
self.error("Aborted")
return 255
def cli():
import sys
sys.exit(ZfsAutobackup(sys.argv[1:], False).run())
if __name__ == "__main__":
cli()


@ -0,0 +1,402 @@
import os
import time
from .ExecuteNode import ExecuteNode
from .ZfsAuto import ZfsAuto
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
import sys
import platform
def tmp_name(suffix=""):
"""create temporary name unique to this process and node"""
#we could use uuids but those are ugly and confusing
name="zfstmp_{}_{}".format(platform.node(), os.getpid())
name=name+suffix
return name
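# NOTE: example result, with a hypothetical hostname and pid:
#   tmp_name("source")  ->  "zfstmp_backuphost_4242source"
# the name is stable for the lifetime of this process, so temporary clones
# and mountpoints can be located again for cleanup.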
def hash_tree_tar(node, path):
"""calculate md5sum of a directory tree, using tar"""
node.debug("Hashing filesystem {} ".format(path))
cmd=[ "tar", "-cf", "-", "-C", path, ".",
ExecuteNode.PIPE, "md5sum"]
stdout = node.run(cmd)
if node.readonly:
hashed=None
else:
hashed = stdout[0].split(" ")[0]
node.debug("Hash of {} filesytem is {}".format(path, hashed))
return hashed
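# NOTE: the above amounts to running roughly this on the node:
#   tar -cf - -C <path> . | md5sum
# comparing whole-archive hashes implicitly assumes tar emits entries in the
# same order on both nodes; the rsync method below doesn't have that caveat.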
def compare_trees_tar(source_node, source_path, target_node, target_path):
"""compare two trees using tar. compatible and simple"""
source_hash= hash_tree_tar(source_node, source_path)
target_hash= hash_tree_tar(target_node, target_path)
if source_hash != target_hash:
raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
def compare_trees_rsync(source_node, source_path, target_node, target_path):
"""use rsync to compare two trees.
Advantage is that we can see which individual files differ.
But requires rsync and can't do remote-to-remote."""
cmd = ["rsync", "-rcn", "--info=COPY,DEL,MISC,NAME,SYMSAFE", "--msgs2stderr", "--delete" ]
#local
if source_node.ssh_to is None and target_node.ssh_to is None:
cmd.append("{}/".format(source_path))
cmd.append("{}/".format(target_path))
source_node.debug("Running rsync locally, on source.")
stdout, stderr = source_node.run(cmd, return_stderr=True)
#source is local
elif source_node.ssh_to is None and target_node.ssh_to is not None:
cmd.append("{}/".format(source_path))
cmd.append("{}:{}/".format(target_node.ssh_to, target_path))
source_node.debug("Running rsync locally, on source.")
stdout, stderr = source_node.run(cmd, return_stderr=True)
#target is local
elif source_node.ssh_to is not None and target_node.ssh_to is None:
cmd.append("{}:{}/".format(source_node.ssh_to, source_path))
cmd.append("{}/".format(target_path))
source_node.debug("Running rsync locally, on target.")
stdout, stderr=target_node.run(cmd, return_stderr=True)
else:
raise Exception("Source and target cant both be remote when verifying. (rsync limitation)")
if stderr:
raise Exception("Dataset verify failed, see above list for differences")
def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, method):
"""Compare the contents of two zfs filesystem snapshots """
try:
# mount the snapshots
source_snapshot.mount(source_mnt)
target_snapshot.mount(target_mnt)
if method=='rsync':
compare_trees_rsync(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
elif method == 'tar':
compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
else:
raise(Exception("program errror, unknown method"))
finally:
source_snapshot.unmount()
target_snapshot.unmount()
def hash_dev(node, dev):
"""calculate md5sum of a device on a node"""
node.debug("Hashing volume {} ".format(dev))
cmd = [ "md5sum", dev ]
stdout = node.run(cmd)
if node.readonly:
hashed=None
else:
hashed = stdout[0].split(" ")[0]
node.debug("Hash of volume {} is {}".format(dev, hashed))
return hashed
# def activate_volume_snapshot(dataset, snapshot):
# """enables snapdev, waits and tries to findout /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
#
# dataset.set("snapdev", "visible")
#
# #NOTE: add smartos location to this list as well
# locations=[
# "/dev/zvol/" + snapshot.name
# ]
#
# dataset.debug("Waiting for /dev entry to appear...")
# time.sleep(0.1)
#
# start_time=time.time()
# while time.time()-start_time<10:
# for location in locations:
# stdout, stderr, exit_code=dataset.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])
#
# #fake it in testmode
# if dataset.zfs_node.readonly:
# return location
#
# if exit_code==0:
# return location
# time.sleep(1)
#
# raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))
#
# def deactivate_volume_snapshot(dataset):
# dataset.inherit("snapdev")
#NOTE: https://www.google.com/search?q=Mount+Path+Limit+freebsd
#Freebsd has limitations regarding path length, so we can't use the above method.
#Instead we create a temporary clone
def get_tmp_clone_name(snapshot):
pool=snapshot.zfs_node.get_pool(snapshot)
return pool.name+"/"+tmp_name()
def activate_volume_snapshot(snapshot):
"""clone volume, waits and tries to findout /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
clone_name=get_tmp_clone_name(snapshot)
clone=snapshot.clone(clone_name)
#NOTE: add smartos location to this list as well
locations=[
"/dev/zvol/" + clone_name
]
clone.debug("Waiting for /dev entry to appear...")
time.sleep(0.1)
start_time=time.time()
while time.time()-start_time<10:
for location in locations:
stdout, stderr, exit_code=clone.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])
#fake it in testmode
if clone.zfs_node.readonly:
return location
if exit_code==0:
return location
time.sleep(1)
raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))
def deactivate_volume_snapshot(snapshot):
clone_name=get_tmp_clone_name(snapshot)
clone=snapshot.zfs_node.get_dataset(clone_name)
clone.destroy()
def verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot):
"""compare the contents of two zfs volume snapshots"""
try:
source_dev= activate_volume_snapshot(source_snapshot)
target_dev= activate_volume_snapshot(target_snapshot)
source_hash= hash_dev(source_snapshot.zfs_node, source_dev)
target_hash= hash_dev(target_snapshot.zfs_node, target_dev)
if source_hash!=target_hash:
raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
finally:
deactivate_volume_snapshot(source_snapshot)
deactivate_volume_snapshot(target_snapshot)
def create_mountpoints(source_node, target_node):
# prepare mount points
source_node.debug("Create temporary mount point")
source_mnt = "/tmp/"+tmp_name("source")
source_node.run(["mkdir", source_mnt])
target_node.debug("Create temporary mount point")
target_mnt = "/tmp/"+tmp_name("target")
target_node.run(["mkdir", target_mnt])
return source_mnt, target_mnt
def cleanup_mountpoint(node, mnt):
node.debug("Cleaning up temporary mount point")
node.run([ "rmdir", mnt ], hide_errors=True, valid_exitcodes=[] )
class ZfsAutoverify(ZfsAuto):
"""The zfs-autoverify class, default agruments and stuff come from ZfsAuto"""
def __init__(self, argv, print_arguments=True):
# NOTE: common options and parameters are in ZfsAuto
super(ZfsAutoverify, self).__init__(argv, print_arguments)
def parse_args(self, argv):
"""do extra checks on common args"""
args=super(ZfsAutoverify, self).parse_args(argv)
if args.target_path is None:
self.log.error("Please specify TARGET-PATH")
sys.exit(255)
return args
def get_parser(self):
"""extend common parser with extra stuff needed for zfs-autobackup"""
parser=super(ZfsAutoverify, self).get_parser()
group=parser.add_argument_group("Verify options")
group.add_argument('--fs-compare', metavar='METHOD', default="tar", choices=["tar", "rsync"],
help='Compare method to use for filesystems. (tar, rsync) Default: %(default)s ')
return parser
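# NOTE: a typical invocation might look like this (hypothetical names; the
# positional arguments and common options come from the shared ZfsAuto parser):
#   zfs-autoverify --ssh-target backupserver --fs-compare rsync backup1 backuppool/backups
# for every selected dataset this verifies the contents of the latest common
# snapshot on source and target.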
def verify_datasets(self, source_mnt, source_datasets, target_node, target_mnt):
fail_count=0
count = 0
for source_dataset in source_datasets:
# stats
if self.args.progress:
count = count + 1
self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
try:
# determine corresponding target_dataset
target_name = self.make_target_name(source_dataset)
target_dataset = target_node.get_dataset(target_name)
# find common snapshots to verify
source_snapshot = source_dataset.find_common_snapshot(target_dataset)
target_snapshot = target_dataset.find_snapshot(source_snapshot)
if source_snapshot is None or target_snapshot is None:
raise(Exception("Cant find common snapshot"))
target_snapshot.verbose("Verifying...")
if source_dataset.properties['type']=="filesystem":
verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, self.args.fs_compare)
elif source_dataset.properties['type']=="volume":
verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
else:
raise(Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
except Exception as e:
if self.args.progress:
self.clear_progress()
fail_count = fail_count + 1
target_dataset.error("FAILED: " + str(e))
if self.args.debug:
self.verbose("Debug mode, aborting on first error")
raise
if self.args.progress:
self.clear_progress()
return fail_count
def run(self):
source_node=None
source_mnt=None
target_node=None
target_mnt=None
try:
################ create source zfsNode
self.set_title("Source settings")
description = "[Source]"
source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description)
################# select source datasets
self.set_title("Selecting")
source_datasets = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged,
min_change=0)
if not source_datasets:
self.print_error_sources()
return 255
# create target_node
self.set_title("Target settings")
target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]")
target_node.verbose("Verify datasets under: {}".format(self.args.target_path))
self.set_title("Verifying")
source_mnt, target_mnt= create_mountpoints(source_node, target_node)
fail_count = self.verify_datasets(
source_mnt=source_mnt,
source_datasets=source_datasets,
target_mnt=target_mnt,
target_node=target_node)
if not fail_count:
if self.args.test:
self.set_title("All tests successful.")
else:
self.set_title("All datasets verified ok")
else:
if fail_count != 255:
self.error("{} dataset(s) failed!".format(fail_count))
if self.args.test:
self.verbose("")
self.warning("TEST MODE - DID NOT VERIFY ANYTHING!")
return fail_count
except Exception as e:
self.error("Exception: " + str(e))
if self.args.debug:
raise
return 255
except KeyboardInterrupt:
self.error("Aborted")
return 255
finally:
# cleanup
if source_mnt is not None:
cleanup_mountpoint(source_node, source_mnt)
if target_mnt is not None:
cleanup_mountpoint(target_node, target_mnt)
def cli():
import sys
sys.exit(ZfsAutoverify(sys.argv[1:], False).run())
if __name__ == "__main__":
cli()


@ -1,15 +1,14 @@
import re
import subprocess
import time
from .CachedProperty import CachedProperty
from .ExecuteNode import ExecuteError
class ZfsDataset:
"""a zfs dataset (filesystem/volume/snapshot/clone)
Note that a dataset doesn't have to actually exist (yet/anymore)
Also most properties are cached for performance-reasons, but also to allow --test to function correctly.
"""a zfs dataset (filesystem/volume/snapshot/clone) Note that a dataset
doesn't have to actually exist (yet/anymore) Also most properties are cached
for performance-reasons, but also to allow --test to function correctly.
"""
# illegal properties per dataset type. these will be removed from --set-properties and --filter-properties
@ -19,8 +18,11 @@ class ZfsDataset:
}
def __init__(self, zfs_node, name, force_exists=None):
"""name: full path of the zfs dataset exists: specify if you already know a dataset exists or not. for
performance and testing reasons. (otherwise it will have to check with zfs list when needed)
"""
Args:
:type zfs_node: ZfsNode.ZfsNode
:type name: str
:type force_exists: bool
"""
self.zfs_node = zfs_node
self.name = name # full name
@ -41,12 +43,24 @@ class ZfsDataset:
return self.name == obj.name
def verbose(self, txt):
"""
Args:
:type txt: str
"""
self.zfs_node.verbose("{}: {}".format(self.name, txt))
def error(self, txt):
"""
Args:
:type txt: str
"""
self.zfs_node.error("{}: {}".format(self.name, txt))
def debug(self, txt):
"""
Args:
:type txt: str
"""
self.zfs_node.debug("{}: {}".format(self.name, txt))
def invalidate(self):
@ -60,11 +74,19 @@ class ZfsDataset:
return self.name.split("/")
def lstrip_path(self, count):
"""return name with first count components stripped"""
"""return name with first count components stripped
Args:
:type count: int
"""
return "/".join(self.split_path()[count:])
def rstrip_path(self, count):
"""return name with last count components stripped"""
"""return name with last count components stripped
Args:
:type count: int
"""
return "/".join(self.split_path()[:-count])
@property
@ -90,36 +112,113 @@ class ZfsDataset:
"""true if this dataset is a snapshot"""
return self.name.find("@") != -1
def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
"""determine if dataset should be selected for backup (called from
ZfsNode)
Args:
:type exclude_paths: list of str
:type value: str
:type source: str
:type inherited: bool
:type exclude_received: bool
:type exclude_unchanged: bool
:type min_change: int
:param value: Value of the zfs property ("false"/"true"/"child"/"-")
:param source: Source of the zfs property ("local"/"received", "-")
:param inherited: True if the value/source was inherited from a higher dataset.
"""
# sanity checks
if source not in ["local", "received", "-"]:
# probably a program error in zfs-autobackup or new feature in zfs
raise (Exception(
"{} autobackup-property has illegal source: '{}' (possible BUG)".format(self.name, source)))
if value not in ["false", "true", "child", "-"]:
# user error
raise (Exception(
"{} autobackup-property has illegal value: '{}'".format(self.name, value)))
# not specified, ignore
if value == "-":
return False
# only select children of this dataset, ignore
if value == "child" and not inherited:
return False
# manually excluded by property
if value == "false":
self.verbose("Excluded")
return False
# from here on the dataset is selected by property, now do additional exclusion checks
# our path starts with one of the excluded paths?
for exclude_path in exclude_paths:
# if self.name.startswith(exclude_path):
if (self.name + "/").startswith(exclude_path + "/"):
# too noisy for verbose
self.debug("Excluded (path in exclude list)")
return False
if source == "received":
if exclude_received:
self.verbose("Excluded (dataset already received)")
return False
if exclude_unchanged and not self.is_changed(min_change):
self.verbose("Excluded (unchanged since last snapshot)")
return False
self.verbose("Selected")
return True
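# NOTE: examples of how the autobackup-property drives this selection
# (hypothetical dataset names):
#   zfs set autobackup:backup1=true rpool/data       # select rpool/data and its children
#   zfs set autobackup:backup1=child rpool           # select only the children of rpool
#   zfs set autobackup:backup1=false rpool/data/tmp  # exclude this subtree again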
@CachedProperty
def parent(self):
"""get zfs-parent of this dataset. for snapshots this means it will get the filesystem/volume that it belongs
to. otherwise it will return the parent according to path
"""get zfs-parent of this dataset. for snapshots this means it will get
the filesystem/volume that it belongs to. otherwise it will return the
parent according to path
we cache this so everything in the parent that is cached also stays.
"""
if self.is_snapshot:
return self.zfs_node.get_dataset(self.filesystem_name)
else:
return self.zfs_node.get_dataset(self.rstrip_path(1))
# NOTE: unused for now
# def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
# """find previous snapshot in this dataset. None if it doesn't exist.
#
# also_other_snapshots: set to true to also return snapshots that where
# not created by us. (is_ours)
#
# Args:
# :type snapshot: str or ZfsDataset.ZfsDataset
# :type also_other_snapshots: bool
# """
#
# if self.is_snapshot:
# raise (Exception("Please call this on a dataset."))
#
# index = self.find_snapshot_index(snapshot)
# while index:
# index = index - 1
# if also_other_snapshots or self.snapshots[index].is_ours():
# return self.snapshots[index]
# return None
def find_next_snapshot(self, snapshot, also_other_snapshots=False):
"""find next snapshot in this dataset. None if it doesn't exist"""
"""find next snapshot in this dataset. None if it doesn't exist
Args:
:type snapshot: ZfsDataset
:type also_other_snapshots: bool
"""
if self.is_snapshot:
raise (Exception("Please call this on a dataset."))
@ -133,8 +232,9 @@ class ZfsDataset:
@CachedProperty
def exists(self):
"""check if dataset exists.
Use force to force a specific value to be cached, if you already know. Useful for performance reasons"""
"""check if dataset exists. Use force to force a specific value to be
cached, if you already know. Useful for performance reasons
"""
if self.force_exists is not None:
self.debug("Checking if filesystem exists: was forced to {}".format(self.force_exists))
@ -146,7 +246,11 @@ class ZfsDataset:
hide_errors=True) and True)
def create_filesystem(self, parents=False):
"""create a filesystem"""
"""create a filesystem
Args:
:type parents: bool
"""
if parents:
self.verbose("Creating filesystem and parents")
self.zfs_node.run(["zfs", "create", "-p", self.name])
@ -157,7 +261,12 @@ class ZfsDataset:
self.force_exists = True
def destroy(self, fail_exception=False):
"""destroy the dataset. by default failures are not an exception, so we can continue making backups"""
"""destroy the dataset. by default failures are not an exception, so we
can continue making backups
Args:
:type fail_exception: bool
"""
self.verbose("Destroying")
@ -169,7 +278,7 @@ class ZfsDataset:
self.invalidate()
self.force_exists = False
return True
except ExecuteError:
if not fail_exception:
return False
else:
@ -196,27 +305,30 @@ class ZfsDataset:
return ret
def is_changed(self, min_changed_bytes=1):
"""dataset is changed since ANY latest snapshot ?"""
"""dataset is changed since ANY latest snapshot ?
Args:
:type min_changed_bytes: int
"""
self.debug("Checking if dataset is changed")
if min_changed_bytes == 0:
return True
if int(self.properties['written']) < min_changed_bytes:
return False
else:
return True
def is_ours(self):
    """return true if this snapshot name has the correct format"""
    try:
        test = self.timestamp
    except ValueError:
        return False

    return True
@property
def holds(self):
@ -228,35 +340,37 @@ class ZfsDataset:
def is_hold(self):
"""did we hold this snapshot?"""
return self.zfs_node.hold_name in self.holds
def hold(self):
"""hold dataset"""
self.debug("holding")
self.zfs_node.run(["zfs", "hold", self._hold_name, self.name], valid_exitcodes=[0, 1])
self.zfs_node.run(["zfs", "hold", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])
def release(self):
"""release dataset"""
if self.zfs_node.readonly or self.is_hold():
self.debug("releasing")
self.zfs_node.run(["zfs", "release", self._hold_name, self.name], valid_exitcodes=[0, 1])
self.zfs_node.run(["zfs", "release", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])
@property
def timestamp(self):
"""get timestamp from snapshot name. Only works for our own snapshots with the correct format."""
time_str = re.findall("^.*-([0-9]*)$", self.snapshot_name)[0]
if len(time_str) != 14:
raise (Exception("Snapshot has invalid timestamp in name: {}".format(self.snapshot_name)))
"""get timestamp from snapshot name. Only works for our own snapshots
with the correct format.
"""
# new format:
time_secs = time.mktime(time.strptime(time_str, "%Y%m%d%H%M%S"))
time_secs = time.mktime(time.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format))
return time_secs
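# NOTE: example, assuming snapshot_time_format is "backup1-%Y%m%d%H%M%S":
#   time.mktime(time.strptime("backup1-20220124175959", "backup1-%Y%m%d%H%M%S"))
# yields the local-time epoch seconds; is_ours() relies on this parse raising
# ValueError for snapshot names that don't match our format.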
def from_names(self, names):
"""convert a list of names to a list ZfsDatasets for this zfs_node"""
"""convert a list of names to a list ZfsDatasets for this zfs_node
Args:
:type names: list of str
"""
ret = []
for name in names:
ret.append(self.zfs_node.get_dataset(name))
return ret
@ -299,7 +413,13 @@ class ZfsDataset:
return ret
def find_snapshot(self, snapshot):
"""find snapshot by snapshot (can be a snapshot_name or a different ZfsDataset )"""
"""find snapshot by snapshot (can be a snapshot_name or a different
ZfsDataset )
Args:
:rtype: ZfsDataset
:type snapshot: str or ZfsDataset
"""
if not isinstance(snapshot, ZfsDataset):
snapshot_name = snapshot
@ -313,7 +433,12 @@ class ZfsDataset:
return None
def find_snapshot_index(self, snapshot):
"""find snapshot index by snapshot (can be a snapshot_name or ZfsDataset)"""
"""find snapshot index by snapshot (can be a snapshot_name or
ZfsDataset)
Args:
:type snapshot: str or ZfsDataset
"""
if not isinstance(snapshot, ZfsDataset):
snapshot_name = snapshot
@ -342,7 +467,11 @@ class ZfsDataset:
return int(output[0])
def is_changed_ours(self, min_changed_bytes=1):
"""dataset is changed since OUR latest snapshot?"""
"""dataset is changed since OUR latest snapshot?
Args:
:type min_changed_bytes: int
"""
if min_changed_bytes == 0:
return True
@ -358,7 +487,11 @@ class ZfsDataset:
@CachedProperty
def recursive_datasets(self, types="filesystem,volume"):
"""get all (non-snapshot) datasets recursively under us"""
"""get all (non-snapshot) datasets recursively under us
Args:
:type types: str
"""
self.debug("Getting all recursive datasets under us")
@ -370,7 +503,11 @@ class ZfsDataset:
@CachedProperty
def datasets(self, types="filesystem,volume"):
"""get all (non-snapshot) datasets directly under us"""
"""get all (non-snapshot) datasets directly under us
Args:
:type types: str
"""
self.debug("Getting all datasets under us")
@ -380,11 +517,20 @@ class ZfsDataset:
return self.from_names(names[1:])
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
"""returns a pipe with zfs send output for this snapshot
resume_token: resume sending from this token. (in that case we don't
need to know snapshot names)
Args:
:param send_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
:type send_pipes: list of str
:type features: list of str
:type prev_snapshot: ZfsDataset
:type resume_token: str
:type show_progress: bool
:type raw: bool
"""
# build source command
cmd = []
@ -393,28 +539,22 @@ class ZfsDataset:
# all kind of performance options:
if 'large_blocks' in features and "-L" in self.zfs_node.supported_send_options:
cmd.append("-L") # large block support (only if recordsize>128k which is seldomly used)
cmd.append("--large-block") # large block support (only if recordsize>128k which is seldomly used)
if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
cmd.append("--embed") # WRITE_EMBEDDED, more compact stream
if "-c" in self.zfs_node.supported_send_options:
cmd.append("-c") # use compressed WRITE records
if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
cmd.append("--compressed") # use compressed WRITE records
# NOTE: performance is usually worse with this option, according to manual
# also -D will be deprecated in newer ZFS versions
# if not resume:
# if "-D" in self.zfs_node.supported_send_options:
# cmd.append("-D") # dedupped stream, sends less duplicate data
# raw? (send over encrypted data in its original encrypted form without decrypting)
if raw:
cmd.append("--raw")
# progress output
if show_progress:
# cmd.append("-v")
cmd.append("-P")
cmd.append("--verbose")
cmd.append("--parsable")
# resume a previous send? (don't need more parameters in that case)
if resume_token:
@ -422,7 +562,8 @@ class ZfsDataset:
else:
# send properties
cmd.append("-p")
if send_properties:
cmd.append("--props")
# incremental?
if prev_snapshot:
@ -430,14 +571,26 @@ class ZfsDataset:
cmd.append(self.name)
# NOTE: this doesn't start the send yet, it only returns a subprocess.Pipe
cmd.extend(send_pipes)
output_pipe = self.zfs_node.run(cmd, pipe=True, readonly=True)
return output_pipe
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
"""starts a zfs recv for this snapshot and uses pipe as input
note: you can call it both on a snapshot or filesystem object. The
resulting zfs command is the same, only our object cache is invalidated
differently.
Args:
:param recv_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
:type pipe: subprocess.Popen
:type features: list of str
:type filter_properties: list of str
:type set_properties: list of str
:type ignore_exit_code: bool
"""
if set_properties is None:
@ -449,6 +602,8 @@ class ZfsDataset:
# build target command
cmd = []
cmd.extend(recv_pipes)
cmd.extend(["zfs", "recv"])
# don't mount filesystem that is received
@ -491,12 +646,26 @@ class ZfsDataset:
self.error("error during transfer")
raise (Exception("Target doesn't exist after transfer, something went wrong."))
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer
connects a send_pipe() to recv_pipe()
Args:
:type send_pipes: list of str
:type recv_pipes: list of str
:type target_snapshot: ZfsDataset
:type features: list of str
:type prev_snapshot: ZfsDataset
:type show_progress: bool
:type filter_properties: list of str
:type set_properties: list of str
:type ignore_recv_exit_code: bool
:type resume_token: str
:type raw: bool
"""
if set_properties is None:
@ -518,12 +687,13 @@ class ZfsDataset:
# do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
def abort_resume(self):
"""abort current resume state"""
self.debug("Aborting resume")
self.zfs_node.run(["zfs", "recv", "-A", self.name])
def rollback(self):
@ -536,7 +706,12 @@ class ZfsDataset:
return
def get_resume_snapshot(self, resume_token):
"""returns snapshot that will be resumed by this resume token (run this on source with target-token)"""
"""returns snapshot that will be resumed by this resume token (run this
on source with target-token)
Args:
:type resume_token: str
"""
# use zfs send -n option to determine this
# NOTE: on smartos stderr, on linux stdout
(stdout, stderr) = self.zfs_node.run(["zfs", "send", "-t", resume_token, "-n", "-v"], valid_exitcodes=[0, 255],
@ -549,18 +724,23 @@ class ZfsDataset:
matches = re.findall("toname = .*@(.*)", line)
if matches:
snapshot_name = matches[0]
snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot_name)
snapshot.debug("resume token belongs to this snapshot")
return snapshot
return None
def thin_list(self, keeps=None, ignores=None):
"""determines list of snapshots that should be kept or deleted based on the thinning schedule. cull the herd!
keep: list of snapshots to always keep (usually the last) ignores: snapshots to completely ignore (usually
incompatible target snapshots that are going to be destroyed anyway)
"""determines list of snapshots that should be kept or deleted based on
the thinning schedule. cull the herd!
returns: ( keeps, obsoletes )
Args:
:param keeps: list of snapshots to always keep (usually the last)
:param ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be destroyed anyway)
:type keeps: list of ZfsDataset
:type ignores: list of ZfsDataset
"""
if ignores is None:
@ -570,10 +750,14 @@ class ZfsDataset:
snapshots = [snapshot for snapshot in self.our_snapshots if snapshot not in ignores]
return self.zfs_node.thin(snapshots, keep_objects=keeps)
def thin(self, skip_holds=False):
"""destroys snapshots according to thin_list, except last snapshot"""
"""destroys snapshots according to thin_list, except last snapshot
Args:
:type skip_holds: bool
"""
(keeps, obsoletes) = self.thin_list(keeps=self.our_snapshots[-1:])
for obsolete in obsoletes:
@ -584,17 +768,16 @@ class ZfsDataset:
self.snapshots.remove(obsolete)
def find_common_snapshot(self, target_dataset):
"""find latest common snapshot between us and target
returns None if its an initial transfer
"""find latest common snapshot between us and target returns None if its
an initial transfer
Args:
:type target_dataset: ZfsDataset
"""
if not target_dataset.snapshots:
# target has nothing yet
return None
else:
# try to find a common snapshot
for source_snapshot in reversed(self.snapshots):
if target_dataset.find_snapshot(source_snapshot):
source_snapshot.debug("common snapshot")
@ -603,8 +786,12 @@ class ZfsDataset:
raise (Exception("You probably need to delete the target dataset to fix this."))
def find_start_snapshot(self, common_snapshot, also_other_snapshots):
"""finds first snapshot to send
:rtype: ZfsDataset or None if we cant find it.
"""finds first snapshot to send :rtype: ZfsDataset or None if we cant
find it.
Args:
:type common_snapshot: ZfsDataset
:type also_other_snapshots: bool
"""
if not common_snapshot:
@ -624,8 +811,13 @@ class ZfsDataset:
return start_snapshot
def find_incompatible_snapshots(self, common_snapshot):
"""returns a list of snapshots that is incompatible for a zfs recv onto the common_snapshot.
all direct followup snapshots with written=0 are compatible."""
"""returns a list of snapshots that is incompatible for a zfs recv onto
the common_snapshot. all direct followup snapshots with written=0 are
compatible.
Args:
:type common_snapshot: ZfsDataset
"""
ret = []
@ -639,7 +831,12 @@ class ZfsDataset:
return ret
def get_allowed_properties(self, filter_properties, set_properties):
"""only returns lists of allowed properties for this dataset type"""
"""only returns lists of allowed properties for this dataset type
Args:
:type filter_properties: list of str
:type set_properties: list of str
"""
allowed_filter_properties = []
allowed_set_properties = []
@ -656,25 +853,42 @@ class ZfsDataset:
return allowed_filter_properties, allowed_set_properties
def _add_virtual_snapshots(self, source_dataset, source_start_snapshot, also_other_snapshots):
"""add snapshots from source to our snapshot list. (just the in memory list, no disk operations)"""
"""add snapshots from source to our snapshot list. (just the in memory
list, no disk operations)
Args:
:type source_dataset: ZfsDataset
:type source_start_snapshot: ZfsDataset
:type also_other_snapshots: bool
"""
self.debug("Creating virtual target snapshots")
snapshot = source_start_snapshot
while snapshot:
# create virtual target snapshot
# NOTE: with force_exists we're telling the dataset it doesn't exist yet. (e.g. it's virtual)
virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
self.snapshots.append(virtual_snapshot)
snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)
def _pre_clean(self, common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_keeps):
"""cleanup old stuff before starting snapshot syncing"""
"""cleanup old stuff before starting snapshot syncing
Args:
:type common_snapshot: ZfsDataset
:type target_dataset: ZfsDataset
:type source_obsoletes: list of ZfsDataset
:type target_obsoletes: list of ZfsDataset
:type target_keeps: list of ZfsDataset
"""
# on source: destroy all obsoletes before common. (since we can't send them anyway)
# But after common, only delete snapshots that target also doesn't want
if common_snapshot:
before_common = True
else:
before_common = False
for source_snapshot in self.snapshots:
if common_snapshot and source_snapshot.snapshot_name == common_snapshot.snapshot_name:
before_common = False
@ -686,26 +900,41 @@ class ZfsDataset:
# on target: destroy everything thats obsolete, except common_snapshot
for target_snapshot in target_dataset.snapshots:
if (target_snapshot in target_obsoletes) \
        and (not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
if target_snapshot.exists:
target_snapshot.destroy()
def _validate_resume_token(self, target_dataset, start_snapshot):
"""validate and get (or destory) resume token"""
"""validate and get (or destory) resume token
Args:
:type target_dataset: ZfsDataset
:type start_snapshot: ZfsDataset
"""
if 'receive_resume_token' in target_dataset.properties:
    if start_snapshot is None:
        target_dataset.verbose("Aborting resume, it's obsolete.")
        target_dataset.abort_resume()
    else:
        resume_token = target_dataset.properties['receive_resume_token']
        # not valid anymore?
        resume_snapshot = self.get_resume_snapshot(resume_token)
        if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
            target_dataset.verbose("Aborting resume, it's no longer valid.")
            target_dataset.abort_resume()
        else:
            return resume_token
def _plan_sync(self, target_dataset, also_other_snapshots):
"""plan where to start syncing and what to sync and what to keep"""
"""plan where to start syncing and what to sync and what to keep
Args:
:rtype: ( ZfsDataset, ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset )
:type target_dataset: ZfsDataset
:type also_other_snapshots: bool
"""
# determine common and start snapshot
target_dataset.debug("Determining start snapshot")
@ -729,7 +958,13 @@ class ZfsDataset:
return common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps, incompatible_target_snapshots
def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_incompatible):
"""destroy incompatbile snapshots on target before sync, or inform user what to do"""
"""destroy incompatbile snapshots on target before sync, or inform user
what to do
Args:
:type incompatible_target_snapshots: list of ZfsDataset
:type destroy_incompatible: bool
"""
if incompatible_target_snapshots:
if not destroy_incompatible:
@ -742,10 +977,29 @@ class ZfsDataset:
snapshot.destroy()
self.snapshots.remove(snapshot)
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
"""sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, no_thinning, zfs_compressed):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
Args:
:type send_pipes: list of str
:type recv_pipes: list of str
:type target_dataset: ZfsDataset
:type features: list of str
:type show_progress: bool
:type filter_properties: list of str
:type set_properties: list of str
:type ignore_recv_exit_code: bool
:type holds: bool
:type rollback: bool
:type decrypt: bool
:type encrypt: bool
:type also_other_snapshots: bool
:type no_send: bool
:type destroy_incompatible: bool
:type no_thinning: bool
:type zfs_compressed: bool
"""
(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
incompatible_target_snapshots) = \
@ -753,10 +1007,12 @@ class ZfsDataset:
# NOTE: we do this because we don't want filesystems to fill up when backups keep failing.
# Also useful with no_send to still clean up stuff.
if not no_thinning:
self._pre_clean(
common_snapshot=common_snapshot, target_dataset=target_dataset,
target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
# handle incompatible stuff on target
target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)
# now actually transfer the snapshots, if we want
if no_send:
@ -765,13 +1021,34 @@ class ZfsDataset:
# check if we can resume
resume_token = self._validate_resume_token(target_dataset, start_snapshot)
# rollback target to latest?
if rollback:
target_dataset.rollback()
#defaults for these settings if there is no encryption stuff going on:
send_properties = True
raw = False
write_embedded = True
(active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties, set_properties)
# source dataset encrypted?
if self.properties.get('encryption', 'off')!='off':
# user wants to send it over decrypted?
if decrypt:
# when decrypting, zfs can't send properties
send_properties=False
else:
# keep data encrypted by sending it raw (including properties)
raw=True
# encrypt at target?
if encrypt and not raw:
# filter out encryption properties to let encryption on the target take place
active_filter_properties.extend(["keylocation","pbkdf2iters","keyformat", "encryption"])
write_embedded=False
# now actually transfer the snapshots
prev_source_snapshot = common_snapshot
source_snapshot = start_snapshot
@ -780,15 +1057,14 @@ class ZfsDataset:
# does target actually want it?
if target_snapshot not in target_obsoletes:
source_snapshot.transfer_snapshot(target_snapshot, features=features,
prev_snapshot=prev_source_snapshot, show_progress=show_progress,
filter_properties=active_filter_properties,
set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
resume_token = None
# hold the new common snapshots and release the previous ones
@ -801,24 +1077,83 @@ class ZfsDataset:
prev_source_snapshot.release()
target_dataset.find_snapshot(prev_source_snapshot).release()
if not no_thinning:
    # we may now destroy the previous source snapshot if it's obsolete
    if prev_source_snapshot in source_obsoletes:
        prev_source_snapshot.destroy()

    # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
    # the rest was already destroyed or will not be sent)
    prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
    if prev_target_snapshot in target_obsoletes:
        prev_target_snapshot.destroy()
prev_source_snapshot = source_snapshot
else:
source_snapshot.debug("skipped (target doesn't need it)")
# was it actually a resume?
if resume_token:
target_dataset.debug("aborting resume, since we don't want that snapshot anymore")
target_dataset.verbose("Aborting resume, we dont want that snapshot anymore.")
target_dataset.abort_resume()
resume_token = None
source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)
def mount(self, mount_point):
self.debug("Mounting")
cmd = [
"mount", "-tzfs", self.name, mount_point
]
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
def unmount(self):
self.debug("Unmounting")
cmd = [
"umount", self.name
]
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
def clone(self, name):
"""clones this snapshot and returns ZfsDataset of the clone"""
self.debug("Cloning to {}".format(name))
cmd = [
"zfs", "clone", self.name, name
]
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
return self.zfs_node.get_dataset(name, force_exists=True)
def set(self, prop, value):
"""set a zfs property"""
self.debug("Setting {}={}".format(prop, value))
cmd = [
"zfs", "set", "{}={}".format(prop, value), self.name
]
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
self.invalidate()
def inherit(self, prop):
"""inherit zfs property"""
self.debug("Inheriting property {}".format(prop))
cmd = [
"zfs", "inherit", prop, self.name
]
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
self.invalidate()


@ -1,27 +1,30 @@
# python 2 compatibility
from __future__ import print_function
import re
import shlex
import subprocess
import sys
import time
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .CachedProperty import CachedProperty
from .ZfsPool import ZfsPool
from .ZfsDataset import ZfsDataset
from .ExecuteNode import ExecuteError
class ZfsNode(ExecuteNode):
"""a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""
def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
description="",
debug_output=False, thinner=None):
self.snapshot_time_format = snapshot_time_format
self.hold_name = hold_name
self.description = description
self.logger = logger
@ -33,23 +36,32 @@ class ZfsNode(ExecuteNode):
else:
self.verbose("Datasets are local")
if thinner is not None:
rules = thinner.human_rules()
if rules:
for rule in rules:
self.verbose(rule)
else:
self.verbose("Keep no old snaphots")
self.__thinner = thinner
# list of ZfsPools
self.__pools = {}
self.__datasets = {}
self._progress_total_bytes = 0
self._progress_start_time = time.time()
ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)
def thin(self, objects, keep_objects):
# NOTE: if thinning is disabled with --no-thinning, self.__thinner will be None.
if self.__thinner is not None:
return self.__thinner.thin(objects, keep_objects)
else:
return (keep_objects, [])
@CachedProperty
def supported_send_options(self):
"""list of supported options, for optimizing sends"""
@ -77,17 +89,25 @@ class ZfsNode(ExecuteNode):
try:
self.run(cmd, hide_errors=True, valid_exitcodes=[0, 1])
except ExecuteError:
return False
return True
def get_pool(self, dataset):
"""get a ZfsPool() object from dataset. stores objects internally to enable caching"""
if not isinstance(dataset, ZfsDataset):
raise (Exception("{} is not a ZfsDataset".format(dataset)))
zpool_name = dataset.name.split("/")[0]
return self.__pools.setdefault(zpool_name, ZfsPool(self, zpool_name))
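# NOTE: e.g. a dataset named "rpool/data/web" maps to pool "rpool"; since
# pools are cached in self.__pools, properties like pool.features are only
# queried once per pool, no matter how many datasets it contains.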
def get_dataset(self, name, force_exists=None):
"""get a ZfsDataset() object from name. stores objects internally to enable caching"""
return self.__datasets.setdefault(name, ZfsDataset(self, name))
def reset_progress(self):
"""reset progress output counters"""
@ -115,7 +135,7 @@ class ZfsNode(ExecuteNode):
self._progress_total_bytes = int(progress_fields[2])
elif progress_fields[0] == 'incremental':
self._progress_total_bytes = int(progress_fields[3])
elif progress_fields[1].isnumeric():
bytes_ = int(progress_fields[1])
if self._progress_total_bytes:
percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
@@ -123,9 +143,9 @@ class ZfsNode(ExecuteNode):
bytes_left = self._progress_total_bytes - bytes_
minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)
print(">>> {}% {}MB/s (total {}MB, {} minutes left) \r".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left), end='', file=sys.stderr)
sys.stderr.flush()
self.logger.progress(
"Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left))
return
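The ETA arithmetic is: average throughput since the transfer began, remaining bytes divided by that rate, converted to minutes. A self-contained rework of the calculation (function name and sample numbers are illustrative):

def progress_eta(total_bytes, bytes_done, elapsed_seconds):
    # average speed over the whole transfer so far, in bytes/second
    speed = bytes_done / elapsed_seconds
    percentage = min(100, int(bytes_done * 100 / total_bytes))
    minutes_left = int((total_bytes - bytes_done) / speed / 60)
    return percentage, round(speed / (1024 * 1024), 1), minutes_left

# 1 GiB total, 256 MiB sent after 60 s -> (25, 4.3, 3)
print(progress_eta(1 << 30, 256 << 20, 60.0))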
@@ -135,8 +155,8 @@ class ZfsNode(ExecuteNode):
else:
self.error(prefix + line.rstrip())
def _parse_stderr_pipe(self, line, hide_errors):
self.parse_zfs_progress(line, hide_errors, "STDERR|> ")
# def _parse_stderr_pipe(self, line, hide_errors):
# self.parse_zfs_progress(line, hide_errors, "STDERR|> ")
def _parse_stderr(self, line, hide_errors):
self.parse_zfs_progress(line, hide_errors, "STDERR > ")
@@ -147,14 +167,14 @@ class ZfsNode(ExecuteNode):
def error(self, txt):
self.logger.error("{} {}".format(self.description, txt))
def warning(self, txt):
self.logger.warning("{} {}".format(self.description, txt))
def debug(self, txt):
self.logger.debug("{} {}".format(self.description, txt))
def new_snapshotname(self):
"""determine uniq new snapshotname"""
return self.backup_name + "-" + time.strftime("%Y%m%d%H%M%S")
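The name is simply the backup name plus a second-resolution timestamp (local time), which makes snapshot names unique per run and lexically sortable by creation time. For example:

import time

backup_name = "offsite1"  # illustrative --backup-name
# e.g. "offsite1-20220124165520" for a snapshot taken 2022-01-24 16:55:20
print(backup_name + "-" + time.strftime("%Y%m%d%H%M%S"))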
def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes):
def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
post_snapshot_cmds=[]):
"""create a consistent (atomic) snapshot of specified datasets, per pool.
"""
@@ -168,7 +188,7 @@ class ZfsNode(ExecuteNode):
continue
# force_exist, since we're making it
snapshot = ZfsDataset(dataset.zfs_node, dataset.name + "@" + snapshot_name, force_exists=True)
snapshot = self.get_dataset(dataset.name + "@" + snapshot_name, force_exists=True)
pool = dataset.split_path()[0]
if pool not in pools:
@@ -184,18 +204,30 @@ class ZfsNode(ExecuteNode):
self.verbose("No changes anywhere: not creating snapshots.")
return
# create consistent snapshot per pool
for (pool_name, snapshots) in pools.items():
cmd = ["zfs", "snapshot"]
try:
for cmd in pre_snapshot_cmds:
self.verbose("Running pre-snapshot-cmd")
self.run(cmd=shlex.split(cmd), readonly=False)
cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))
# create consistent snapshot per pool
for (pool_name, snapshots) in pools.items():
cmd = ["zfs", "snapshot"]
self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
self.run(cmd, readonly=False)
cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))
@CachedProperty
def selected_datasets(self):
"""determine filesystems that should be backupped by looking at the special autobackup-property, systemwide
self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
self.run(cmd, readonly=False)
finally:
for cmd in post_snapshot_cmds:
self.verbose("Running post-snapshot-cmd")
try:
self.run(cmd=shlex.split(cmd), readonly=False)
except Exception as e:
# don't let a failing post-snapshot-cmd abort the run; just warn about it
self.warning("Post-snapshot-cmd failed: {}".format(e))
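Two things are worth noting here: ZFS snapshots every dataset passed to a single `zfs snapshot` invocation atomically, which is why the command is assembled per pool, and the try/finally guarantees the post-snapshot commands run even if snapshotting fails, so whatever the pre-commands quiesced gets resumed. A hypothetical call (service and variable names invented):

node.consistent_snapshot(
    datasets,                                      # selected ZfsDatasets
    node.new_snapshotname(),
    min_changed_bytes=1,
    pre_snapshot_cmds=["systemctl stop myapp"],    # quiesce before snapshot
    post_snapshot_cmds=["systemctl start myapp"],  # runs even on failure
)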
def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
"""determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
returns: list of ZfsDataset
"""
@@ -204,35 +236,34 @@ class ZfsNode(ExecuteNode):
# get all source filesystems that have the backup property
lines = self.run(tab_split=True, readonly=True, cmd=[
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-s", "local,inherited", "-H",
"autobackup:" + self.backup_name
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
property_name
])
# determine filesystems that should actually be backed up
# The list of selected ZfsDatasets to return:
selected_filesystems = []
direct_filesystems = []
# list of sources, used to resolve inherited sources
sources = {}
for line in lines:
(name, value, source) = line
dataset = ZfsDataset(self, name)
if value == "false":
dataset.verbose("Ignored (disabled)")
(name, value, raw_source) = line
dataset = self.get_dataset(name, force_exists=True)
# "resolve" inherited sources
sources[name] = raw_source
if raw_source.find("inherited from ") == 0:
inherited = True
inherited_from = re.sub("^inherited from ", "", raw_source)
source = sources[inherited_from]
else:
if source == "local" and (value == "true" or value == "child"):
direct_filesystems.append(name)
inherited = False
source = raw_source
if source == "local" and value == "true":
dataset.verbose("Selected (direct selection)")
selected_filesystems.append(dataset)
elif source.find("inherited from ") == 0 and (value == "true" or value == "child"):
inherited_from = re.sub("^inherited from ", "", source)
if inherited_from in direct_filesystems:
selected_filesystems.append(dataset)
dataset.verbose("Selected (inherited selection)")
else:
dataset.debug("Ignored (already a backup)")
else:
dataset.verbose("Ignored (only childs)")
# determine whether this dataset is selected, honoring the exclude rules
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
min_change=min_change):
selected_filesystems.append(dataset)
return selected_filesystems
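Because the `-s local,inherited` filter was dropped from the `zfs get` call, the sources dict now does the disambiguation: each dataset records its raw source, and an "inherited from X" source is resolved to the raw source of X, which `zfs get` has already listed since parents precede their children in the output. A hedged sketch of that resolution on canned, tab-split rows:

# Canned rows as they come back from `zfs get -H -o name,value,source ...`
lines = [
    ("rpool/data",     "true",  "local"),
    ("rpool/data/vm1", "true",  "inherited from rpool/data"),
    ("rpool/swap",     "false", "local"),
]

sources = {}
for name, value, raw_source in lines:
    sources[name] = raw_source
    if raw_source.startswith("inherited from "):
        # resolve to the source of the dataset we inherited from
        source = sources[raw_source[len("inherited from "):]]
    else:
        source = raw_source
    print(name, value, source)  # rpool/data/vm1 resolves to source "local"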


@@ -1,4 +1,4 @@
from zfs_autobackup.CachedProperty import CachedProperty
from .CachedProperty import CachedProperty
class ZfsPool():
@@ -45,8 +45,7 @@ class ZfsPool():
ret = {}
for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
if len(pair) == 4:
ret[pair[1]] = pair[2]
return ret
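`zpool get -H` prints four tab-separated columns (pool, property, value, source), so the len(pair) == 4 guard skips blank or malformed lines before the property-to-value mapping is built. A sketch on canned output:

# Canned `zpool get -H all rpool` output: name, property, value, source.
raw = "rpool\tsize\t464G\t-\nrpool\thealth\tONLINE\t-\n\n"

ret = {}
for line in raw.splitlines():
    pair = line.split("\t")
    if len(pair) == 4:  # skips the trailing blank line
        ret[pair[1]] = pair[2]

print(ret)  # {'size': '464G', 'health': 'ONLINE'}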


@@ -1,9 +1,3 @@
def cli():
import sys
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
zfs_autobackup = ZfsAutobackup(sys.argv[1:], False)
sys.exit(zfs_autobackup.run())


@@ -4,7 +4,4 @@
import sys
if __name__ == "__main__":
from . import cli
cli()


@@ -0,0 +1,75 @@
# Adapted from Syncoid :)
# this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
COMPRESS_CMDS = {
'gzip': {
'cmd': 'gzip',
'args': [ '-3' ],
'dcmd': 'zcat',
'dargs': [],
},
'pigz-fast': {
'cmd': 'pigz',
'args': [ '-3' ],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
},
'pigz-slow': {
'cmd': 'pigz',
'args': [ '-9' ],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
},
'zstd-fast': {
'cmd': 'zstdmt',
'args': [ '-3' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'zstd-slow': {
'cmd': 'zstdmt',
'args': [ '-19' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'zstd-adapt': {
'cmd': 'zstdmt',
'args': [ '--adapt' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'xz': {
'cmd': 'xz',
'args': [],
'dcmd': 'xz',
'dargs': [ '-d' ],
},
'lzo': {
'cmd': 'lzop',
'args': [],
'dcmd': 'lzop',
'dargs': [ '-dfc' ],
},
'lz4': {
'cmd': 'lz4',
'args': [],
'dcmd': 'lz4',
'dargs': [ '-dc' ],
},
}
def compress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['cmd']]
ret.extend(COMPRESS_CMDS[compressor]['args'])
return ret
def decompress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['dcmd']]
ret.extend(COMPRESS_CMDS[compressor]['dargs'])
return ret
def choices():
return COMPRESS_CMDS.keys()
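These helpers return argv lists ready to be spliced into the transfer pipe, so with the default zstd-fast the stream conceptually becomes `zfs send ... | zstdmt -3 | ssh ... "zstdmt -dc | zfs recv ..."`. A quick usage sketch:

print(compress_cmd('zstd-fast'))    # ['zstdmt', '-3']
print(decompress_cmd('zstd-fast'))  # ['zstdmt', '-dc']
print(sorted(choices())[:3])        # ['gzip', 'lz4', 'lzo']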