Compare commits

...

240 Commits

Author SHA1 Message Date
89ed1e012d cleanup 2022-01-24 17:22:44 +01:00
ff9beae427 create temporary clone to verify volumes 2022-01-24 16:55:20 +01:00
302a9ecd86 more consistent creation of ZfsDataset and ZfsPool via ZfsNode.get_dataset() and ZfsNode.get_pool() 2022-01-24 16:29:32 +01:00
c0086f8953 added tar-mode. moved static methods. more compatible /dev checking without udevadm 2022-01-24 13:53:32 +01:00
ddd82b935b show test output 2022-01-24 12:31:28 +01:00
51d6731aa8 settle udev devices when 2022-01-24 11:46:34 +01:00
36f2b672bd more zfs-verify tests 2022-01-24 11:41:51 +01:00
81a785b360 more zfs-verify tests 2022-01-24 11:37:42 +01:00
670532ef31 pythonversion agnostic 2022-01-24 11:02:56 +01:00
dd55ca4079 zfs-autoverify wip (basics start to function) 2022-01-24 00:18:27 +01:00
f66957d867 zfs-autoverify wip 2022-01-23 23:01:53 +01:00
69975b37fb zfs-autoverify wip 2022-01-23 21:36:56 +01:00
c299626d18 debug mode implies verbose mode now 2022-01-23 21:22:46 +01:00
7b4f10080f zfs-verify wip (not functional yet) 2022-01-19 00:11:27 +01:00
787e3dba9c zfs-verify stuff 2022-01-18 23:46:08 +01:00
86d504722c zfs-verify stuff 2022-01-18 20:54:19 +01:00
6791bc4abd ready to implement zfs-autoverify 2022-01-18 01:02:01 +01:00
db5186bf38 ready to implement zfs-autoverify 2022-01-18 00:11:52 +01:00
d2b183bb27 move more ulgy stuff to parse_args 2022-01-17 23:34:22 +01:00
033fcf68f7 move exclude_paths and exclude_received to common 2022-01-17 23:10:35 +01:00
14d45667de fixes 2022-01-17 22:54:27 +01:00
f2a3221911 fixes 2022-01-17 22:34:18 +01:00
8baee52ab1 greatly improved output of help (divided into sections) 2022-01-17 22:26:42 +01:00
d114f63f29 extract common stuff to prepare for zfs-autoverify 2022-01-17 21:19:40 +01:00
b36b64cc94 Update FUNDING.yml 2022-01-12 00:20:30 +01:00
5a70172a50 Update FUNDING.yml 2022-01-12 00:20:17 +01:00
f635e8cd67 Create FUNDING.yml 2022-01-12 00:16:01 +01:00
0e362e5d89 moved stuff to wiki 2022-01-07 11:56:32 +01:00
f2ab2938b0 moved stuff to wiki 2022-01-07 11:54:36 +01:00
2d96d13125 moved stuff to wiki 2022-01-07 11:53:29 +01:00
883984fda3 Revert "Initial ZFS clones support"
Woops accidently committed this, still need to review/change it before comitting.

This reverts commit e11c332808.
2022-01-04 22:48:25 +01:00
db2625b08c fix #101 2022-01-04 22:26:44 +01:00
e11c332808 Initial ZFS clones support 2021-12-21 20:09:41 +01:00
07cb7cfad4 version bump 2021-12-19 18:23:09 +01:00
7b4a986f13 fix #103 2021-12-19 18:16:54 +01:00
be2474bb1c error doc 2021-11-02 20:18:57 +01:00
81e7cd940c forgot to remove debugging print 2021-10-04 00:59:50 +02:00
0b4448798e out of range for python 2 2021-10-04 00:34:07 +02:00
b1689f5066 added --...-format options. closes #87 2021-10-04 00:14:40 +02:00
dcb9cdac44 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-10-03 21:50:08 +02:00
9dc280abad Merge pull request #98 from sbidoul/relative-imports
Use relative imports
2021-09-21 16:44:47 +02:00
6b8c683315 Merge branch 'relative-imports' of https://github.com/sbidoul/zfs_autobackup 2021-09-21 14:31:50 +02:00
66e123849b preparations for #87 2021-09-21 14:30:59 +02:00
7325e1e351 ignore coveralls submission errors 2021-09-21 14:21:32 +02:00
9f4ea51622 ignore coveralls submission errors 2021-09-21 14:14:58 +02:00
8c1058a808 Use relative imports 2021-09-21 14:05:33 +02:00
d9e759a3eb fix 2021-09-20 21:14:26 +02:00
46457b3aca added some common short options and changes to fix #88 2021-09-20 15:11:23 +02:00
59f7ccc352 default compression is now zstd-fast, fixes #97 2021-09-20 14:54:38 +02:00
578fb1be4b renamed --ignore-replicated to --exclude-unchanged. tidied up and removed seperate filter_replicated() step. #93, #95, #96 2021-09-20 14:52:20 +02:00
f9b16c050b Merge pull request #96 from xrobau/really-ignore-replicated
Fix #93, Fix #95 Re-Document --exclude-received
2021-09-16 12:14:01 +02:00
2ba6fe5235 Fix #93, Fix #95 Re-Document --exclude-received
If another zfs_autobackup session is running, there will be changes
on zvols that are replicated. This means that it is possible for
a pair of servers running replication between themselves to start
backing up the backups.  Turning on --exclude-received when backups
are running between an a <--> b pair of servers means that the vols
that are received will never be accidentally seleted.
2021-09-16 09:38:26 +10:00
8e2c91735a Merge remote-tracking branch 'origin/master' 2021-08-24 11:14:24 +02:00
d57e3922a0 added note, #91 2021-08-24 11:14:13 +02:00
4b25dd76f1 Merge pull request #90 from bagbag/master
Support for zstd-adapt
2021-08-19 15:45:17 +02:00
2843781aa6 Support for zstd-adapt 2021-08-18 23:08:00 +02:00
ce987328d9 update doc 2021-08-18 10:32:41 +02:00
9a902f0f38 final 3.1 release 2021-08-18 10:28:10 +02:00
ee2c074539 it seems the amount of changed bytes has become bigger on my prox. (approx 230k) 2021-07-07 13:20:04 +02:00
77f1c16414 fix #84 2021-07-03 14:31:34 +02:00
c5363a1538 pre/post cmd tests 2021-06-22 12:43:52 +02:00
119225ba5b bump 2021-06-18 17:27:41 +02:00
84437ee1d0 pre/post snapshot polishing. still needs test-scripts 2021-06-18 17:16:18 +02:00
1286bfafd0 Merge pull request #80 from tuffnatty/pre-post-snapshot-cmd
Add support for pre- and post-snapshot scripts (#39)
2021-06-17 11:48:09 +02:00
9fc2703638 Always call post-snapshot-cmd to clean up faster on snapshot failure 2021-06-17 12:38:16 +03:00
01dc65af96 Merge pull request #81 from tuffnatty/another-typo
Another typo fix
2021-06-17 11:21:08 +02:00
082153e0ce A more robust MySQL example 2021-06-17 09:52:18 +03:00
77f5474447 Fix tests. 2021-06-16 23:37:38 +03:00
55ff14f1d8 Allow multiple --pre/post-snapshot-cmd options. Add a usage example. 2021-06-16 23:19:29 +03:00
2acd26b304 Fix a typo 2021-06-16 19:52:06 +03:00
ec9459c1d2 Use shlex.split() for --pre-snapshot-cmd and --post-snapshot-cmd 2021-06-16 19:35:41 +03:00
233fd83ded Merge pull request #79 from tuffnatty/typos
Fix a couple of typos
2021-06-16 16:46:10 +02:00
37c24e092c Merge pull request #78 from tuffnatty/patch-1
Typo fix in argument name in warning message
2021-06-16 16:45:42 +02:00
b2bf11382c Add --pre-snapshot-cmd and --post-snapshot-cmd options 2021-06-16 16:12:20 +03:00
19b918044e Fix a couple of typos 2021-06-16 13:19:47 +03:00
67d9240e7b Typo fix in argument name in warning message 2021-06-16 00:32:31 +03:00
1a5e4a9cdd doc 2021-05-31 22:33:44 +02:00
31f8c359ff update version number 2021-05-31 22:19:28 +02:00
b50b7b7563 test 2021-05-31 22:10:56 +02:00
37f91e1e08 no longer use zfs send --compressed as default. uses --zfs-compressed to reenable it. fixes #77 . 2021-05-31 22:02:31 +02:00
a2f3aee5b1 test and fix resume edge-case 2021-05-26 23:06:19 +02:00
75d0a3cc7e rc1 2021-05-26 20:15:05 +02:00
98c55e2aa8 test fix 2021-05-26 18:07:17 +02:00
d478e22111 test rate limit 2021-05-26 18:04:05 +02:00
3a4953fbc5 doc 2021-05-26 17:57:38 +02:00
8d4e041a9c add mbuffer 2021-05-26 17:52:10 +02:00
8725d56bc9 also add buffer on receving side 2021-05-26 17:38:05 +02:00
ab0bfdbf4e update docs 2021-05-19 00:52:56 +02:00
ea9012e476 beta6 2021-05-18 23:42:13 +02:00
97e3c110b3 added bandwidth throttling. fixes #51 2021-05-18 19:56:33 +02:00
9264e8de6d more warnings 2021-05-18 19:36:33 +02:00
830ccf1bd4 added warnings in yellow 2021-05-18 19:22:46 +02:00
a389e4c81c fix 2021-05-18 18:18:54 +02:00
36a66fbafc fix 2021-05-18 18:10:34 +02:00
b70c9986c7 regression tests for all compressors 2021-05-18 18:04:47 +02:00
664ea32c96 doc 2021-05-15 16:18:34 +02:00
30f30babea added compression, fixes #40 2021-05-15 16:18:02 +02:00
5e04aabf37 show pipes in verbose 2021-05-15 12:34:21 +02:00
59d53e9664 --recv-pipe and --send-pipe implemented. Added CmdItem to make CmdPipe more consitent 2021-05-11 00:59:26 +02:00
171f0ac5ad as final step we now can do system piping. fixes #50 2021-05-09 14:03:57 +02:00
0ce3bf1297 python 2 compat 2021-05-09 13:04:22 +02:00
c682665888 python 2 compat 2021-05-09 11:09:55 +02:00
086cfe570b run everything in either local shell (shell=true), or remote shell (ssh). this it to allow external shell piping 2021-05-09 10:56:30 +02:00
521d1078bd working on send pipe 2021-05-03 20:25:49 +02:00
8ea178af1f test re-replication 2021-05-03 00:03:22 +02:00
3e39e1553e allow re-replication of a backup with the same name. (now filters on target_path instead of received-status when selecting when appropriate. also shows notes about this) 2021-05-02 22:51:20 +02:00
f0cc2bca2a improved progress reporting. improved no_thinning performance 2021-04-23 20:31:37 +02:00
59b0c23a20 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-22 01:16:53 +02:00
401a3f73cc better handling of piped exit codes 2021-04-22 01:12:41 +02:00
8ec5ed2f4f extra test 2021-04-22 00:14:14 +02:00
8318b2f9bf Update README.md 2021-04-21 00:19:21 +02:00
72b97ab2e8 doc. bump version 2021-04-21 00:04:58 +02:00
a16a038f0e doc 2021-04-20 23:43:20 +02:00
fc0da9d380 skip encryption if not supported 2021-04-20 23:34:41 +02:00
31be12c0bf doc fix 2021-04-20 23:24:59 +02:00
176f04b302 proper encryption/decryption support. also fixes #60 2021-04-20 23:20:54 +02:00
7696d8c16d working on encryption 2021-04-20 21:22:27 +02:00
190a73ec10 merge 2021-04-20 21:06:46 +02:00
2bf015e127 still working on encryption 2021-04-20 21:05:44 +02:00
671eda7386 working on proper encryption support 2021-04-20 18:39:57 +02:00
3d4b26cec3 fix test 2021-04-19 10:54:55 +02:00
c0ea311e18 fix 2021-04-18 22:57:03 +02:00
b7b2723b2e coverage 2021-04-18 22:47:53 +02:00
ec1d3ff93e fix #74.
also changed internal logic: thinner now isnt actually created when --no-thinning is active.

When using --no-thinning --no-snapshot --no-send it still does usefull stuff like checking common snapshots and showing incompatible snapshots
2021-04-18 14:30:23 +02:00
352d5e6094 coverage 2021-04-17 22:33:12 +02:00
488ff6f551 coverage 2021-04-17 22:29:47 +02:00
f52b8bbf58 coverage 2021-04-17 21:53:27 +02:00
e47d461999 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-17 21:08:34 +02:00
a920744b1e allow regression to run from pycharm by using sudo with passwd file. you also need to suid root zfs and zpool 2021-04-15 13:56:24 +02:00
63f423a201 Update README.md 2021-04-14 09:58:20 +02:00
db6523f3c0 remove unused code 2021-04-13 21:21:35 +02:00
6b172dce2d fix 2021-04-13 17:13:13 +02:00
85d493469d fix 2021-04-13 17:07:01 +02:00
bef3be4955 tests, nicer error message for invalid schedule str 2021-04-13 16:42:55 +02:00
f9719ba87e tests 2021-04-13 16:32:43 +02:00
4b97f789df run() now uses CmdPipe for better pipe handling and cleaner code 2021-04-12 18:16:42 +02:00
ed7cd41ad7 accidently broke testing 2021-04-12 13:56:06 +02:00
62e19d97c2 fix 2021-04-10 14:56:25 +02:00
594a2664c4 more tests 2021-04-10 14:52:52 +02:00
d8fbc96be6 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-04-10 14:34:42 +02:00
61bb590112 also enable branch-coverage 2021-04-10 14:29:27 +02:00
86ea5e49f4 working on better piping system 2021-04-07 23:58:41 +02:00
01642365c7 Update README.md 2021-04-06 23:51:40 +02:00
4910b1dfb5 seperated piping 2021-04-05 22:18:14 +02:00
966df73d2f added doctypes 2021-04-01 22:48:17 +02:00
69ed827c0d doc 2021-03-30 11:53:11 +02:00
e79f6ac157 speedup testing 2021-03-27 21:07:31 +01:00
59efd070a1 autoruntests 2021-03-27 20:08:05 +01:00
80c1bdad1c update usage text as requested in #54 2021-03-17 00:11:52 +01:00
cf72de7c28 cleanedup and improved select-code 2021-03-16 23:40:31 +01:00
686bb48bda restore progres. verify --destroy-incompabible output. 2021-03-11 11:57:51 +01:00
6a48b8a2a9 nicer help 2021-03-03 15:48:44 +01:00
477b66c342 split encoder 2021-03-03 11:50:11 +01:00
a4155f970e completed --no-thinning option. fixes #54 2021-03-01 00:04:10 +01:00
0c9d14bf32 splitting up more stuff 2021-02-27 21:36:03 +01:00
1f5955ccec splitting up more stuff 2021-02-27 14:35:47 +01:00
1b94a849db splitting up more stuff 2021-02-26 20:10:39 +01:00
98c40c6df5 splitting up more stuff 2021-02-26 20:10:20 +01:00
b479ab9c98 more clear name 2021-02-21 22:25:42 +01:00
a0fb205e75 cleaning up code 2021-02-21 22:08:35 +01:00
d3ce222921 some more refactoring. splitting of smaller cleaner functions. started work on --no-thinning 2021-02-19 00:27:37 +01:00
36e134eb75 Update README.md 2021-02-16 12:28:00 +01:00
628cd75941 fix 2021-02-07 21:17:40 +01:00
1da14c5c3b forgot to document child 2021-02-07 21:09:03 +01:00
c83d0fcff2 explain inheritance 2021-02-07 20:59:19 +01:00
573af341b8 fixes 2021-02-07 18:04:00 +01:00
a64168bee2 fixes 2021-02-07 18:00:58 +01:00
c678ae5f9a fixed --ignore-transfer-errors 2021-02-07 16:57:12 +01:00
e95967db53 update version 2021-02-07 16:23:09 +01:00
29e6c056d1 p2 fix pythonpackages 2021-02-07 16:03:26 +01:00
deadbe9383 p2 fix coverals 2021-02-07 15:09:59 +01:00
5cbec2e06f p2 fix 2021-02-07 15:04:36 +01:00
66d284f183 p2 fix 2021-02-07 14:54:50 +01:00
ae64fd6e99 p2 fix 2021-02-07 14:52:04 +01:00
305bd3008d fix python2 support 2021-02-07 14:48:19 +01:00
17fec7d1ee dont support directdownload anymore 2021-02-07 14:13:22 +01:00
f5b0a4f272 fixed python packaging 2021-02-07 14:09:57 +01:00
06c8416771 splitup all classes into seperate files 2021-02-07 14:02:31 +01:00
4f9b7b6cef cleanup repository tree. use entry_points in setuptools instead of symlink 2021-02-07 13:00:04 +01:00
0214584e4c add link 2021-02-06 22:51:50 +01:00
b6627eb389 add --no-progress to workaround zfs recv bug 2021-02-06 22:22:38 +01:00
48f1f6ec5d readme 2021-02-06 21:55:21 +01:00
e33e47c10c cleaned up debug output. 2021-02-06 21:36:10 +01:00
01f0078ccf performance tests 2021-02-06 21:20:50 +01:00
9fad773bfb test manydatasets 2021-02-06 20:44:27 +01:00
7493a0bc55 testing scalability of snapshots. optimized performance by making --no-holds also not use holds on the target. (this is also more like expected behavious) 2021-02-06 19:54:32 +01:00
0649f42d66 prevent unwanted zfs list in snapshot only mode 2021-02-06 14:05:44 +01:00
6fefadf884 formatting 2021-02-06 13:58:40 +01:00
ce05e1ba4c cleanup caching 2021-02-06 13:37:54 +01:00
35584149ff cleanup 2021-02-06 12:44:03 +01:00
427f74d2f0 wip 2021-02-06 12:05:55 +01:00
9b2c321fe7 rollback 2021-02-05 15:45:51 +01:00
d02a6df0f3 better keepsource/target=0 testing 2021-02-02 23:11:13 +01:00
7fb5a7db92 fix 2021-02-02 22:00:30 +01:00
64e53952fe fix #57 2021-02-02 21:33:45 +01:00
b7ef6c9528 Merge pull request #65 from mariusvw/feature/whitespace
Whitespace corrections
2021-02-02 21:24:04 +01:00
b7b3e785ce Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-02-02 21:21:16 +01:00
50070bc70f fix #61 2021-02-02 21:21:09 +01:00
0fb0faccae Whitespace corrections 2021-02-02 21:01:13 +01:00
ab77b91d4e Update README.md 2021-02-02 20:32:40 +01:00
bbe7a112fd no longer git tag in setup.py 2021-02-02 20:27:31 +01:00
8a09a49951 Update python-publish.yml 2021-02-02 20:17:07 +01:00
8092b08e7f Update python-publish.yml 2021-02-02 19:36:20 +01:00
075c96bf47 Create python-publish.yml 2021-02-02 19:22:56 +01:00
2cbfa0f38a Rename manual.yml to regression.yml 2021-02-02 18:51:21 +01:00
47c50583c0 Update README.md 2021-02-02 18:44:31 +01:00
e40eb71f39 Update README.md 2021-02-02 18:43:28 +01:00
fab3bf3b3e Update manual.yml 2021-02-02 16:46:24 +01:00
1afe2407fa Update manual.yml 2021-02-02 16:44:47 +01:00
3550100099 Update run_tests 2021-02-02 15:56:22 +01:00
9e2a6dba3d Update manual.yml 2021-02-02 15:55:49 +01:00
b31b74aa92 Update manual.yml 2021-02-02 15:45:03 +01:00
222568ad31 Update manual.yml 2021-02-02 15:39:15 +01:00
35f739b8dd Update manual.yml 2021-02-02 15:37:05 +01:00
52f9e0d810 Update .travis.yml 2021-02-02 15:33:29 +01:00
7bbf041a70 Update manual.yml 2021-02-02 15:30:21 +01:00
b6796ded84 Update manual.yml 2021-02-02 15:28:29 +01:00
930bf6cf50 Update manual.yml 2021-02-02 15:26:36 +01:00
fcc8470758 Update manual.yml 2021-02-02 15:05:29 +01:00
fde4a5ed6a Update manual.yml 2021-02-02 14:55:57 +01:00
12c45f95c3 Update manual.yml 2021-02-02 14:53:34 +01:00
10e7e5b95f Update manual.yml 2021-02-02 14:52:35 +01:00
656b435a7f Update manual.yml 2021-02-02 14:50:06 +01:00
1c1c6647f1 Update manual.yml 2021-02-02 14:48:13 +01:00
39514de86a Update manual.yml 2021-02-02 14:42:28 +01:00
49f6e36749 Update manual.yml 2021-02-02 14:41:55 +01:00
371de417a4 Update manual.yml 2021-02-02 14:33:09 +01:00
6965c04dc6 Update manual.yml 2021-02-02 14:30:58 +01:00
9e645e9237 Update manual.yml 2021-02-02 14:27:04 +01:00
fa372799f5 Update manual.yml 2021-02-02 14:26:18 +01:00
da55436863 Update manual.yml 2021-02-02 14:21:35 +01:00
4d0db0b5d3 Update manual.yml 2021-02-02 14:20:46 +01:00
75f5e0ee9f Update manual.yml 2021-02-02 14:16:31 +01:00
d0ab60168b Update manual.yml 2021-02-02 14:15:13 +01:00
b48726185c Create manual.yml 2021-02-02 14:14:38 +01:00
74da005870 easier to use proxmox backup script 2021-02-02 14:04:03 +01:00
6e0664ad8e Merge pull request #62 from dkew8/master
debug in zfs-autobackup
2021-01-17 11:28:13 +01:00
f508e72f5e Update zfs-autobackup 2021-01-01 14:54:22 -06:00
4918a2c055 huge cleanup of codestyle and other linter warnings. should be no functional changes. 2020-08-27 22:33:00 +02:00
e65d1ac860 point to 3.0 2020-08-26 13:48:18 +02:00
51 changed files with 5152 additions and 3173 deletions

.github/FUNDING.yml (new file, 5 lines)

@ -0,0 +1,5 @@
# These are supported funding model platforms
github: psy0rz
ko_fi: psy0rz
custom: https://paypal.me/psy0rz

.github/workflows/python-publish.yml (new file, 46 lines)

@ -0,0 +1,46 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Upload Python Package

on:
  release:
    types: [created]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.x
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'
      - name: Set up Python 2.x
        uses: actions/setup-python@v2
        with:
          python-version: '2.x'
      - name: Install dependencies 3.x
        run: |
          python -m pip install --upgrade pip
          pip3 install setuptools wheel twine
      - name: Install dependencies 2.x
        run: |
          python2 -m pip install --upgrade pip
          pip2 install setuptools wheel twine
      - name: Build and publish
        env:
          TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
          TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
        run: |
          python3 setup.py sdist bdist_wheel
          python2 setup.py sdist bdist_wheel
          twine check dist/*
          twine upload dist/*

.github/workflows/regression.yml (new file, 76 lines)

@ -0,0 +1,76 @@
name: Regression tests

on: ["push", "pull_request"]

jobs:
  ubuntu20:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2.3.4
      - name: Prepare
        run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
      - name: Regression test
        run: sudo -E ./tests/run_tests
      - name: Coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls --service=github || true

  ubuntu18:
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2.3.4
      - name: Prepare
        run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
      - name: Regression test
        run: sudo -E ./tests/run_tests
      - name: Coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls --service=github || true

  ubuntu18_python2:
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2.3.4
      - name: Set up Python 2.x
        uses: actions/setup-python@v2
        with:
          python-version: '2.x'
      - name: Prepare
        run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
      - name: Regression test
        run: sudo -E ./tests/run_tests
      - name: Coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls --service=github || true

.gitignore (3 lines changed)

@ -9,3 +9,6 @@ __pycache__
.coverage
*.pyc
python2.env
venv
.idea
password.sh


@ -1,31 +0,0 @@
jobs:
  include:
    - os: linux
      dist: xenial
      language: python
      python: 2.7
    - os: linux
      dist: xenial
      language: python
      python: 3.6
    - os: linux
      dist: bionic
      language: python
      python: 2.7
    - os: linux
      dist: bionic
      language: python
      python: 3.6

before_install:
  - sudo apt-get update
  - sudo apt-get install zfsutils-linux

script:
  # - sudo -E ./ngrok.sh
  - sudo -E ./run_tests
  # - sudo -E pip --version

README.md (629 lines changed)

@ -1,628 +1,61 @@
# ZFS autobackup
[![Coverage Status](https://coveralls.io/repos/github/psy0rz/zfs_autobackup/badge.svg)](https://coveralls.io/github/psy0rz/zfs_autobackup) [![Build Status](https://travis-ci.org/psy0rz/zfs_autobackup.svg?branch=master)](https://travis-ci.org/psy0rz/zfs_autobackup)
## New in v3
* Complete rewrite, cleaner object oriented code.
* Python 3 and 2 support.
* Automated regression against real ZFS environment.
* Installable via [pip](https://pypi.org/project/zfs-autobackup/).
* Backwards compatible with your current backups and parameters.
* Progressive thinning (via a destroy schedule. default schedule should be fine for most people)
* Cleaner output, with optional color support (pip install colorama).
* Clear distinction between local and remote output.
* Summary at the beginning, displaying what will happen and the current thinning-schedule.
* More efficient destroying/skipping snapshots on the fly. (no more space issues if your backup is way behind)
* Progress indicator (--progress)
* Better property management (--set-properties and --filter-properties)
* Better resume handling, automatically abort invalid resumes.
* More robust error handling.
* Prepared for future enhancements.
* Supports raw backups for encryption.
* Custom SSH client config.
[![Tests](https://github.com/psy0rz/zfs_autobackup/workflows/Regression%20tests/badge.svg)](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [![Coverage Status](https://coveralls.io/repos/github/psy0rz/zfs_autobackup/badge.svg)](https://coveralls.io/github/psy0rz/zfs_autobackup) [![Python Package](https://github.com/psy0rz/zfs_autobackup/workflows/Upload%20Python%20Package/badge.svg)](https://pypi.org/project/zfs-autobackup/)
## Introduction
This is a tool I wrote to make replicating ZFS datasets easy and reliable.
ZFS-autobackup tries to be the most reliable and easiest-to-use tool, while still having all the features.
You can either use it as a **backup** tool, a **replication** tool or a **snapshot** tool.
You can select what to backup by setting a custom `ZFS property`. This makes it easy to add/remove specific datasets, or to just backup your whole pool.
Other settings are just specified on the commandline: Simply set up and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
Since it's using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error: you can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
An important feature that's missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.
Another nice thing is progress reporting: it's very useful with HUGE datasets, when you want to know how many hours/days it will take.
## Features
* Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
* Low learning curve: no complex daemons or services, no additional software or networking needed.
* Plays nicely with existing replication systems. (Like Proxmox HA)
* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
* Creates consistent snapshots. (takes all snapshots at once, atomically.)
* Multiple backup modes:
  * Backup local data on the same server.
  * "push" local data to a backup-server via SSH.
  * "pull" remote data from a server via SSH and backup it locally.
  * "pull+push": pull data from a server while pushing the backup to another server, with zero trust between source and target.
* Can be scheduled via a simple cronjob or run directly from the commandline.
* Supports resuming of interrupted transfers. (via the zfs extensible_dataset feature)
* Backups and snapshots can be named to prevent conflicts. (multiple backups from and to the same datasets are no problem)
* Always creates a new snapshot before starting.
* Checks everything, but tries to continue on non-fatal errors when possible. (reports the error count when done)
* ZFS encryption support: Can decrypt / encrypt or even re-encrypt datasets during transfer.
* Supports sending with compression. (Using pigz, zstd etc)
* IO buffering to speed up transfers.
* Bandwidth rate limiting.
* Resilient to errors.
* Ability to manually 'finish' failed backups to see what's going on.
* Easy to debug and has a test-mode. Actual unix commands are printed.
* Uses **progressive thinning** for older snapshots.
* Uses zfs-holds on important snapshots so they can't be accidentally destroyed.
* Automatic resuming of failed transfers.
* Can continue from existing common snapshots. (e.g. easy migration)
* Gracefully handles datasets that no longer exist on the source.
* Complete and clean logging.
* Easy installation:
  * Just install zfs-autobackup via pip, or download it manually.
  * Only needs to be installed on one side.
  * Written in python and uses zfs-commands; no special 3rd-party dependencies or compiled libraries needed.
  * No separate config files or properties. Just one zfs-autobackup command you can copy/paste in your backup script.
## Installation
## Getting started
### Using pip
The recommended way on most servers is to use [pip](https://pypi.org/project/zfs-autobackup/):
```console
[root@server ~]# pip install --upgrade zfs-autobackup
```
This can also be used to upgrade zfs-autobackup to the newest stable version.
### Using easy_install
On older servers you might have to use easy_install
```console
[root@server ~]# easy_install zfs-autobackup
```
### Direct download
It's also possible to just download <https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/bin/zfs-autobackup> and run it directly.
The only requirement that is sometimes missing is the `argparse` python module. Optionally you can install `colorama` for colors.
It should work with python 2.7 and higher.
## Example
In this example we're going to backup a machine called `server1` to a machine called `backup`.
### Setup SSH login
zfs-autobackup needs passwordless login via ssh. This means generating an ssh key and copying it to the remote server.
#### Generate SSH key on `backup`
On the backup-server that runs zfs-autobackup you need to create an SSH key. You only need to do this once.
Use the `ssh-keygen` command and leave the passphrase empty:
```console
root@backup:~# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:McJhCxvaxvFhO/3e8Lf5gzSrlTWew7/bwrd2U2EHymE root@backup
The key's randomart image is:
+---[RSA 2048]----+
| + = |
| + X * E . |
| . = B + o o . |
| . o + o o.|
| S o .oo|
| . + o= +|
| . ++==.|
| .+o**|
| .. +B@|
+----[SHA256]-----+
root@backup:~#
```
#### Copy SSH key to `server1`
Now you need to copy the public part of the key to `server1`
The `ssh-copy-id` command is a handy tool to automate this. It will just ask for your password.
```console
root@backup:~# ssh-copy-id root@server1.server.com
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@server1.server.com'"
and check to make sure that only the key(s) you wanted were added.
root@backup:~#
```
This allows the backup-server to login to `server1` as root without password.
### Select filesystems to backup
It's important to choose a unique and consistent backup name. In this case we name our backup: `offsite1`.
On the source zfs system set the ```autobackup:offsite1``` zfs property to true:
```console
[root@server1 ~]# zfs set autobackup:offsite1=true rpool
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 true inherited from rpool
...
```
Because we don't want to backup everything, we can exclude certain filesystems by setting the property to false:
```console
[root@server1 ~]# zfs set autobackup:offsite1=false rpool/swap
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 false local
...
```
### Running zfs-autobackup
Run the script on the backup server and pull the data from the server specified by --ssh-source.
```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --progress --verbose
#### Settings summary
[Source] Datasets on: server1.server.com
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Send all datasets that have 'autobackup:offsite1=true' or 'autobackup:offsite1=child'
[Target] Datasets are local
[Target] Keep the last 10 snapshots.
[Target] Keep every 1 day, delete after 1 week.
[Target] Keep every 1 week, delete after 1 month.
[Target] Keep every 1 month, delete after 1 year.
[Target] Receive datasets under: backup/server1
#### Selecting
[Source] rpool: Selected (direct selection)
[Source] rpool/ROOT: Selected (inherited selection)
[Source] rpool/ROOT/server1-1: Selected (inherited selection)
[Source] rpool/data: Selected (inherited selection)
[Source] rpool/data/vm-100-disk-0: Selected (inherited selection)
[Source] rpool/swap: Ignored (disabled)
#### Snapshotting
[Source] rpool: No changes since offsite1-20200218175435
[Source] rpool/ROOT: No changes since offsite1-20200218175435
[Source] rpool/data: No changes since offsite1-20200218175435
[Source] Creating snapshot offsite1-20200218180123
#### Sending and thinning
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175547: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175706: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180049: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180123: receiving incremental
[Target] backup/server1/rpool/data@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/data/vm-100-disk-0@offsite1-20200218175435: receiving full
...
```
Note that this is called a "pull" backup: The backup server pulls the backup from the server. This is usually the preferred way.
It's also possible to let a server push its backup to the backup-server. However this has security implications. In that case you would set up the SSH keys the other way around and use the --ssh-target parameter on the server.
### Automatic backups
Now every time you run the command, zfs-autobackup will create a new snapshot and replicate your data.
Older snapshots will eventually be deleted, depending on the `--keep-source` and `--keep-target` settings. (The defaults are shown above under the 'Settings summary')
Once you've got the correct settings for your situation, you can just store the command in a cronjob.
Or just create a script and run it manually when you need it.
## Use as snapshot tool
You can use zfs-autobackup to only make snapshots.
Just don't specify the target-path:
```console
root@ws1:~# zfs-autobackup test --verbose
zfs-autobackup v3.0-rc12 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
#### Source settings
[Source] Datasets are local
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Selects all datasets that have property 'autobackup:test=true' (or childs of datasets that have 'autobackup:test=child')
#### Selecting
[Source] test_source1/fs1: Selected (direct selection)
[Source] test_source1/fs1/sub: Selected (inherited selection)
[Source] test_source2/fs2: Ignored (only childs)
[Source] test_source2/fs2/sub: Selected (inherited selection)
#### Snapshotting
[Source] Creating snapshots test-20200710125958 in pool test_source1
[Source] Creating snapshots test-20200710125958 in pool test_source2
#### Thinning source
[Source] test_source1/fs1@test-20200710125948: Destroying
[Source] test_source1/fs1/sub@test-20200710125948: Destroying
[Source] test_source2/fs2/sub@test-20200710125948: Destroying
#### All operations completed successfully
(No target_path specified, only operated as snapshot tool.)
```
This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy.
## Thinning out obsolete snapshots
The thinner is the component that destroys old snapshots on the source and the target.
The thinner operates "stateless": There is nothing in the name or properties of a snapshot that indicates how long it will be kept. Every time zfs-autobackup runs, it will look at the timestamps of all the existing snapshots. From there it will determine which snapshots are obsolete according to your schedule. The advantage of this stateless system is that you can always change the schedule.
Note that the thinner will ONLY destroy snapshots that match the naming pattern of zfs-autobackup. If you use `--other-snapshots`, it won't destroy those snapshots after replicating them to the target.
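The thinner can do this statelessly because each zfs-autobackup snapshot name embeds its creation time (e.g. `offsite1-20200218175435`). As a rough illustration only (not the tool's actual parser), timestamps could be recovered from such names like this:

```python
# Sketch: recover creation times from zfs-autobackup-style snapshot names,
# e.g. "offsite1-20200218175435". Illustrative, not the tool's real code.
import re
from datetime import datetime

def snapshot_time(snapshot_name, backup_name):
    """Return a datetime for a matching snapshot name, or None if the name
    doesn't follow the <backup_name>-YYYYMMDDHHMMSS pattern."""
    m = re.fullmatch(re.escape(backup_name) + r"-(\d{14})", snapshot_name)
    if not m:
        return None  # foreign snapshot: the thinner never touches it
    return datetime.strptime(m.group(1), "%Y%m%d%H%M%S")

print(snapshot_time("offsite1-20200218175435", "offsite1"))  # 2020-02-18 17:54:35
```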
### Destroying missing datasets
When a dataset has been destroyed or deselected on the source, but still exists on the target, we call it a missing dataset. Missing datasets will still be thinned out according to the schedule.
The final snapshot will never be destroyed, unless you specify a **deadline** with the `--destroy-missing` option:
In that case it will look at the last snapshot we took and determine if it is older than the deadline you specified. e.g: `--destroy-missing 30d` will start destroying things 30 days after the last snapshot.
#### After the deadline
When the deadline has passed, all our snapshots except the last one will be destroyed, regardless of the normal thinning schedule.
The dataset has to meet the following conditions before it is finally, really destroyed:
* The dataset has no direct child-filesystems or volumes.
* The only snapshot left is the last one created by zfs-autobackup.
* The remaining snapshot has no clones.
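Putting those conditions together, the `--destroy-missing` logic amounts to a deadline check plus three safety checks. The sketch below is hypothetical Python; the `dataset` and snapshot attributes are made-up placeholders, not zfs-autobackup's real API:

```python
# Hypothetical sketch of the --destroy-missing decision.
# All dataset/snapshot attributes are illustrative placeholders.
import time

def may_destroy_missing(dataset, deadline_seconds, now=None):
    """True if a missing dataset's final snapshot (and the dataset itself)
    may be destroyed."""
    now = now or time.time()
    last = dataset.our_snapshots[-1]               # last zfs-autobackup snapshot
    if now - last.timestamp < deadline_seconds:    # e.g. "30d" -> 30 * 86400
        return False                               # deadline not reached yet
    # Past the deadline, the dataset may only go when:
    return (not dataset.children                   # no child filesystems/volumes
            and dataset.snapshots == [last]        # only our last snapshot remains
            and not last.clones)                   # and that snapshot has no clones
```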
### Thinning schedule
The default thinning schedule is: `10,1d1w,1w1m,1m1y`.
The schedule consists of multiple rules separated by a `,`
A plain number specifies how many snapshots you want to always keep, regardless of time or interval.
The format of the other rules is: `<Interval><TTL>`.
* Interval: The minimum interval between the snapshots. Snapshots with intervals smaller than this will be destroyed.
* TTL: The maximum time-to-live of a snapshot; after that, it will be destroyed.
* These are the time units you can use for interval and TTL:
  * `y`: Years
  * `m`: Months
  * `w`: Weeks
  * `d`: Days
  * `h`: Hours
  * `min`: Minutes
  * `s`: Seconds
Since this might sound very complicated, the `--verbose` option will show you what it all means:
```console
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
```
A snapshot will only be destroyed if it is not needed anymore by ANY of the rules.
You can specify as many rules as you need. The order of the rules doesn't matter.
Keep in mind it's up to you to actually run zfs-autobackup often enough: If you want to keep hourly snapshots, you have to make sure you run it at least every hour.
However, it's no problem if you run it more or less often than that: The thinner will still keep an optimal set of snapshots to match your schedule as well as possible.
If you want to keep as few snapshots as possible, just specify 0. (`--keep-source=0` for example)
If you want to keep ALL the snapshots, just specify a very high number.
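To make the rule format concrete, here is a minimal hypothetical sketch of parsing a schedule string such as the default `10,1d1w,1w1m,1m1y` into rules. The names and the exact unit lengths are assumptions chosen for this example, not zfs-autobackup's internals:

```python
# Sketch: parse a thinning schedule like "10,1d1w,1w1m,1m1y".
# Unit lengths are approximations chosen for this example.
import re

UNITS = {"y": 365 * 86400, "m": 30 * 86400, "w": 7 * 86400,
         "d": 86400, "h": 3600, "min": 60, "s": 1}
UNIT_RE = "min|y|m|w|d|h|s"  # "min" first, so it isn't matched as "m"

def parse_rule(rule):
    """Return (always_keep, interval_secs, ttl_secs) for one rule."""
    if rule.isdigit():
        return int(rule), None, None  # plain number: always keep N snapshots
    m = re.fullmatch(r"(\d+)(%s)(\d+)(%s)" % (UNIT_RE, UNIT_RE), rule)
    if not m:
        raise ValueError("invalid schedule rule: " + rule)
    interval = int(m.group(1)) * UNITS[m.group(2)]
    ttl = int(m.group(3)) * UNITS[m.group(4)]
    return 0, interval, ttl

rules = [parse_rule(r) for r in "10,1d1w,1w1m,1m1y".split(",")]
# -> [(10, None, None), (0, 86400, 604800), (0, 604800, 2592000), (0, 2592000, 31536000)]
```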
### More details about the Thinner
We will give a practical example of how the thinner operates.
Say we have 3 thinner rules:
* We want to keep daily snapshots for 7 days.
* We want to keep weekly snapshots for 4 weeks.
* We want to keep monthly snapshots for 12 months.
So far we have taken 4 snapshots at random moments:
![thinner example](https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/doc/thinner.png)
For every rule, the thinner will divide the timeline in blocks and assign each snapshot to a block.
A block can only be assigned one snapshot: If multiple snapshots fall into the same block, it is assigned to the oldest one that we want to keep.
The colors show to which block a snapshot belongs:
* Snapshot 1: This snapshot belongs to daily block 1, weekly block 0 and monthly block 0. However the daily block is too old.
* Snapshot 2: Since weekly block 0 and monthly block 0 already have a snapshot, it only belongs to daily block 4.
* Snapshot 3: This snapshot belongs to daily block 8 and weekly block 1.
* Snapshot 4: Since daily block 8 already has a snapshot, this one doesn't belong to anything and can be deleted right away. (it will be kept for now since it's the last snapshot)
zfs-autobackup will re-evaluate this on every run: As soon as a snapshot doesn't belong to any block anymore it will be destroyed.
Snapshots on the source that still have to be sent to the target won't be destroyed, of course. (If the target still wants them, according to the target schedule)
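The block logic above can be sketched in a few lines of Python. This is a simplified illustration of the idea under the assumptions stated in the comments, not the tool's actual thinner:

```python
# Sketch of stateless thinning: per rule, split the timeline into
# interval-sized blocks and keep the oldest snapshot in each block
# until the rule's TTL expires. Illustrative only.
import time

def thin(snapshot_times, rules, now=None):
    """snapshot_times: sorted unix timestamps, oldest first.
    rules: [(interval_secs, ttl_secs), ...]. Returns (keeps, destroys)."""
    now = now or time.time()
    keep = set()
    for interval, ttl in rules:
        claimed = set()
        for ts in snapshot_times:            # oldest first
            if now - ts > ttl:
                continue                     # past its TTL: this rule lets it go
            block = ts // interval           # which block the snapshot falls in
            if block not in claimed:
                claimed.add(block)           # a block keeps only its oldest snapshot
                keep.add(ts)
    if snapshot_times:
        keep.add(snapshot_times[-1])         # the last snapshot is always kept
    return ([t for t in snapshot_times if t in keep],
            [t for t in snapshot_times if t not in keep])
```

A snapshot survives as long as at least one rule still claims it, which matches the "not needed anymore by ANY of the rules" behaviour described above.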
## Tips
* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
* You can split up the snapshotting and sending tasks by creating two cronjobs. Create a separate snapshotter-cronjob by just omitting target-path.
* Set the ```readonly``` property of the target filesystem to ```on```. This prevents changes on the target side. (Normally, if there are changes the next backup will fail and will require a zfs rollback.) Note that readonly means you can't change the CONTENTS of the dataset directly. It's still possible to receive new datasets and manipulate properties etc.
* Use ```--clear-refreservation``` to save space on your backup server.
* Use ```--clear-mountpoint``` to prevent the target server from mounting the backed-up filesystem in the wrong place during a reboot.
### Speeding up SSH
You can make your ssh connections persistent and greatly speed up zfs-autobackup:
On the backup-server add this to your ~/.ssh/config:
```console
Host *
ControlPath ~/.ssh/control-master-%r@%h:%p
ControlMaster auto
ControlPersist 3600
```
Thanks @mariusvw :)
### Specifying ssh port or options
The correct way to do this is by creating ~/.ssh/config:
```console
Host smartos04
Hostname 1.2.3.4
Port 1234
user root
Compression yes
```
This way you can just specify "smartos04" as the host.
It also enables compression, which is useful on slow links.
Look in man ssh_config for many more options.
## Usage
Here you find all the options:
```console
[root@server ~]# zfs-autobackup --help
usage: zfs-autobackup [-h] [--ssh-config SSH_CONFIG] [--ssh-source SSH_SOURCE]
[--ssh-target SSH_TARGET] [--keep-source KEEP_SOURCE]
[--keep-target KEEP_TARGET] [--other-snapshots]
[--no-snapshot] [--no-send] [--min-change MIN_CHANGE]
[--allow-empty] [--ignore-replicated] [--no-holds]
[--strip-path STRIP_PATH] [--clear-refreservation]
[--clear-mountpoint]
[--filter-properties FILTER_PROPERTIES]
[--set-properties SET_PROPERTIES] [--rollback]
[--destroy-incompatible] [--ignore-transfer-errors]
[--raw] [--test] [--verbose] [--debug] [--debug-output]
[--progress]
backup-name [target-path]
zfs-autobackup v3.0-rc12 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
positional arguments:
backup-name Name of the backup (you should set the zfs property
"autobackup:backup-name" to true on filesystems you
want to backup
target-path Target ZFS filesystem (optional: if not specified,
zfs-autobackup will only operate as snapshot-tool on
source)
optional arguments:
-h, --help show this help message and exit
--ssh-config SSH_CONFIG
Custom ssh client config
--ssh-source SSH_SOURCE
Source host to get backup from. (user@hostname)
Default None.
--ssh-target SSH_TARGET
Target host to push backup to. (user@hostname) Default
None.
--keep-source KEEP_SOURCE
Thinning schedule for old source snapshots. Default:
10,1d1w,1w1m,1m1y
--keep-target KEEP_TARGET
Thinning schedule for old target snapshots. Default:
10,1d1w,1w1m,1m1y
--other-snapshots Send over other snapshots as well, not just the ones
created by this tool.
--no-snapshot Don't create new snapshots (useful for finishing
uncompleted backups, or cleanups)
--no-send Don't send snapshots (useful for cleanups, or if you
want a serperate send-cronjob)
--min-change MIN_CHANGE
Number of bytes written after which we consider a
dataset changed (default 1)
--allow-empty If nothing has changed, still create empty snapshots.
(same as --min-change=0)
--ignore-replicated Ignore datasets that seem to be replicated some other
way. (No changes since lastest snapshot. Useful for
proxmox HA replication)
--no-holds Don't lock snapshots on the source. (Useful to allow
proxmox HA replication to switches nodes)
--strip-path STRIP_PATH
Number of directories to strip from target path (use 1
when cloning zones between 2 SmartOS machines)
--clear-refreservation
Filter "refreservation" property. (recommended, safes
space. same as --filter-properties refreservation)
--clear-mountpoint Set property canmount=noauto for new datasets.
(recommended, prevents mount conflicts. same as --set-
properties canmount=noauto)
--filter-properties FILTER_PROPERTIES
List of properties to "filter" when receiving
filesystems. (you can still restore them with zfs
inherit -S)
--set-properties SET_PROPERTIES
List of propererties to override when receiving
filesystems. (you can still restore them with zfs
inherit -S)
--rollback Rollback changes to the latest target snapshot before
starting. (normally you can prevent changes by setting
the readonly property on the target_path to on)
--destroy-incompatible
Destroy incompatible snapshots on target. Use with
care! (implies --rollback)
--ignore-transfer-errors
Ignore transfer errors (still checks if received
filesystem exists. useful for acltype errors)
--raw For encrypted datasets, send data exactly as it exists
on disk.
--test dont change anything, just show what would be done
(still does all read-only operations)
--verbose verbose output
--debug Show zfs commands that are executed, stops after an
exception.
--debug-output Show zfs commands and their output/exit codes. (noisy)
--progress show zfs progress output (to stderr). Enabled by
default on ttys.
When a filesystem fails, zfs_backup will continue and report the number of
failures at that end. Also the exit code will indicate the number of failures.
```
## Troubleshooting
### It keeps asking for my SSH password
You forgot to set up automatic login via SSH keys; see the example above for how to do this.
### It says 'cannot receive incremental stream: invalid backup stream'
This usually means you've created a new snapshot on the target side during a backup. If you restart zfs-autobackup, it will automatically abort the invalid partially received snapshot and start over.
### It says 'internal error: Invalid argument'
In some cases (Linux -> FreeBSD) this means certain properties are not fully supported on the target system.
Try using something like: --filter-properties xattr
## Restore example
Restoring can be done with simple zfs commands. For example, use this to restore a specific SmartOS disk image to a temporary restore location:
```console
root@fs1:/home/psy# zfs send fs1/zones/backup/zfsbackups/smartos01.server.com/zones/a3abd6c8-24c6-4125-9e35-192e2eca5908-disk0@smartos01_fs1-20160110000003 | ssh root@2.2.2.2 "zfs recv zones/restore"
```
After that you can rename the disk image from the temporary location to the location of a new SmartOS machine you've created.
## Monitoring with Zabbix-jobs
You can monitor backups by using my zabbix-jobs script. (<https://github.com/psy0rz/stuff/tree/master/zabbix-jobs>)
Put this command directly after the zfs_backup command in your cronjob:
```console
zabbix-job-status backup_smartos01_fs1 daily $?
```
This will update the zabbix server with the exit code and will also alert you if the job didn't run for more than 2 days.
## Backing up a proxmox cluster with HA replication
Due to the nature of proxmox we had to make a few enhancements to zfs-autobackup. This will probably also benefit other systems that use their own replication in combination with zfs-autobackup.
All data under rpool/data can be on multiple nodes of the cluster. The naming of those filesystems is unique over the whole cluster. Because of this we should backup rpool/data of all nodes to the same destination. This way we won't have duplicate backups of the filesystems that are replicated. Because of various options, you can even migrate hosts and zfs-autobackup will be fine. (and it will get the next backup from the new node automatically)
In the example below we have 3 nodes, named h4, h5 and h6.
The backup will go to a machine named smartos03.
### Preparing the proxmox nodes
On each node select the filesystems as following:
```console
root@h4:~# zfs set autobackup:h4_smartos03=true rpool
root@h4:~# zfs set autobackup:h4_smartos03=false rpool/data
root@h4:~# zfs set autobackup:data_smartos03=child rpool/data
```
* rpool will be backed up the usual way, and is named h4_smartos03. (each node will have a unique name)
* rpool/data will be excluded from the usual backup
* The CHILDREN of rpool/data will be selected for a cluster-wide backup named data_smartos03. (each node uses the same backup name)
### Preparing the backup server
Extra options needed for proxmox with HA:
* --no-holds: To allow proxmox to destroy our snapshots if a VM migrates to another node.
* --ignore-replicated: To ignore the replicated filesystems of proxmox on the receiving proxmox nodes. (e.g: only backup from the node where the VM is active)
* --min-change 200000: Ignore-replicated works by checking if there are no changes since the last snapshot. However, for some reason proxmox always has some small changes. (Probably house-keeping data or something? This was always fine and suddenly changed with an update)
I use the following backup script on the backup server:
```shell
for H in h4 h5 h6; do
echo "################################### DATA $H"
#backup data filesystems to a common place
./zfs-autobackup --ssh-source root@$H data_smartos03 zones/backup/zfsbackups/pxe1_data --clear-refreservation --clear-mountpoint --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 200000 --no-holds $@
zabbix-job-status backup_$H""_data_smartos03 daily $? >/dev/null 2>/dev/null
echo "################################### RPOOL $H"
#backup rpool to own place
./zfs-autobackup --ssh-source root@$H $H""_smartos03 zones/backup/zfsbackups/$H --verbose --clear-refreservation --clear-mountpoint --ignore-transfer-errors $@
zabbix-job-status backup_$H""_smartos03 daily $? >/dev/null 2>/dev/null
done
```
Please look at our wiki to [Get started](https://github.com/psy0rz/zfs_autobackup/wiki).
# Sponsor list
This project was sponsored by:
* JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )

File diff suppressed because it is too large.


@ -1 +0,0 @@
zfs-autobackup


@ -1,17 +0,0 @@
#!/bin/bash
if ! [ -e ngrok ]; then
wget -O ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
unzip ngrok.zip
fi
{
mkfifo pipe
echo "Executing nc"
nc -k -l -v 8888 <pipe | ( while true; do bash >pipe 2>&1; echo "restarting" ;sleep 1; done )
killall -SIGINT ngrok && echo "ngrok terminated"
} &
{
echo "Executing ngrok"
./ngrok authtoken $NGROK_TOKEN
./ngrok tcp 8888 --log=stdout
} &
wait


@ -1,6 +1,6 @@
colorama
argparse
coverage==4.5.4
coverage
python-coveralls
unittest2
mock


@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -e
rm -rf dist


@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -e
rm -rf dist
@ -14,4 +14,3 @@ source tokentest
python3 -m twine check dist/*
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose


@ -1,24 +1,29 @@
 import setuptools
-import bin.zfs_autobackup
+from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
-import os
-os.system("git tag -m ' ' -a v{}".format(bin.zfs_autobackup.VERSION))
 with open("README.md", "r") as fh:
     long_description = fh.read()
 setuptools.setup(
-    name="zfs_autobackup",
-    version=bin.zfs_autobackup.VERSION,
+    name="zfs_autobackup",
+    version=ZfsAutobackup.VERSION,
     author="Edwin Eefting",
     author_email="edwin@datux.nl",
     description="ZFS autobackup is used to periodicly backup ZFS filesystems to other locations. It tries to be the most friendly to use and easy to debug ZFS backup tool.",
     long_description=long_description,
     long_description_content_type="text/markdown",
     url="https://github.com/psy0rz/zfs_autobackup",
-    scripts=["bin/zfs-autobackup"],
+    entry_points={
+        'console_scripts':
+            [
+                'zfs-autobackup = zfs_autobackup.ZfsAutobackup:cli',
+                'zfs-autoverify = zfs_autobackup.ZfsAutoverify:cli',
+            ]
+    },
     packages=setuptools.find_packages(),
     classifiers=[
         "Programming Language :: Python :: 2",
         "Programming Language :: Python :: 3",


@ -1,275 +0,0 @@
from basetest import *
class TestZfsNode(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage=True
# generate a resumable state
#NOTE: this generates two resumable test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
def generate_resume(self):
r=shelltest("zfs set compress=off test_source1 test_target1")
#big change on source
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
#waste space on target
r=shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
#should fail and leave resume token (if supported)
self.assertTrue(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
#free up space
r=shelltest("rm /test_target1/waste")
#sync
r=shelltest("zfs umount test_target1")
r=shelltest("zfs mount test_target1")
#resume initial backup
def test_initial_resume(self):
#inital backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
self.generate_resume()
#--test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
print(buf.getvalue())
#did we really resume?
if "0.6.5" in ZFS_USERSPACE:
#abort this late, for beter coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
#should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
print(buf.getvalue())
#did we really resume?
if "0.6.5" in ZFS_USERSPACE:
#abort this late, for beter coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
r=shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
#resume incremental backup
def test_incremental_resume(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
#incremental backup leaves resume token
with patch('time.strftime', return_value="20101111000001"):
self.generate_resume()
#--test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
print(buf.getvalue())
#did we really resume?
if "0.6.5" in ZFS_USERSPACE:
#abort this late, for beter coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
#should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
print(buf.getvalue())
#did we really resume?
if "0.6.5" in ZFS_USERSPACE:
#abort this late, for beter coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
r=shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# generate an invalid resume token, and verify if its aborted automaticly
def test_initial_resumeabort(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
#inital backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
self.generate_resume()
#remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
#NOTE: it can only abort the initial dataset if it has no subs
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
#--test try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
#try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# generate an invalid resume token, and verify that it is aborted automatically
def test_incremental_resumeabort(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
#incremental backup, leaves resume token
with patch('time.strftime', return_value="20101111000001"):
self.generate_resume()
#remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
#--test try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
#try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
#create a resume situation where the other side doesn't want the snapshot anymore (should abort the resume)
def test_abort_unwanted_resume(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
#generate resume
with patch('time.strftime', return_value="20101111000001"):
self.generate_resume()
with OutputIO() as buf:
with redirect_stdout(buf):
#incremental, doesn't want the previous snapshot anymore
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-target=0 --debug --allow-empty".split(" ")).run())
print(buf.getvalue())
self.assertIn(": aborting resume, since", buf.getvalue())
r=shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
""")
def test_missing_common(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
#remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
with patch('time.strftime', return_value="20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
############# TODO:
def test_ignoretransfererrors(self):
self.skipTest("todo: create some kind of situation where zfs recv exits with an error but transfer is still ok (happens in practice with acltype)")


@ -1,20 +0,0 @@
from basetest import *
class TestZfsNode(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage=True
# #resume initial backup
# def test_keepsource0(self):
# #somehow only specifying --allow-empty --keep-source 0 failed:
# with patch('time.strftime', return_value="20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
# with patch('time.strftime', return_value="20101111000001"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())


@ -1,123 +0,0 @@
from basetest import *
class TestZfsNode(unittest2.TestCase):
def setUp(self):
prepare_zpools()
# return super().setUp()
def test_consistent_snapshot(self):
logger=Logger()
description="[Source]"
node=ZfsNode("test", logger, description=description)
with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-1",100000)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",1)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",0)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-2
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-2
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-2
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
def test_getselected(self):
logger=Logger()
description="[Source]"
node=ZfsNode("test", logger, description=description)
s=pformat(node.selected_datasets)
print(s)
#basics
self.assertEqual (s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
#caching, so expect same result after changing it
subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
self.assertEqual (s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
def test_validcommand(self):
logger=Logger()
description="[Source]"
node=ZfsNode("test", logger, description=description)
with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
with self.subTest("test valid option"):
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
def test_supportedsendoptions(self):
logger=Logger()
description="[Source]"
node=ZfsNode("test", logger, description=description)
# -D is probably always supported
self.assertGreater(len(node.supported_send_options),0)
def test_supportedrecvoptions(self):
logger=Logger()
description="[Source]"
#NOTE: this could hang via ssh if we don't close filehandles properly (which was a previous bug)
node=ZfsNode("test", logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)
if __name__ == '__main__':
unittest.main()

6
tests/autoruntests Executable file

@ -0,0 +1,6 @@
#!/bin/bash
#NOTE: run from top directory
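#entr(1) watches the listed files and re-runs the whole test suite whenever one of them changes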
find tests/*.py zfs_autobackup/*.py | entr -r ./tests/run_tests $@


@ -1,4 +1,6 @@
# To run tests as non-root, use this hack:
# chmod 4755 /usr/sbin/zpool /usr/sbin/zfs
import subprocess
import random
@ -8,9 +10,10 @@ import unittest2
import subprocess
import time
from pprint import *
from bin.zfs_autobackup import *
from zfs_autobackup.ZfsAutobackup import *
from zfs_autobackup.ZfsAutoverify import *
from mock import *
import contextlib
import contextlib
import sys
import io
@ -58,7 +61,9 @@ def redirect_stderr(target):
def shelltest(cmd):
"""execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
ret=(subprocess.check_output(cmd , shell=True).decode('utf-8'))
ret=(subprocess.check_output("SUDO_ASKPASS=./password.sh sudo -A "+cmd , shell=True).decode('utf-8'))
print("######### result of: {}".format(cmd))
print(ret)
print("#########")

5
tests/run_test Executable file

@ -0,0 +1,5 @@
#!/bin/bash
#run one test. start from main directory
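#extra arguments are passed straight through to unittest discover; -vvv is very verbose and -f stops at the first failure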
python -m unittest discover tests $@ -vvvf


@ -1,4 +1,8 @@
#!/bin/bash
#!/bin/bash
SCRIPTDIR=`dirname $0`
#cd $SCRIPTDIR || exit 1
if [ "$USER" != "root" ]; then
@ -6,8 +10,6 @@ if [ "$USER" != "root" ]; then
exit 1
fi
#reactivate python environment, if any (useful in Travis)
[ "$VIRTUAL_ENV" ] && source $VIRTUAL_ENV/bin/activate
# tests need ssh access to localhost
if ! [ -e /root/.ssh/id_rsa ]; then
@ -16,16 +18,11 @@ if ! [ -e /root/.ssh/id_rsa ]; then
ssh -oStrictHostKeyChecking=no localhost true || exit 1
fi
coverage run --source bin.zfs_autobackup -m unittest discover -vv
coverage run --branch --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
EXIT=$?
echo
echo
coverage report
#this does automatic Travis CI / https://coveralls.io/ integration:
if which coveralls > /dev/null; then
echo "Submitting to coveralls.io:"
coveralls
fi
exit $EXIT

123
tests/test_cmdpipe.py Normal file

@ -0,0 +1,123 @@
from basetest import *
from zfs_autobackup.CmdPipe import CmdPipe,CmdItem
class TestCmdPipe(unittest2.TestCase):
def test_single(self):
"""single process stdout and stderr"""
p=CmdPipe(readonly=False, inp=None)
err=[]
out=[]
p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
self.assertEqual(out, ["/","/"])
self.assertIsNone(executed)
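#note the handler flow used throughout these tests: every CmdItem carries its own
#stderr_handler/exit_handler, while the stdout_handler passed to execute() only
#sees the stdout that comes out of the end of the pipe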
def test_input(self):
"""test stdinput"""
p=CmdPipe(readonly=False, inp="test")
err=[]
out=[]
p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, [])
self.assertEqual(out, ["test"])
self.assertIsNone(executed)
def test_pipe(self):
"""test piped"""
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(err3, [])
self.assertEqual(out, ["TEsT"])
self.assertIsNone(executed)
#test str representation as well
self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)")
def test_pipeerrors(self):
"""test piped stderrs """
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
self.assertEqual(out, [])
self.assertIsNone(executed)
def test_exitcode(self):
"""test piped exitcodes """
p=CmdPipe(readonly=False)
err1=[]
err2=[]
err3=[]
out=[]
p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(err3, [])
self.assertEqual(out, [])
self.assertIsNone(executed)
def test_readonly_execute(self):
"""everything readonly, just should execute"""
p=CmdPipe(readonly=True)
err1=[]
err2=[]
out=[]
def true_exit(exit_code):
return True
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(out, ["test2"])
self.assertTrue(executed)
def test_readonly_skip(self):
"""one command not readonly, skip"""
p=CmdPipe(readonly=True)
err1=[]
err2=[]
out=[]
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, [])
self.assertEqual(err2, [])
self.assertEqual(out, [])
self.assertTrue(executed)


@ -13,34 +13,34 @@ class TestZfsNode(unittest2.TestCase):
def test_destroymissing(self):
#initial backup
with patch('time.strftime', return_value="10101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
with self.subTest("Should do nothing yet"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
self.assertNotIn(": Destroy missing", buf.getvalue())
with self.subTest("missing dataset of us that still has children"):
#just deselect it so it counts as 'missing'
shelltest("zfs set autobackup:test=child test_source1/fs1")
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#should have done the snapshot cleanup for destroy missing:
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue())
self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
@ -54,7 +54,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf):
#100y: latest should not be old enough, while the second-to-latest snapshot IS old enough:
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
print(buf.getvalue())
self.assertIn(": Waiting for deadline", buf.getvalue())
@ -62,7 +62,7 @@ class TestZfsNode(unittest2.TestCase):
#past deadline, destroy
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
print(buf.getvalue())
self.assertIn("sub: Destroying", buf.getvalue())
@ -75,7 +75,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
@ -90,22 +90,22 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#now tries to destroy our own last snapshot (before the final destroy of the dataset)
self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
#but can't finish because it's still in use:
self.assertIn("fs1: Error during destoy missing", buf.getvalue())
self.assertIn("fs1: Error during --destroy-missing", buf.getvalue())
shelltest("zfs destroy test_target1/clone1")
with self.subTest("Should leave test_source1 parent"):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#should have done the snapshot cleanup for destroy missing:
@ -113,7 +113,7 @@ class TestZfsNode(unittest2.TestCase):
with OutputIO() as buf:
with redirect_stdout(buf), redirect_stderr(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
print(buf.getvalue())
#on second run it sees the dangling ex-parent but doesn't know what to do with it (since it has no snapshot of its own)
@ -130,6 +130,6 @@ test_target1/test_source1
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-10101111000000
test_target1/test_source2/fs2/sub@test-19101111000000
test_target1/test_source2/fs2/sub@test-20101111000000
""")

193
tests/test_encryption.py Normal file

@ -0,0 +1,193 @@
from zfs_autobackup.CmdPipe import CmdPipe
from basetest import *
import time
# We have to do a LOT to properly test encryption/decryption/raw transfers
#
# For every scenario we need at least:
# - plain source dataset
# - encrypted source dataset
# - plain target path
# - encrypted target path
# - do a full transfer
# - do an incremental transfer
# Scenarios:
# - Raw transfer
# - Decryption transfer (--decrypt)
# - Encryption transfer (--encrypt)
# - Re-encryption transfer (--decrypt --encrypt)
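# A rough sketch (not part of the test suite) of the plain zfs commands these
# scenarios boil down to; the dataset names are made up, the flags are from zfs(8):
#
#   zfs send -w src/enc@snap | zfs recv tgt/enc          # raw: stream stays encrypted, no source key needed
#   zfs send src/enc@snap | zfs recv tgt/plain           # --decrypt: source key loaded, target stores plaintext
#   zfs send src/plain@snap | zfs recv tgt/encroot/fs    # --encrypt: target inherits encryption from its parent
#
# --decrypt --encrypt combines the last two: decrypt with the source key, store under the target key.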
class TestZfsEncryption(unittest2.TestCase):
def setUp(self):
prepare_zpools()
try:
shelltest("zfs get encryption test_source1")
except:
self.skipTest("Encryption not supported on this ZFS version.")
def prepare_encrypted_dataset(self, key, path, unload_key=False):
# create an encrypted dataset at the given path
shelltest("rm /tmp/zfstest.key 2>/dev/null;true")
shelltest("echo {} > /tmp/zfstest.key".format(key))
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on {}".format(path))
if unload_key:
shelltest("zfs unmount {}".format(path))
shelltest("zfs unload-key {}".format(path))
# r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
def test_raw(self):
"""send encrypted data unaltered (standard operation)"""
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/test_source1/fs1/encryptedsourcekeyless -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_decrypt(self):
"""decrypt data and store unencrypted (--decrypt)"""
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_encrypt(self):
"""send normal data set and store encrypted on the other side (--encrypt) issue #60 """
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
def test_reencrypt(self):
"""reencrypt data (--decrypt --encrypt) """
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")


@ -1,5 +1,5 @@
from basetest import *
from zfs_autobackup.ExecuteNode import *
print("THIS TEST REQUIRES SSH TO LOCALHOST")
@ -15,7 +15,7 @@ class TestExecuteNode(unittest2.TestCase):
self.assertEqual(node.run(["echo","test"]), ["test"])
with self.subTest("error exit code"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
node.run(["false"])
#
@ -26,9 +26,9 @@ class TestExecuteNode(unittest2.TestCase):
with self.subTest("multiline tabsplit"):
self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])
#escaping test (shouldn't be a problem locally, single quotes can be a problem remotely via ssh)
#escaping test
with self.subTest("escape test"):
s="><`'\"@&$()$bla\\/.*!#test _+-={}[]|"
s="><`'\"@&$()$bla\\/.* !#test _+-={}[]|${bla} $bla"
self.assertEqual(node.run(["echo",s]), [s])
#return stderr as well; trigger stderr by listing something non-existing
@ -45,12 +45,21 @@ class TestExecuteNode(unittest2.TestCase):
#input a string and check it via cat
with self.subTest("stdin input string"):
self.assertEqual(node.run(["cat"], input="test"), ["test"])
self.assertEqual(node.run(["cat"], inp="test"), ["test"])
#a command that wants input while we don't have input shouldn't hang forever.
with self.subTest("stdin process with input=None (shouldn't hang)"):
with self.subTest("stdin process with inp=None (shouldn't hang)"):
self.assertEqual(node.run(["cat"]), [])
# let the system do the piping with an unescaped |:
with self.subTest("system piping test"):
#first make sure the actual | character is still properly escaped:
self.assertEqual(node.run(["echo","|"]), ["|"])
#now pipe
self.assertEqual(node.run(["echo", "abc", node.PIPE, "tr", "a", "A" ]), ["Abc"])
def test_basics_local(self):
node=ExecuteNode(debug_output=True)
self.basics(node)
@ -64,7 +73,7 @@ class TestExecuteNode(unittest2.TestCase):
def test_readonly(self):
node=ExecuteNode(debug_output=True, readonly=True)
self.assertEqual(node.run(["echo","test"], readonly=False), None)
self.assertEqual(node.run(["echo","test"], readonly=False), [])
self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])
@ -73,40 +82,44 @@ class TestExecuteNode(unittest2.TestCase):
def pipe(self, nodea, nodeb):
with self.subTest("pipe data"):
output=nodea.run(["dd", "if=/dev/zero", "count=1000"], pipe=True)
self.assertEqual(nodeb.run(["md5sum"], input=output), ["816df6f64deba63b029ca19d880ee10a -"])
output=nodea.run(["dd", "if=/dev/zero", "count=1000"],pipe=True)
self.assertEqual(nodeb.run(["md5sum"], inp=output), ["816df6f64deba63b029ca19d880ee10a -"])
with self.subTest("exit code both ends of pipe ok"):
output=nodea.run(["true"], pipe=True)
nodeb.run(["true"], input=output)
nodeb.run(["true"], inp=output)
with self.subTest("error on pipe input side"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["false"], pipe=True)
nodeb.run(["true"], input=output)
nodeb.run(["true"], inp=output)
with self.subTest("error on both sides, ignore exit codes"):
output=nodea.run(["false"], pipe=True, valid_exitcodes=[])
nodeb.run(["false"], inp=output, valid_exitcodes=[])
with self.subTest("error on pipe output side "):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["true"], pipe=True)
nodeb.run(["false"], input=output)
nodeb.run(["false"], inp=output)
with self.subTest("error on both sides of pipe"):
with self.assertRaises(subprocess.CalledProcessError):
with self.assertRaises(ExecuteError):
output=nodea.run(["false"], pipe=True)
nodeb.run(["false"], input=output)
nodeb.run(["false"], inp=output)
with self.subTest("check stderr on pipe output side"):
output=nodea.run(["true"], pipe=True)
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], input=output, return_stderr=True, valid_exitcodes=[0,2])
output=nodea.run(["true"], pipe=True, valid_exitcodes=[0])
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[2])
self.assertEqual(stdout,[])
self.assertRegex(stderr[0], "nonexistingfile" )
with self.subTest("check stderr on pipe input side (should be only printed)"):
output=nodea.run(["ls", "nonexistingfile"], pipe=True)
(stdout, stderr)=nodeb.run(["true"], input=output, return_stderr=True, valid_exitcodes=[0,2])
output=nodea.run(["ls", "nonexistingfile"], pipe=True, valid_exitcodes=[2])
(stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0])
self.assertEqual(stdout,[])
self.assertEqual(stderr,[] )
self.assertEqual(stderr,[])
@ -132,4 +145,4 @@ class TestExecuteNode(unittest2.TestCase):
if __name__ == '__main__':
unittest.main()
unittest.main()


@ -0,0 +1,314 @@
from basetest import *
class TestExternalFailures(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage = True
# generate a resumable state
# NOTE: this generates two resumable receives: test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
def generate_resume(self):
r = shelltest("zfs set compress=off test_source1 test_target1")
# big change on source
r = shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
# waste space on target
r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
# should fail and leave resume token (if supported)
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# free up space
r = shelltest("rm /test_target1/waste")
# sync
r = shelltest("zfs umount test_target1")
r = shelltest("zfs mount test_target1")
# resume initial backup
def test_initial_resume(self):
# initial backup, leaves resume token
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# --test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
print(buf.getvalue())
# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
# should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
print(buf.getvalue())
# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# resume incremental backup
def test_incremental_resume(self):
# initial backup
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup leaves resume token
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# --test should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
print(buf.getvalue())
# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
# should resume and succeed
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
print(buf.getvalue())
# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# generate an invalid resume token, and verify that it is aborted automatically
def test_initial_resumeabort(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup, leaves resume token
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
# NOTE: it can only abort the initial dataset if it has no subs
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
# --test try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# generate an invalid resume token, and verify that it is aborted automatically
def test_incremental_resumeabort(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup, leaves resume token
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
# --test try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
# create a resume situation where the other side doesn't want the snapshot anymore (should abort the resume)
def test_abort_unwanted_resume(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want the previous snapshot anymore
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
print(buf.getvalue())
self.assertIn("Aborting resume, we dont want that snapshot anymore.", buf.getvalue())
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
""")
# test with empty snapshot list (this was a bug)
def test_abort_resume_emptysnapshotlist(self):
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want the previous snapshot anymore
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run())
print(buf.getvalue())
self.assertIn("Aborting resume, its obsolete", buf.getvalue())
def test_missing_common(self):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
with patch('time.strftime', return_value="test-20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#UPDATE: of course the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
def test_ignoretransfererrors(self):
self.skipTest("Not sure how to implement a test for this without some serious hacking and patching.")
# #recreate target pool without any features
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
#
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all test_target1")
#
# self.assertMultiLineEqual(r, """
# test_target1
# test_target1/test_source1
# test_target1/test_source1/fs1
# test_target1/test_source1/fs1@test-20101111000002
# test_target1/test_source1/fs1/sub
# test_target1/test_source1/fs1/sub@test-20101111000002
# test_target1/test_source2
# test_target1/test_source2/fs2
# test_target1/test_source2/fs2/sub
# test_target1/test_source2/fs2/sub@test-20101111000002
# """)

52
tests/test_log.py Normal file

@ -0,0 +1,52 @@
from zfs_autobackup.LogConsole import LogConsole
from basetest import *
class TestLog(unittest2.TestCase):
def test_colored(self):
"""test with color output"""
with OutputIO() as buf:
with redirect_stdout(buf):
l= LogConsole(show_verbose=False, show_debug=False, color=True)
l.verbose("verbose")
l.debug("debug")
with redirect_stdout(buf):
l=LogConsole(show_verbose=True, show_debug=True, color=True)
l.verbose("verbose")
l.debug("debug")
with redirect_stderr(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=True)
l.error("error")
print(list(buf.getvalue()))
self.assertEqual(list(buf.getvalue()), ['\x1b', '[', '2', '2', 'm', ' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '2', 'm', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '1', 'm', '\x1b', '[', '1', 'm', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\x1b', '[', '0', 'm', '\n'])
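# the escape sequences above are ANSI SGR codes: ESC[22m (normal intensity) for
# verbose, ESC[32m (green) for debug and ESC[31m plus ESC[1m (bright red) for
# errors, each line terminated by ESC[0m (reset)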
def test_nocolor(self):
"""test without color output"""
with OutputIO() as buf:
with redirect_stdout(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=False)
l.verbose("verbose")
l.debug("debug")
with redirect_stdout(buf):
l=LogConsole(show_verbose=True, show_debug=True, color=False)
l.verbose("verbose")
l.debug("debug")
with redirect_stderr(buf):
l=LogConsole(show_verbose=False, show_debug=False, color=False)
l.error("error")
print(list(buf.getvalue()))
self.assertEqual(list(buf.getvalue()), [' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\n', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\n', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\n'])
# zfs_autobackup.LogConsole.colorama=False

105
tests/test_regressions.py Normal file

@ -0,0 +1,105 @@
from basetest import *
class TestZfsNode(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_keepsource0target10queuedsend(self):
"""Test if thinner doesnt destroy too much early on if there are no common snapshots YET. Issue #84"""
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")
def test_excludepaths(self):
"""Test issue #103"""
shelltest("zfs create test_target1/target_shouldnotbeexcluded")
shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
shelltest("zfs create test_target1/target")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1/target --no-progress --verbose --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/target
test_target1/target/test_source1
test_target1/target/test_source1/fs1
test_target1/target/test_source1/fs1@test-20101111000000
test_target1/target/test_source1/fs1/sub
test_target1/target/test_source1/fs1/sub@test-20101111000000
test_target1/target/test_source2
test_target1/target/test_source2/fs2
test_target1/target/test_source2/fs2/sub
test_target1/target/test_source2/fs2/sub@test-20101111000000
test_target1/target/test_target1
test_target1/target/test_target1/target_shouldnotbeexcluded
test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
test_target1/target_shouldnotbeexcluded
test_target1/target_shouldnotbeexcluded@test-20101111000000
""")

97
tests/test_scaling.py Normal file

@ -0,0 +1,97 @@
from basetest import *
from zfs_autobackup.ExecuteNode import ExecuteNode
run_orig=ExecuteNode.run
run_counter=0
def run_count(*args, **kwargs):
global run_counter
run_counter=run_counter+1
return (run_orig(*args, **kwargs))
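#the tests below patch ExecuteNode.run with run_count (via patch.object) to count
#how many shell commands one backup run issues; the expected_runs values are rough
#baselines, and the assertLess checks only trip when a change costs more than about
#half the snapshot/dataset count in extra commands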
class TestZfsScaling(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage = True
def test_manysnapshots(self):
"""count the number of commands when there are many snapshots."""
snapshot_count=100
print("Creating many snapshots...")
s=""
for i in range(1970,1970+snapshot_count):
s=s+"zfs snapshot test_source1/fs1@test-{:04}1111000000;".format(i)
shelltest(s)
global run_counter
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
expected_runs=343
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
expected_runs=47
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
def test_manydatasets(self):
"""count the number of commands when when there are many datasets"""
dataset_count=100
print("Creating many datasets...")
s=""
for i in range(0,dataset_count):
s=s+"zfs create test_source1/fs1/{};".format(i)
shelltest(s)
global run_counter
#first run
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with an impact of more than O(dataset_count/2)
expected_runs=743
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
#second run, should have higher number of expected_runs
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with a performance impact of more than O(dataset_count/2)
expected_runs=947
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)


@ -0,0 +1,88 @@
import zfs_autobackup.compressors
from basetest import *
import time
class TestSendRecvPipes(unittest2.TestCase):
"""test input/output pipes for zfs send and recv"""
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_send_basics(self):
"""send basics (remote/local send pipe)"""
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
def test_compress(self):
"""send basics (remote/local send pipe)"""
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
with self.subTest("compress "+compress):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
def test_buffer(self):
"""test different buffer configurations"""
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
def test_rate(self):
"""test rate limit"""
start=time.time()
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
#not a great way of verifying, but it works: at --rate=50k the transfer should take more than 5 seconds.
self.assertGreater(time.time()-start, 5)


@@ -1,6 +1,9 @@
from basetest import *
import pprint
#randint is different in python 2 vs 3
from zfs_autobackup.Thinner import Thinner
# randint is different in python 2 vs 3
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))
@@ -20,6 +23,23 @@ class TestThinner(unittest2.TestCase):
# return super().setUp()
def test_exceptions(self):
with self.assertRaisesRegexp(Exception, "^Invalid period"):
ThinnerRule("12X12m")
with self.assertRaisesRegexp(Exception, "^Invalid ttl"):
ThinnerRule("12d12X")
with self.assertRaisesRegexp(Exception, "^Period cant be"):
ThinnerRule("12d1d")
with self.assertRaisesRegexp(Exception, "^Invalid schedule"):
ThinnerRule("XXX")
with self.assertRaisesRegexp(Exception, "^Number of"):
Thinner("-1")
def test_incremental(self):
ok=['2023-01-03 10:53:16',
'2024-01-02 15:43:29',
@@ -72,7 +92,7 @@ class TestThinner(unittest2.TestCase):
result=[]
for thing in things:
result.append(str(thing))
print("Thinner result incremental:")
pprint.pprint(result)
@@ -128,12 +148,12 @@ class TestThinner(unittest2.TestCase):
result=[]
for thing in things:
result.append(str(thing))
print("Thinner result full:")
pprint.pprint(result)
self.assertEqual(result, ok)
if __name__ == '__main__':
unittest.main()
# if __name__ == '__main__':
# unittest.main()

tests/test_verify.py Normal file

@@ -0,0 +1,99 @@
from basetest import *
# test zfs-verify:
# - when there is no common snapshot at all
# - when encryption key not loaded
# - --test mode
# - --fs-compare methods
# - on snapshots of datasets:
# - that are correct
# - that are different
# - on snapshots of zvols
# - that are correct
# - that are different
# - test all directions (local, remote/local, local/remote, remote/remote)
#
class TestZfsEncryption(unittest2.TestCase):
def setUp(self):
prepare_zpools()
#create actual test files and data
shelltest("zfs create test_source1/fs1/ok_filesystem")
shelltest("cp tests/*.py /test_source1/fs1/ok_filesystem")
shelltest("zfs create test_source1/fs1/bad_filesystem")
shelltest("cp tests/*.py /test_source1/fs1/bad_filesystem")
shelltest("zfs create -V 1M test_source1/fs1/ok_zvol")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
shelltest("zfs create -V 1M test_source1/fs1/bad_zvol")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/bad_zvol count=1 bs=512k")
#create backup
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --no-holds".split(" ")).run())
#Do an ugly hack to create a fault in the bad filesystem
#In zfs-autoverify it doesn't matter that the snapshot isn't actually the same snapshot, so this hack works
shelltest("zfs destroy test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
shelltest("zfs mount test_target1/test_source1/fs1/bad_filesystem")
shelltest("echo >> /test_target1/test_source1/fs1/bad_filesystem/test_verify.py")
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
#do the same hack for the bad zvol
shelltest("zfs destroy test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
shelltest("dd if=/dev/urandom of=/dev/zvol/test_target1/test_source1/fs1/bad_zvol count=1 bs=1")
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
# make sure we can't accidentally compare current data
shelltest("zfs mount test_target1/test_source1/fs1/ok_filesystem")
shelltest("rm /test_source1/fs1/ok_filesystem/*")
shelltest("rm /test_source1/fs1/bad_filesystem/*")
shelltest("dd if=/dev/zero of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
def test_verify(self):
with self.subTest("default --test"):
self.assertFalse(ZfsAutoverify("test test_target1 --verbose --test".split(" ")).run())
with self.subTest("rsync, remote source and target. (not supported, all 6 fail)"):
self.assertEqual(6, ZfsAutoverify("test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync".split(" ")).run())
def runchecked(testname, command):
with self.subTest(testname):
with OutputIO() as buf:
result=None
with redirect_stderr(buf):
result=ZfsAutoverify(command.split(" ")).run()
print(buf.getvalue())
self.assertEqual(2,result)
self.assertRegex(buf.getvalue(), "bad_filesystem: FAILED:")
self.assertRegex(buf.getvalue(), "bad_zvol: FAILED:")
runchecked("rsync, remote source", "test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=rsync")
runchecked("rsync, remote target", "test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync")
runchecked("rsync, local", "test test_target1 --verbose --exclude-received --fs-compare=rsync")
runchecked("tar, remote source and remote target",
"test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, remote source",
"test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, remote target",
"test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, local", "test test_target1 --verbose --exclude-received --fs-compare=tar")
with self.subTest("no common snapshot"):
#destroy common snapshot, now 3 should fail
shelltest("zfs destroy test_source1/fs1/ok_zvol@test-20101111000000")
self.assertEqual(3, ZfsAutoverify("test test_target1 --verbose --exclude-received".split(" ")).run())


@@ -1,73 +1,66 @@
from zfs_autobackup.CmdPipe import CmdPipe
from basetest import *
import time
from zfs_autobackup.LogConsole import LogConsole
class TestZfsAutobackup(unittest2.TestCase):
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_invalidpars(self):
self.assertEqual(ZfsAutobackup("test test_target1 --keep-source -1".split(" ")).run(), 255)
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --keep-source -1".split(" ")).run(), 255)
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --resume --verbose --no-snapshot".split(" ")).run(), 0)
print(buf.getvalue())
self.assertIn("The --resume", buf.getvalue())
with OutputIO() as buf:
with redirect_stderr(buf):
self.assertEqual(ZfsAutobackup("test test_target_nonexisting --no-progress".split(" ")).run(), 255)
print(buf.getvalue())
# correct message?
self.assertIn("Please create this dataset", buf.getvalue())
def test_snapshotmode(self):
"""test snapshot tool mode"""
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test --verbose --allow-empty --keep-source 0".split(" ")).run())
#on source: only has 1 and 2 (1 had a hold)
#on target: has 0 and 1
#XXX:
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")
def test_defaults(self):
with self.subTest("no datasets selected"):
with OutputIO() as buf:
with redirect_stderr(buf):
with patch('time.strftime', return_value="20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())
print(buf.getvalue())
#correct message?
@@ -76,8 +69,8 @@ test_target1/test_source2/fs2/sub@test-20101111000001
with self.subTest("defaults with full verbose and debug"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -105,10 +98,10 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")
with self.subTest("bare defaults, allow empty"):
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
@@ -176,15 +169,15 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
#make sure time handling is correct: try to make snapshots a year apart and verify that only snapshots roughly 1y old are kept
with self.subTest("test time checking"):
with patch('time.strftime', return_value="20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
time_str="20111112000000" #month in the "future"
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
with patch('time.time', return_value=future_timestamp):
with patch('time.strftime', return_value="20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y".split(" ")).run())
with patch('time.strftime', return_value="test-20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -218,14 +211,13 @@ test_target1/test_source2/fs2/sub@test-20111111000000
test_target1/test_source2/fs2/sub@test-20111111000001
""")
def test_ignore_othersnaphots(self):
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -259,8 +251,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --other-snapshots".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -294,8 +286,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_nosnapshot(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
@@ -318,12 +310,10 @@ test_target1/test_source2/fs2
def test_nosend(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
@@ -343,12 +333,10 @@ test_target1
def test_ignorereplicated(self):
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --ignore-replicated".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created)
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
@@ -374,8 +362,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_noholds(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@@ -394,20 +382,20 @@ test_source2/fs3/sub userrefs - -
test_target1 userrefs - -
test_target1/test_source1 userrefs - -
test_target1/test_source1/fs1 userrefs - -
test_target1/test_source1/fs1@test-20101111000000 userrefs 1 -
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
test_target1/test_source1/fs1/sub userrefs - -
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 1 -
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
test_target1/test_source2 userrefs - -
test_target1/test_source2/fs2 userrefs - -
test_target1/test_source2/fs2/sub userrefs - -
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 1 -
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
""")
def test_strippath(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -432,18 +420,45 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000
""")
# def test_strippath_toomuch(self):
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(
# ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
# self.assertMultiLineEqual(r, """
# test_source1
# test_source1/fs1
# test_source1/fs1@test-20101111000000
# test_source1/fs1/sub
# test_source1/fs1/sub@test-20101111000000
# test_source2
# test_source2/fs2
# test_source2/fs2/sub
# test_source2/fs2/sub@test-20101111000000
# test_source2/fs3
# test_source2/fs3/sub
# test_target1
# test_target1/fs1
# test_target1/fs1@test-20101111000000
# test_target1/fs1/sub
# test_target1/fs1/sub@test-20101111000000
# test_target1/fs2
# test_target1/fs2/sub
# test_target1/fs2/sub@test-20101111000000
# """)
def test_clearrefres(self):
#on zfs utils 0.6.x -x isn't supported
r=shelltest("zfs recv -x bla test >/dev/null </dev/zero; echo $?")
if r=="\n2\n":
self.skipTest("This zfs-userspace version doesn't support -x")
r=shelltest("zfs set refreservation=1M test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-refreservation".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@@ -474,14 +489,14 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
def test_clearmount(self):
#on zfs utils 0.6.x -o isn't supported
r=shelltest("zfs recv -o bla=1 test >/dev/null </dev/zero; echo $?")
if r=="\n2\n":
self.skipTest("This zfs-userspace version doesn't support -o")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-mountpoint".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
@@ -513,35 +528,35 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_rollback(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#make change
r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should fail (busy)
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#rollback, should succeed
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --rollback".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
def test_destroyincompat(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#add multiple compatible snapshots (written is still 0)
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should be ok, is compatible
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#add incompatible snapshot by changing and snapshotting
r=shelltest("zfs mount test_target1/test_source1/fs1")
@@ -549,110 +564,64 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#--test should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty --test".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#--test should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
def test_keepsourcetarget(self):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
#should still have all snapshots
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@compatible1
test_target1/test_source1/fs1@compatible2
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")
#run again with keep=0
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source=0 --keep-target=0".split(" ")).run())
#should only have last snapshots
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
test_target1/test_source2/fs2/sub@test-20101111000003
""")
def test_ssh(self):
#test all ssh directions
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-target localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -696,26 +665,26 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_minchange(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make small change, use umount to reflect the changes immediately
r=shelltest("zfs set compress=off test_source1")
r=shelltest("touch /test_source1/fs1/change.txt")
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
#too small change, takes no snapshots
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make big change
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
#bigger change, should take snapshot
with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -747,8 +716,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_test(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -764,13 +733,13 @@ test_target1
""")
#actual make initial backup
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#test incremental
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -805,8 +774,8 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/test_source1")
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
@@ -835,13 +804,127 @@ test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
def test_keep0(self):
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
###########################
# TODO:
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())
def test_raw(self):
self.skipTest("todo: later when travis supports zfs 0.8")
#make snapshot, shouldn't delete 0
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
#make snapshot 2, shouldn't delete 0 since it has holds, but will delete 1 since it has no holds
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
#make another backup, but with no-holds; we should naturally end up with only number 3
with patch('time.strftime', return_value="test-20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000003
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000003
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000003
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000003
""")
# run snapshot-only for 4; since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="test-20101111000004"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000004
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000004
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000004
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000003
""")
def test_progress(self):
r=shelltest("dd if=/dev/zero of=/test_source1/data.txt bs=200000 count=1")
r = shelltest("zfs snapshot test_source1@test")
l=LogConsole(show_verbose=True, show_debug=False, color=False)
n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
with OutputIO() as buf:
with redirect_stderr(buf):
try:
n.run(["sleep", "2"], inp=sp)
except:
pass
print(buf.getvalue())
# correct message?
self.assertRegex(buf.getvalue(),".*>>> .*minutes left.*")


@@ -0,0 +1,81 @@
from basetest import *
import time
class TestZfsAutobackup31(unittest2.TestCase):
"""various new 3.1 features"""
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_no_thinning(self):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")
def test_re_replication(self):
"""test re-replication of something thats already a backup (new in v3.1-beta5)"""
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
#NOTE: it won't back up test_target1/a/test_source2/fs2/sub to test_target1/b since it doesn't have the zfs_autobackup property anymore.
self.assertMultiLineEqual(r,"""
test_target1/a/test_source1/fs1@test-20101111000000
test_target1/a/test_source1/fs1/sub@test-20101111000000
test_target1/a/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_source1/fs1@test-20101111000000
test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")
def test_zfs_compressed(self):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

tests/test_zfsnode.py Normal file

@@ -0,0 +1,176 @@
from basetest import *
from zfs_autobackup.LogStub import LogStub
from zfs_autobackup.ExecuteNode import ExecuteError
class TestZfsNode(unittest2.TestCase):
def setUp(self):
prepare_zpools()
# return super().setUp()
def test_consistent_snapshot(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
def test_consistent_snapshot_prepostcmds(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
with self.subTest("Test if all cmds are executed correctly (no failures)"):
with OutputIO() as buf:
with redirect_stdout(buf):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
)
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
with self.subTest("Failure in the middle, only pre1 and both post1 and post2 should be executed, no snapshot should be attempted"):
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
post_snapshot_cmds=["echo post1", "false", "echo post2"]
)
print(buf.getvalue())
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertNotIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
with self.subTest("Snapshot fails"):
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
#same snapshot name as before so it fails
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1", "echo post2"]
)
print(buf.getvalue())
self.assertIn("STDOUT > pre1", buf.getvalue())
self.assertIn("STDOUT > pre2", buf.getvalue())
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())
def test_getselected(self):
# should be excluded by property
shelltest("zfs create test_source1/fs1/subexcluded")
shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
# should be excluded by being unchanged
shelltest("zfs create test_source1/fs1/unchanged")
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
print(s)
# basics
self.assertEqual(s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
def test_validcommand(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
with self.subTest("test valid option"):
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
def test_supportedsendoptions(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
# -D probably always supported
self.assertGreater(len(node.supported_send_options), 0)
def test_supportedrecvoptions(self):
logger = LogStub()
description = "[Source]"
# NOTE: this could hang via ssh if we don't close filehandles properly. (which was a previous bug)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)
if __name__ == '__main__':
unittest.main()


@@ -0,0 +1,39 @@
# NOTE: this should inherit from (object) to function correctly with python 2.7
class CachedProperty(object):
""" A property that is only computed once per instance and
then stores the result in _cached_properties of the object.
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
propname = self.func.__name__
if not hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
if propname not in obj._cached_properties:
obj._cached_properties[propname] = self.func(obj)
# value = obj.__dict__[propname] = self.func(obj)
return obj._cached_properties[propname]
@staticmethod
def clear(obj):
"""clears cache of obj"""
if hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
@staticmethod
def is_cached(obj, propname):
if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
return True
else:
return False
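# A minimal usage sketch (illustrative, not part of this file): the first
# access computes the value, later accesses are served from _cached_properties,
# and CachedProperty.clear() forces recomputation.
class _Example(object):
    @CachedProperty
    def answer(self):
        print("computing...")
        return 42

e = _Example()
e.answer  # prints "computing..." and returns 42
e.answer  # returns 42 straight from the cache
CachedProperty.clear(e)  # next access recomputes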

zfs_autobackup/CmdPipe.py Normal file

@@ -0,0 +1,160 @@
import subprocess
import os
import select
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class CmdItem:
"""one command item, to be added to a CmdPipe"""
def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
"""create item. caller has to make sure cmd is properly escaped when using shell.
:type cmd: list of str
"""
self.cmd = cmd
self.readonly = readonly
self.stderr_handler = stderr_handler
self.exit_handler = exit_handler
self.shell = shell
self.process = None
def __str__(self):
"""return copy-pastable version of command."""
if self.shell:
# it's already copy-pastable for a shell:
return " ".join(self.cmd)
else:
# make it copy-pastable, will make a mess of quotes sometimes, but is correct
return " ".join(map(cmd_quote, self.cmd))
def create(self, stdin):
"""actually create the subprocess (called by CmdPipe)"""
# make sure the command gets all the data in utf8 format:
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
encoded_cmd = []
for arg in self.cmd:
encoded_cmd.append(arg.encode('utf-8'))
self.process = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
stderr=subprocess.PIPE, shell=self.shell)
class CmdPipe:
"""a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing"""
def __init__(self, readonly=False, inp=None):
"""
:param inp: input string for stdin
:param readonly: Only execute if entire pipe consist of readonly commands
"""
# list of commands + error handlers to execute
self.items = []
self.inp = inp
self.readonly = readonly
self._should_execute = True
def add(self, cmd_item):
"""adds a CmdItem to pipe.
:type cmd_item: CmdItem
"""
self.items.append(cmd_item)
if not cmd_item.readonly and self.readonly:
self._should_execute = False
def __str__(self):
"""transform whole pipe into oneliner for debugging and testing. this should generate a copy-pastable string for in a console """
ret = ""
for item in self.items:
if ret:
ret = ret + " | "
ret = ret + "({})".format(item) # this will do proper escaping to make it copypastable
return ret
def should_execute(self):
return self._should_execute
def execute(self, stdout_handler):
"""run the pipe. returns True all exit handlers returned true"""
if not self._should_execute:
return True
# first process should have actual user input as stdin:
selectors = []
# create processes
last_stdout = None
stdin = subprocess.PIPE
for item in self.items:
item.create(stdin)
selectors.append(item.process.stderr)
if last_stdout is None:
# we're the first process in the pipe, do we have some input?
if self.inp is not None:
# TODO: make streaming to support big inputs?
item.process.stdin.write(self.inp.encode('utf-8'))
item.process.stdin.close()
else:
# last stdout was piped to this stdin already, so close it because we don't need it anymore
last_stdout.close()
last_stdout = item.process.stdout
stdin = last_stdout
# monitor last stdout as well
selectors.append(last_stdout)
while True:
# wait for output on one of the stderrs or last_stdout
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
eof_count = 0
done_count = 0
# read line and call appropriate handlers
if last_stdout in read_ready:
line = last_stdout.readline().decode('utf-8').rstrip()
if line != "":
stdout_handler(line)
else:
eof_count = eof_count + 1
for item in self.items:
if item.process.stderr in read_ready:
line = item.process.stderr.readline().decode('utf-8').rstrip()
if line != "":
item.stderr_handler(line)
else:
eof_count = eof_count + 1
if item.process.poll() is not None:
done_count = done_count + 1
# all filehandles are eof and all processes are done (poll() is not None)
if eof_count == len(selectors) and done_count == len(self.items):
break
# close filehandles
last_stdout.close()
for item in self.items:
item.process.stderr.close()
# call exit handlers
success = True
for item in self.items:
if item.exit_handler is not None:
success=item.exit_handler(item.process.returncode) and success
return success
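# A minimal usage sketch (illustrative, not part of this file): build the
# equivalent of "echo hello | tr a-z A-Z", collect stdout lines and check both
# exit codes via the exit handlers.
def _example_pipe():
    lines = []
    pipe = CmdPipe(readonly=False)
    pipe.add(CmdItem(["echo", "hello"], exit_handler=lambda code: code == 0))
    pipe.add(CmdItem(["tr", "a-z", "A-Z"], exit_handler=lambda code: code == 0))
    success = pipe.execute(stdout_handler=lines.append)
    return success, lines  # (True, ["HELLO"]) when both commands exit 0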


@@ -0,0 +1,166 @@
import os
import select
import subprocess
from .CmdPipe import CmdPipe, CmdItem
from .LogStub import LogStub
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class ExecuteError(Exception):
pass
class ExecuteNode(LogStub):
"""an endpoint to execute local or remote commands via ssh"""
PIPE=1
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
"""ssh_config: custom ssh config
ssh_to: server you want to ssh to. none means local
readonly: only execute commands that don't make any changes (useful for testing-runs)
debug_output: show output and exit codes of commands in debugging output.
"""
self.ssh_config = ssh_config
self.ssh_to = ssh_to
self.readonly = readonly
self.debug_output = debug_output
def __repr__(self):
if self.ssh_to is None:
return "(local)"
else:
return self.ssh_to
def _parse_stdout(self, line):
"""parse stdout. can be overridden in subclass"""
if self.debug_output:
self.debug("STDOUT > " + line.rstrip())
def _parse_stderr(self, line, hide_errors):
"""parse stderr. can be overridden in subclass"""
if hide_errors:
self.debug("STDERR > " + line.rstrip())
else:
self.error("STDERR > " + line.rstrip())
def _quote(self, cmd):
"""return quoted version of command. if it has value PIPE it will add an actual | """
if cmd==self.PIPE:
return('|')
else:
return(cmd_quote(cmd))
def _shell_cmd(self, cmd):
"""prefix specified ssh shell to command and escape shell characters"""
ret=[]
#add remote shell
if not self.is_local():
ret=["ssh"]
if self.ssh_config is not None:
ret.extend(["-F", self.ssh_config])
ret.append(self.ssh_to)
ret.append(" ".join(map(self._quote, cmd)))
return ret
def is_local(self):
return self.ssh_to is None
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
return_stderr=False, pipe=False, return_all=False):
"""run a command on the node , checks output and parses/handle output and returns it
Either uses a local shell (sh -c) or remote shell (ssh) to execute the command.
Therefore the command can have actual pipes in it, if you don't want to use pipe=True to pipe stuff.
:param cmd: the actual command, should be a list, where the first item is the command
and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
(if you want to use system piping instead of python piping)
:param pipe: return CmdPipe instead of executing it.
:param inp: Can be None, a string or a CmdPipe that was previously returned.
:param tab_split: split tabbed files in output into a list
:param valid_exitcodes: list of valid exit codes for this command (checks exit code of both sides of a pipe)
Use [] to accept all exit codes. Default [0]
:param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
:param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
:param return_stderr: return both stdout and stderr as a tuple. (normally only returns stdout)
:param return_all: return both stdout and stderr and exit_code as a tuple. (normally only returns stdout)
"""
# create new pipe?
if not isinstance(inp, CmdPipe):
cmd_pipe = CmdPipe(self.readonly, inp)
else:
# add stuff to existing pipe
cmd_pipe = inp
# stderr parser
error_lines = []
returned_exit_code=None
def stderr_handler(line):
if tab_split:
error_lines.append(line.rstrip().split('\t'))
else:
error_lines.append(line.rstrip())
self._parse_stderr(line, hide_errors)
# exit code handler
if valid_exitcodes is None:
valid_exitcodes = [0]
def exit_handler(exit_code):
if self.debug_output:
self.debug("EXIT > {}".format(exit_code))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
return False
return True
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
cmd_pipe.add(cmd_item)
# return pipe instead of executing?
if pipe:
return cmd_pipe
# stdout parser
output_lines = []
def stdout_handler(line):
if tab_split:
output_lines.append(line.rstrip().split('\t'))
else:
output_lines.append(line.rstrip())
self._parse_stdout(line)
if cmd_pipe.should_execute():
self.debug("CMD > {}".format(cmd_pipe))
else:
self.debug("CMDSKIP> {}".format(cmd_pipe))
# execute and call handlers in CmdPipe
if not cmd_pipe.execute(stdout_handler=stdout_handler):
raise(ExecuteError("Last command returned error"))
if return_all:
return output_lines, error_lines, cmd_item.process and cmd_item.process.returncode
elif return_stderr:
return output_lines, error_lines
else:
return output_lines
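# A minimal usage sketch (illustrative, not part of this file): run a local
# command, then chain two commands via pipe=True, the same mechanism used to
# connect zfs send to zfs recv.
def _example_node():
    node = ExecuteNode()  # local; ExecuteNode(ssh_to="somehost") would run over ssh
    print(node.run(["echo", "hello"]))  # -> ["hello"]
    producer = node.run(["echo", "data"], pipe=True)  # returns a CmdPipe, nothing runs yet
    return node.run(["tr", "a-z", "A-Z"], inp=producer)  # executes the whole pipe -> ["DATA"]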


@@ -0,0 +1,66 @@
# python 2 compatibility
from __future__ import print_function
import sys
class LogConsole:
"""Log-class that outputs to console, adding colors if needed"""
def __init__(self, show_debug, show_verbose, color):
self.last_log = ""
self.show_debug = show_debug
self.show_verbose = show_verbose
if color:
# try to use color, fall back if colorama is not available
self.colorama=False
try:
import colorama
global colorama
self.colorama = True
except ImportError:
pass
else:
self.colorama=False
def error(self, txt):
if self.colorama:
print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
else:
print("! " + txt, file=sys.stderr)
sys.stderr.flush()
def warning(self, txt):
if self.colorama:
print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
else:
print(" NOTE: " + txt)
sys.stdout.flush()
def verbose(self, txt):
if self.show_verbose:
if self.colorama:
print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
else:
print(" " + txt)
sys.stdout.flush()
def debug(self, txt):
if self.show_debug:
if self.colorama:
print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
else:
print("# " + txt)
sys.stdout.flush()
def progress(self, txt):
"""print progress output to stderr (stays on same line)"""
self.clear_progress()
print(">>> {}\r".format(txt), end='', file=sys.stderr)
sys.stderr.flush()
def clear_progress(self):
try:
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
except ImportError:
# without colorama we can't clear the line; just return to its start
print("\r", end='', file=sys.stderr)
sys.stderr.flush()
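
A usage sketch, constructed the same way ZfsAuto.parse_args() does further down:

import sys
from zfs_autobackup.LogConsole import LogConsole

log = LogConsole(show_debug=False, show_verbose=True, color=sys.stdout.isatty())
log.verbose("Selected dataset rpool/data")  # printed: show_verbose is True
log.debug("EXIT > 0")                       # suppressed: show_debug is False
log.error("Connection lost")                # always printed, to stderr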

18
zfs_autobackup/LogStub.py Normal file

@ -0,0 +1,18 @@
# Used for base classes that don't implement their own logging (like ExecuteNode).
# Usually logging is implemented in subclasses (like ZfsNode, which is a subclass of ExecuteNode), but for regression testing it's nice to have these stubs.
class LogStub:
"""Just a stub, usually overriden in subclasses."""
# simple logging stubs
def debug(self, txt):
print("DEBUG : " + txt)
def verbose(self, txt):
print("VERBOSE: " + txt)
def warning(self, txt):
print("WARNING: " + txt)
def error(self, txt):
print("ERROR : " + txt)

99
zfs_autobackup/Thinner.py Normal file

@ -0,0 +1,99 @@
import time
from .ThinnerRule import ThinnerRule
class Thinner:
"""progressive thinner (universal, used for cleaning up snapshots)"""
def __init__(self, schedule_str=""):
"""
Args:
schedule_str: comma-separated list of ThinnerRules. A plain number specifies how many snapshots to always keep.
"""
self.rules = []
self.always_keep = 0
if schedule_str == "":
return
rule_strs = schedule_str.split(",")
for rule_str in rule_strs:
if rule_str.lstrip('-').isdigit():
self.always_keep = int(rule_str)
if self.always_keep < 0:
raise (Exception("Number of snapshots to keep cant be negative: {}".format(self.always_keep)))
else:
self.rules.append(ThinnerRule(rule_str))
def human_rules(self):
"""get list of human readable rules"""
ret = []
if self.always_keep:
ret.append("Keep the last {} snapshot{}.".format(self.always_keep, self.always_keep != 1 and "s" or ""))
for rule in self.rules:
ret.append(rule.human_str)
return ret
def thin(self, objects, keep_objects=None, now=None):
"""thin list of objects with current schedule rules. objects: list of
objects to thin. every object should have timestamp attribute.
return( keeps, removes )
Args:
objects: list of objects to check (should have a timestamp attribute)
keep_objects: objects to always keep (if they are also in the normal objects list)
now: if specified, use this time as current time
"""
if not keep_objects:
keep_objects = []
# always keep a number of the last objects?
if self.always_keep:
# all of them
if len(objects) <= self.always_keep:
return objects, []
# determine which ones
always_keep_objects = objects[-self.always_keep:]
else:
always_keep_objects = []
# determine time blocks
time_blocks = {}
for rule in self.rules:
time_blocks[rule.period] = {}
if not now:
now = int(time.time())
keeps = []
removes = []
# traverse objects
for thisobject in objects:
# important they are ints!
timestamp = int(thisobject.timestamp)
age = int(now) - timestamp
# store in the correct time blocks, per period-size, if not too old yet
# i.e. check whether there is ANY time block that wants to keep this object
keep = False
for rule in self.rules:
if age <= rule.ttl:
block_nr = int(timestamp / rule.period)
if block_nr not in time_blocks[rule.period]:
time_blocks[rule.period][block_nr] = True
keep = True
# keep it according to schedule, or keep it because it is in the keep_objects list
if keep or thisobject in keep_objects or thisobject in always_keep_objects:
keeps.append(thisobject)
else:
removes.append(thisobject)
return keeps, removes
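
A sketch of applying the tool's default schedule; the snapshot objects are hypothetical, since thin() only needs a timestamp attribute on each object:

import time
from zfs_autobackup.Thinner import Thinner

class FakeSnapshot:
    """stand-in object: Thinner only reads .timestamp"""
    def __init__(self, timestamp):
        self.timestamp = timestamp

now = int(time.time())
# one hypothetical snapshot per day for the last 100 days, oldest first
snapshots = [FakeSnapshot(now - days * 86400) for days in range(100, 0, -1)]
thinner = Thinner("10,1d1w,1w1m,1m1y")  # the default --keep-source schedule
keeps, removes = thinner.thin(snapshots, now=now)
print(len(keeps), "kept,", len(removes), "to destroy")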

71
zfs_autobackup/ThinnerRule.py Normal file

@ -0,0 +1,71 @@
import re
class ThinnerRule:
"""a thinning schedule rule for Thinner"""
TIME_NAMES = {
'y': 3600 * 24 * 365.25,
'm': 3600 * 24 * 30,
'w': 3600 * 24 * 7,
'd': 3600 * 24,
'h': 3600,
'min': 60,
's': 1,
}
TIME_DESC = {
'y': 'year',
'm': 'month',
'w': 'week',
'd': 'day',
'h': 'hour',
'min': 'minute',
's': 'second',
}
def __init__(self, rule_str):
"""parse scheduling string
example:
daily snapshot, remove after a week: 1d1w
weekly snapshot, remove after a month: 1w1m
monthly snapshot, remove after 6 months: 1m6m
yearly snapshot, remove after 2 years: 1y2y
keep all snapshots, remove after a day: 1s1d
keep nothing: 1s1s
"""
rule_str = rule_str.lower()
matches = re.findall("([0-9]*)([a-z]*)([0-9]*)([a-z]*)", rule_str)[0]
if '' in matches:
raise (Exception("Invalid schedule string: '{}'".format(rule_str)))
period_amount = int(matches[0])
period_unit = matches[1]
ttl_amount = int(matches[2])
ttl_unit = matches[3]
if period_unit not in self.TIME_NAMES:
raise (Exception("Invalid period string in schedule: '{}'".format(rule_str)))
if ttl_unit not in self.TIME_NAMES:
raise (Exception("Invalid ttl string in schedule: '{}'".format(rule_str)))
self.period = period_amount * self.TIME_NAMES[period_unit]
self.ttl = ttl_amount * self.TIME_NAMES[ttl_unit]
if self.period > self.ttl:
raise (Exception("Period cant be longer than ttl in schedule: '{}'".format(rule_str)))
self.rule_str = rule_str
self.human_str = "Keep every {} {}{}, delete after {} {}{}.".format(
period_amount, self.TIME_DESC[period_unit], "s" if period_amount != 1 else "", ttl_amount,
self.TIME_DESC[ttl_unit], "s" if ttl_amount != 1 else "")
def __str__(self):
"""get schedule as a schedule string"""
return self.rule_str
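
For example, the rule '1d1w' from the docstring parses as:

from zfs_autobackup.ThinnerRule import ThinnerRule

rule = ThinnerRule("1d1w")       # daily snapshot, remove after a week
assert rule.period == 3600 * 24  # period: one day, in seconds
assert rule.ttl == 3600 * 24 * 7 # ttl: one week, in seconds
print(rule.human_str)            # Keep every 1 day, delete after 1 week.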

191
zfs_autobackup/ZfsAuto.py Normal file

@ -0,0 +1,191 @@
import argparse
import os.path
import sys
from .LogConsole import LogConsole
class ZfsAuto(object):
"""Common Base class, this class is always used subclassed. Look at ZfsAutobackup and ZfsAutoverify ."""
# also used by setup.py
VERSION = "3.2-alpha1"
HEADER = "{} v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
def __init__(self, argv, print_arguments=True):
self.hold_name = None
self.snapshot_time_format = None
self.property_name = None
self.exclude_paths = None
# helps with investigating failed regression tests:
if print_arguments:
print("ARGUMENTS: " + " ".join(argv))
self.args = self.parse_args(argv)
def parse_args(self, argv):
"""parse common arguments, setup logging, check and adjust parameters"""
parser=self.get_parser()
args = parser.parse_args(argv)
if args.help:
parser.print_help()
sys.exit(255)
if args.version:
print(self.HEADER)
sys.exit(255)
# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True
if args.debug_output:
args.debug = True
if args.test:
args.verbose = True
if args.debug:
args.verbose = True
self.log = LogConsole(show_debug=args.debug, show_verbose=args.verbose, color=sys.stdout.isatty())
self.verbose(self.HEADER)
self.verbose("")
if args.backup_name is None:
parser.print_usage()
self.log.error("Please specify BACKUP-NAME")
sys.exit(255)
if args.target_path is not None and args.target_path[0] == "/":
self.log.error("Target should not start with a /")
sys.exit(255)
if args.ignore_replicated:
self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
args.exclude_unchanged = True
# Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanted to
# replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
# may still need to be used to explicitly exclude a backup with the 'received' source property to avoid accidental
# recursive replication of a zvol that is currently being received in another session (as it will have changes).
self.exclude_paths = []
if args.ssh_source == args.ssh_target:
if args.target_path:
# target and source are the same, make sure to exclude target_path
self.verbose("NOTE: Source and target are on the same host, excluding target-path from selection.")
self.exclude_paths.append(args.target_path)
else:
self.verbose("NOTE: Source and target are on the same host, excluding received datasets from selection.")
args.exclude_received = True
if args.test:
self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
#format all the names
self.property_name = args.property_format.format(args.backup_name)
self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
self.hold_name = args.hold_format.format(args.backup_name)
self.verbose("")
self.verbose("Selecting dataset property : {}".format(self.property_name))
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
return args
def get_parser(self):
parser = argparse.ArgumentParser(description=self.HEADER, add_help=False,
epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')
#positional arguments
parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
help='Name of the backup to select')
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional)')
# Basic options
group=parser.add_argument_group("Basic options")
group.add_argument('--help', '-h', action='store_true', help='show help')
group.add_argument('--test', '--dry-run', '-n', action='store_true',
help='Dry run, don\'t change anything, just show what would be done (still does all read-only '
'operations)')
group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
group.add_argument('--debug', '-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
group.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
group.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
group.add_argument('--no-progress', action='store_true',
help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
group.add_argument('--version', action='store_true',
help='Show version.')
group.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)')
# SSH options
group=parser.add_argument_group("SSH options")
group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
group.add_argument('--ssh-source', metavar='USER@HOST', default=None,
help='Source host to get backup from.')
group.add_argument('--ssh-target', metavar='USER@HOST', default=None,
help='Target host to push backup to.')
group=parser.add_argument_group("String formatting options")
group.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
help='Dataset selection string format. Default: %(default)s')
group.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
help='ZFS Snapshot string format. Default: %(default)s')
group.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
help='ZFS hold string format. Default: %(default)s')
group=parser.add_argument_group("Selection options")
group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
group.add_argument('--exclude-unchanged', action='store_true',
help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
group.add_argument('--exclude-received', action='store_true',
help='Exclude datasets that have the origin of their autobackup: property as "received". '
'This can avoid recursive replication between two backup partners.')
return parser
def verbose(self, txt):
self.log.verbose(txt)
def warning(self, txt):
self.log.warning(txt)
def error(self, txt):
self.log.error(txt)
def debug(self, txt):
self.log.debug(txt)
def progress(self, txt):
self.log.progress(txt)
def clear_progress(self):
self.log.clear_progress()
def set_title(self, title):
self.log.verbose("")
self.log.verbose("#### " + title)
def print_error_sources(self):
self.error(
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
"you want to select.".format(
self.args.backup_name))
def make_target_name(self, source_dataset):
"""make target_name from a source_dataset"""
return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
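
A sketch of how the --...-format defaults expand, for a hypothetical BACKUP-NAME 'offsite1':

import time

backup_name = "offsite1"                                      # hypothetical BACKUP-NAME
property_name = "autobackup:{}".format(backup_name)           # dataset selection property
snapshot_time_format = "{}-%Y%m%d%H%M%S".format(backup_name)  # passed to time.strftime later
hold_name = "zfs_autobackup:{}".format(backup_name)           # name used for zfs hold
print(time.strftime(snapshot_time_format))                    # e.g. offsite1-20220124175959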

496
zfs_autobackup/ZfsAutobackup.py Normal file

@ -0,0 +1,496 @@
import time
import argparse
from .ZfsAuto import ZfsAuto
from . import compressors
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)"""
def __init__(self, argv, print_arguments=True):
# NOTE: common options and parameters are in ZfsAuto
super(ZfsAutobackup, self).__init__(argv, print_arguments)
def parse_args(self, argv):
"""do extra checks on common args"""
args = super(ZfsAutobackup, self).parse_args(argv)
if not args.no_holds:
self.verbose("Hold name : {}".format(self.hold_name))
if args.allow_empty:
args.min_change = 0
if args.destroy_incompatible:
args.rollback = True
if args.resume:
self.warning("The --resume option isn't needed anymore (its autodetected now)")
if args.raw:
self.warning(
"The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")
if args.compress and args.ssh_source is None and args.ssh_target is None:
self.warning("Using compression, but transfer is local.")
if args.compress and args.zfs_compressed:
self.warning("Using --compress with --zfs-compressed, might be inefficient.")
return args
def get_parser(self):
"""extend common parser with extra stuff needed for zfs-autobackup"""
parser = super(ZfsAutobackup, self).get_parser()
group = parser.add_argument_group("Snapshot options")
group.add_argument('--no-snapshot', action='store_true',
help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
group.add_argument('--pre-snapshot-cmd', metavar="COMMAND", default=[], action='append',
help='Run COMMAND before snapshotting (can be used multiple times).')
group.add_argument('--post-snapshot-cmd', metavar="COMMAND", default=[], action='append',
help='Run COMMAND after snapshotting (can be used multiple times).')
group.add_argument('--min-change', metavar='BYTES', type=int, default=1,
help='Only create snapshot if enough bytes are changed. (default %('
'default)s)')
group.add_argument('--allow-empty', action='store_true',
help='If nothing has changed, still create empty snapshots. (Faster. Same as --min-change=0)')
group.add_argument('--other-snapshots', action='store_true',
help='Send over other snapshots as well, not just the ones created by this tool.')
group = parser.add_argument_group("Transfer options")
group.add_argument('--no-send', action='store_true',
help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
group.add_argument('--no-holds', action='store_true',
help='Don\'t hold snapshots. (Faster. Allows you to destroy the common snapshot.)')
group.add_argument('--clear-refreservation', action='store_true',
help='Filter "refreservation" property. (recommended, safes space. same as '
'--filter-properties refreservation)')
group.add_argument('--clear-mountpoint', action='store_true',
help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
'conflicts. same as --set-properties canmount=noauto)')
group.add_argument('--filter-properties', metavar='PROPERTY,...', type=str,
help='List of properties to "filter" when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
group.add_argument('--set-properties', metavar='PROPERTY=VALUE,...', type=str,
help='List of properties to override when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
group.add_argument('--rollback', action='store_true',
help='Rollback changes to the latest target snapshot before starting. (normally you can '
'prevent changes by setting the readonly property on the target_path to on)')
group.add_argument('--destroy-incompatible', action='store_true',
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
group.add_argument('--ignore-transfer-errors', action='store_true',
help='Ignore transfer errors (still checks if received filesystem exists. useful for '
'acltype errors)')
group.add_argument('--decrypt', action='store_true',
help='Decrypt data before sending it over.')
group.add_argument('--encrypt', action='store_true',
help='Encrypt data after receiving it.')
group.add_argument('--zfs-compressed', action='store_true',
help='Transfer blocks that already have zfs-compression as-is.')
group = parser.add_argument_group("ZFS send/recv pipes")
group.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
choices=compressors.choices(),
help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
", ".join(compressors.choices())))
group.add_argument('--rate', metavar='DATARATE', default=None,
help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
group.add_argument('--buffer', metavar='SIZE', default=None,
help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND (can be used multiple times)')
group.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs recv input through COMMAND (can be used multiple times)')
group = parser.add_argument_group("Thinner options")
group.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
group.add_argument('--keep-source', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old source snapshots. Default: %(default)s')
group.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old target snapshots. Default: %(default)s')
group.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
help='Destroy datasets on target that are missing on the source. Specify the time since '
'the last snapshot, e.g: --destroy-missing 30d')
# obsolete
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)
return parser
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def thin_missing_targets(self, target_dataset, used_target_datasets):
"""thin target datasets that are missing on the source."""
self.debug("Thinning obsolete datasets")
missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
dataset not in used_target_datasets]
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing missing {}/{}".format(count, len(missing_datasets)))
try:
dataset.debug("Missing on source, thinning")
dataset.thin()
except Exception as e:
dataset.error("Error during thinning of missing datasets ({})".format(str(e)))
if self.args.progress:
self.clear_progress()
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def destroy_missing_targets(self, target_dataset, used_target_datasets):
"""destroy target datasets that are missing on the source and that meet the requirements"""
self.debug("Destroying obsolete datasets")
missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
dataset not in used_target_datasets]
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing destroy missing {}/{}".format(count, len(missing_datasets)))
try:
# can't do anything without our own snapshots
if not dataset.our_snapshots:
if dataset.datasets:
# it's not a leaf, just ignore
dataset.debug("Destroy missing: ignoring")
else:
dataset.verbose(
"Destroy missing: has no snapshots made by us. (please destroy manually)")
else:
# past the deadline?
deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
now = int(time.time())
if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
dataset.verbose("Destroy missing: Waiting for deadline.")
else:
dataset.debug("Destroy missing: Removing our snapshots.")
# remove all our snapshots, except the last, to save space in case we fail later on
for snapshot in dataset.our_snapshots[:-1]:
snapshot.destroy(fail_exception=True)
# does it have other snapshots?
has_others = False
for snapshot in dataset.snapshots:
if not snapshot.is_ours():
has_others = True
break
if has_others:
dataset.verbose("Destroy missing: Still in use by other snapshots")
else:
if dataset.datasets:
dataset.verbose("Destroy missing: Still has children here.")
else:
dataset.verbose("Destroy missing.")
dataset.our_snapshots[-1].destroy(fail_exception=True)
dataset.destroy(fail_exception=True)
except Exception as e:
if self.args.progress:
self.clear_progress()
dataset.error("Error during --destroy-missing: {}".format(str(e)))
if self.args.progress:
self.clear_progress()
def get_send_pipes(self, logger):
"""determine the zfs send pipe"""
ret = []
# IO buffer
if self.args.buffer:
logger("zfs send buffer : {}".format(self.args.buffer))
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])
# custom pipes
for send_pipe in self.args.send_pipe:
ret.append(ExecuteNode.PIPE)
ret.extend(send_pipe.split(" "))
logger("zfs send custom pipe : {}".format(send_pipe))
# compression
if self.args.compress is not None:
ret.append(ExecuteNode.PIPE)
cmd = compressors.compress_cmd(self.args.compress)
ret.extend(cmd)
logger("zfs send compression : {}".format(" ".join(cmd)))
# transfer rate
if self.args.rate:
logger("zfs send transfer rate : {}".format(self.args.rate))
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])
return ret
def get_recv_pipes(self, logger):
ret = []
# decompression
if self.args.compress is not None:
cmd = compressors.decompress_cmd(self.args.compress)
ret.extend(cmd)
ret.append(ExecuteNode.PIPE)
logger("zfs recv decompression : {}".format(" ".join(cmd)))
# custom pipes
for recv_pipe in self.args.recv_pipe:
ret.extend(recv_pipe.split(" "))
ret.append(ExecuteNode.PIPE)
logger("zfs recv custom pipe : {}".format(recv_pipe))
# IO buffer
if self.args.buffer:
# only add a second buffer if it's useful (e.g. non-local transfer or other pipes active)
if self.args.ssh_source is not None or self.args.ssh_target is not None or self.args.recv_pipe or self.args.send_pipe or self.args.compress is not None:
logger("zfs recv buffer : {}".format(self.args.buffer))
ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])
return ret
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides
:type target_node: ZfsNode
:type source_datasets: list of ZfsDataset
:type source_node: ZfsNode
"""
send_pipes = self.get_send_pipes(source_node.verbose)
recv_pipes = self.get_recv_pipes(target_node.verbose)
fail_count = 0
count = 0
target_datasets = []
for source_dataset in source_datasets:
# stats
if self.args.progress:
count = count + 1
self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
try:
# determine corresponding target_dataset
target_name = self.make_target_name(source_dataset)
target_dataset = target_node.get_dataset(target_name)
target_datasets.append(target_dataset)
# ensure parents exists
# TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True)
# determine the zpool features both sides support (cached, so no problem we call it often)
source_features = source_node.get_pool(source_dataset).features
target_features = target_node.get_pool(target_dataset).features
common_features = [feature for feature in source_features if feature in target_features]
# sync the snapshots of this dataset
source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
features=common_features, filter_properties=self.filter_properties_list(),
set_properties=self.set_properties_list(),
ignore_recv_exit_code=self.args.ignore_transfer_errors,
holds=not self.args.no_holds, rollback=self.args.rollback,
also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed)
except Exception as e:
if self.args.progress:
self.clear_progress()
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
if self.args.debug:
self.verbose("Debug mode, aborting on first error")
raise
if self.args.progress:
self.clear_progress()
target_path_dataset = target_node.get_dataset(self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
if self.args.destroy_missing is not None:
self.destroy_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
return fail_count
def thin_source(self, source_datasets):
self.set_title("Thinning source")
for source_dataset in source_datasets:
source_dataset.thin(skip_holds=True)
def filter_properties_list(self):
if self.args.filter_properties:
filter_properties = self.args.filter_properties.split(",")
else:
filter_properties = []
if self.args.clear_refreservation:
filter_properties.append("refreservation")
return filter_properties
def set_properties_list(self):
if self.args.set_properties:
set_properties = self.args.set_properties.split(",")
else:
set_properties = []
if self.args.clear_mountpoint:
set_properties.append("canmount=noauto")
return set_properties
def run(self):
try:
################ create source zfsNode
self.set_title("Source settings")
description = "[Source]"
if self.args.no_thinning:
source_thinner = None
else:
source_thinner = Thinner(self.args.keep_source)
source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description, thinner=source_thinner)
################# select source datasets
self.set_title("Selecting")
source_datasets = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged,
min_change=self.args.min_change)
if not source_datasets:
self.print_error_sources()
return 255
################# snapshotting
if not self.args.no_snapshot:
self.set_title("Snapshotting")
snapshot_name = time.strftime(self.snapshot_time_format)
source_node.consistent_snapshot(source_datasets, snapshot_name,
min_changed_bytes=self.args.min_change,
pre_snapshot_cmds=self.args.pre_snapshot_cmd,
post_snapshot_cmds=self.args.post_snapshot_cmd)
################# sync
# if a target is specified we sync the datasets, otherwise we only thin the source (i.e. snapshot-only mode)
if self.args.target_path:
# create target_node
self.set_title("Target settings")
if self.args.no_thinning:
target_thinner = None
else:
target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
thinner=target_thinner)
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
self.set_title("Synchronising")
# check if exists, to prevent vague errors
target_dataset = target_node.get_dataset(self.args.target_path)
if not target_dataset.exists:
raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# do the actual sync
# NOTE: even with no_send, no_thinning and no_snapshot this still does something useful, because it checks the common snapshots and shows incompatible snapshots
fail_count = self.sync_datasets(
source_node=source_node,
source_datasets=source_datasets,
target_node=target_node)
# no target specified, run in snapshot-only mode
else:
if not self.args.no_thinning:
self.thin_source(source_datasets)
fail_count = 0
if not fail_count:
if self.args.test:
self.set_title("All tests successful.")
else:
self.set_title("All operations completed successfully")
if not self.args.target_path:
self.verbose("(No target_path specified, only operated as snapshot tool.)")
else:
if fail_count != 255:
self.error("{} dataset(s) failed!".format(fail_count))
if self.args.test:
self.verbose("")
self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")
return fail_count
except Exception as e:
self.error("Exception: " + str(e))
if self.args.debug:
raise
return 255
except KeyboardInterrupt:
self.error("Aborted")
return 255
def cli():
import sys
sys.exit(ZfsAutobackup(sys.argv[1:], False).run())
if __name__ == "__main__":
cli()
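
A hypothetical direct invocation, passing the same argv the cli() entry point would:

from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# dry-run: snapshot datasets tagged autobackup:offsite1 and sync them under backup/host1
fail_count = ZfsAutobackup(["--test", "--verbose", "offsite1", "backup/host1"]).run()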

402
zfs_autobackup/ZfsAutoverify.py Normal file

@ -0,0 +1,402 @@
import os
import time
from .ExecuteNode import ExecuteNode
from .ZfsAuto import ZfsAuto
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
import sys
import platform
def tmp_name(suffix=""):
"""create temporary name unique to this process and node"""
# we could use uuids, but those are ugly and confusing
name="zfstmp_{}_{}".format(platform.node(), os.getpid())
name=name+suffix
return name
def hash_tree_tar(node, path):
"""calculate md5sum of a directory tree, using tar"""
node.debug("Hashing filesystem {} ".format(path))
cmd=[ "tar", "-cf", "-", "-C", path, ".",
ExecuteNode.PIPE, "md5sum"]
stdout = node.run(cmd)
if node.readonly:
hashed=None
else:
hashed = stdout[0].split(" ")[0]
node.debug("Hash of {} filesytem is {}".format(path, hashed))
return hashed
def compare_trees_tar(source_node, source_path, target_node, target_path):
"""compare two trees using tar. compatible and simple"""
source_hash= hash_tree_tar(source_node, source_path)
target_hash= hash_tree_tar(target_node, target_path)
if source_hash != target_hash:
raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
def compare_trees_rsync(source_node, source_path, target_node, target_path):
"""use rsync to compare two trees.
Advantage is that we can see which individual files differ.
But it requires rsync and can't do remote-to-remote."""
cmd = ["rsync", "-rcn", "--info=COPY,DEL,MISC,NAME,SYMSAFE", "--msgs2stderr", "--delete" ]
#local
if source_node.ssh_to is None and target_node.ssh_to is None:
cmd.append("{}/".format(source_path))
cmd.append("{}/".format(target_path))
source_node.debug("Running rsync locally, on source.")
stdout, stderr = source_node.run(cmd, return_stderr=True)
#source is local
elif source_node.ssh_to is None and target_node.ssh_to is not None:
cmd.append("{}/".format(source_path))
cmd.append("{}:{}/".format(target_node.ssh_to, target_path))
source_node.debug("Running rsync locally, on source.")
stdout, stderr = source_node.run(cmd, return_stderr=True)
#target is local
elif source_node.ssh_to is not None and target_node.ssh_to is None:
cmd.append("{}:{}/".format(source_node.ssh_to, source_path))
cmd.append("{}/".format(target_path))
source_node.debug("Running rsync locally, on target.")
stdout, stderr=target_node.run(cmd, return_stderr=True)
else:
raise Exception("Source and target cant both be remote when verifying. (rsync limitation)")
if stderr:
raise Exception("Dataset verify failed, see above list for differences")
def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, method):
"""Compare the contents of two zfs filesystem snapshots """
try:
# mount the snapshots
source_snapshot.mount(source_mnt)
target_snapshot.mount(target_mnt)
if method=='rsync':
compare_trees_rsync(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
elif method == 'tar':
compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
else:
raise(Exception("program errror, unknown method"))
finally:
source_snapshot.unmount()
target_snapshot.unmount()
def hash_dev(node, dev):
"""calculate md5sum of a device on a node"""
node.debug("Hashing volume {} ".format(dev))
cmd = [ "md5sum", dev ]
stdout = node.run(cmd)
if node.readonly:
hashed=None
else:
hashed = stdout[0].split(" ")[0]
node.debug("Hash of volume {} is {}".format(dev, hashed))
return hashed
# def activate_volume_snapshot(dataset, snapshot):
# """enables snapdev, waits and tries to findout /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
#
# dataset.set("snapdev", "visible")
#
# #NOTE: add smartos location to this list as well
# locations=[
# "/dev/zvol/" + snapshot.name
# ]
#
# dataset.debug("Waiting for /dev entry to appear...")
# time.sleep(0.1)
#
# start_time=time.time()
# while time.time()-start_time<10:
# for location in locations:
# stdout, stderr, exit_code=dataset.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])
#
# #fake it in testmode
# if dataset.zfs_node.readonly:
# return location
#
# if exit_code==0:
# return location
# time.sleep(1)
#
# raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))
#
# def deactivate_volume_snapshot(dataset):
# dataset.inherit("snapdev")
# NOTE: https://www.google.com/search?q=Mount+Path+Limit+freebsd
# FreeBSD has limitations regarding path length, so we can't use the above method.
# Instead we create a temporary clone
def get_tmp_clone_name(snapshot):
pool=snapshot.zfs_node.get_pool(snapshot)
return pool.name+"/"+tmp_name()
def activate_volume_snapshot(snapshot):
"""clone volume, waits and tries to findout /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
clone_name=get_tmp_clone_name(snapshot)
clone=snapshot.clone(clone_name)
#NOTE: add smartos location to this list as well
locations=[
"/dev/zvol/" + clone_name
]
clone.debug("Waiting for /dev entry to appear...")
time.sleep(0.1)
start_time=time.time()
while time.time()-start_time<10:
for location in locations:
stdout, stderr, exit_code=clone.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])
#fake it in testmode
if clone.zfs_node.readonly:
return location
if exit_code==0:
return location
time.sleep(1)
raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))
def deactivate_volume_snapshot(snapshot):
clone_name=get_tmp_clone_name(snapshot)
clone=snapshot.zfs_node.get_dataset(clone_name)
clone.destroy()
def verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot):
"""compare the contents of two zfs volume snapshots"""
try:
source_dev= activate_volume_snapshot(source_snapshot)
target_dev= activate_volume_snapshot(target_snapshot)
source_hash= hash_dev(source_snapshot.zfs_node, source_dev)
target_hash= hash_dev(target_snapshot.zfs_node, target_dev)
if source_hash!=target_hash:
raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
finally:
deactivate_volume_snapshot(source_snapshot)
deactivate_volume_snapshot(target_snapshot)
def create_mountpoints(source_node, target_node):
# prepare mount points
source_node.debug("Create temporary mount point")
source_mnt = "/tmp/"+tmp_name("source")
source_node.run(["mkdir", source_mnt])
target_node.debug("Create temporary mount point")
target_mnt = "/tmp/"+tmp_name("target")
target_node.run(["mkdir", target_mnt])
return source_mnt, target_mnt
def cleanup_mountpoint(node, mnt):
node.debug("Cleaning up temporary mount point")
node.run([ "rmdir", mnt ], hide_errors=True, valid_exitcodes=[] )
class ZfsAutoverify(ZfsAuto):
"""The zfs-autoverify class, default agruments and stuff come from ZfsAuto"""
def __init__(self, argv, print_arguments=True):
# NOTE: common options and parameters are in ZfsAuto
super(ZfsAutoverify, self).__init__(argv, print_arguments)
def parse_args(self, argv):
"""do extra checks on common args"""
args=super(ZfsAutoverify, self).parse_args(argv)
if args.target_path is None:
self.log.error("Please specify TARGET-PATH")
sys.exit(255)
return args
def get_parser(self):
"""extend common parser with extra stuff needed for zfs-autobackup"""
parser=super(ZfsAutoverify, self).get_parser()
group=parser.add_argument_group("Verify options")
group.add_argument('--fs-compare', metavar='METHOD', default="tar", choices=["tar", "rsync"],
help='Compare method to use for filesystems. (tar, rsync) Default: %(default)s ')
return parser
def verify_datasets(self, source_mnt, source_datasets, target_node, target_mnt):
fail_count=0
count = 0
for source_dataset in source_datasets:
# stats
if self.args.progress:
count = count + 1
self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
try:
# determine corresponding target_dataset
target_name = self.make_target_name(source_dataset)
target_dataset = target_node.get_dataset(target_name)
# find common snapshots to verify
source_snapshot = source_dataset.find_common_snapshot(target_dataset)
target_snapshot = target_dataset.find_snapshot(source_snapshot)
if source_snapshot is None or target_snapshot is None:
raise(Exception("Cant find common snapshot"))
target_snapshot.verbose("Verifying...")
if source_dataset.properties['type']=="filesystem":
verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, self.args.fs_compare)
elif source_dataset.properties['type']=="volume":
verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
else:
raise(Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
except Exception as e:
if self.args.progress:
self.clear_progress()
fail_count = fail_count + 1
target_dataset.error("FAILED: " + str(e))
if self.args.debug:
self.verbose("Debug mode, aborting on first error")
raise
if self.args.progress:
self.clear_progress()
return fail_count
def run(self):
source_node=None
source_mnt=None
target_node=None
target_mnt=None
try:
################ create source zfsNode
self.set_title("Source settings")
description = "[Source]"
source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description)
################# select source datasets
self.set_title("Selecting")
source_datasets = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged,
min_change=0)
if not source_datasets:
self.print_error_sources()
return 255
# create target_node
self.set_title("Target settings")
target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]")
target_node.verbose("Verify datasets under: {}".format(self.args.target_path))
self.set_title("Verifying")
source_mnt, target_mnt= create_mountpoints(source_node, target_node)
fail_count = self.verify_datasets(
source_mnt=source_mnt,
source_datasets=source_datasets,
target_mnt=target_mnt,
target_node=target_node)
if not fail_count:
if self.args.test:
self.set_title("All tests successful.")
else:
self.set_title("All datasets verified ok")
else:
if fail_count != 255:
self.error("{} dataset(s) failed!".format(fail_count))
if self.args.test:
self.verbose("")
self.warning("TEST MODE - DID NOT VERIFY ANYTHING!")
return fail_count
except Exception as e:
self.error("Exception: " + str(e))
if self.args.debug:
raise
return 255
except KeyboardInterrupt:
self.error("Aborted")
return 255
finally:
# cleanup
if source_mnt is not None:
cleanup_mountpoint(source_node, source_mnt)
if target_mnt is not None:
cleanup_mountpoint(target_node, target_mnt)
def cli():
import sys
sys.exit(ZfsAutoverify(sys.argv[1:], False).run())
if __name__ == "__main__":
cli()
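
And the matching verify run (arguments hypothetical; --fs-compare selects between the tar and rsync methods above):

from zfs_autobackup.ZfsAutoverify import ZfsAutoverify

fail_count = ZfsAutoverify(["--fs-compare", "rsync", "offsite1", "backup/host1"]).run()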

1159
zfs_autobackup/ZfsDataset.py Normal file

File diff suppressed because it is too large

269
zfs_autobackup/ZfsNode.py Normal file

@ -0,0 +1,269 @@
# python 2 compatibility
from __future__ import print_function
import re
import shlex
import subprocess
import sys
import time
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .CachedProperty import CachedProperty
from .ZfsPool import ZfsPool
from .ZfsDataset import ZfsDataset
from .ExecuteNode import ExecuteError
class ZfsNode(ExecuteNode):
"""a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""
def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
description="",
debug_output=False, thinner=None):
self.snapshot_time_format = snapshot_time_format
self.hold_name = hold_name
self.description = description
self.logger = logger
if ssh_config:
self.verbose("Using custom SSH config: {}".format(ssh_config))
if ssh_to:
self.verbose("Datasets on: {}".format(ssh_to))
else:
self.verbose("Datasets are local")
if thinner is not None:
rules = thinner.human_rules()
if rules:
for rule in rules:
self.verbose(rule)
else:
self.verbose("Keep no old snaphots")
self.__thinner = thinner
# list of ZfsPools
self.__pools = {}
self.__datasets = {}
self._progress_total_bytes = 0
self._progress_start_time = time.time()
ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)
def thin(self, objects, keep_objects):
# NOTE: if thinning is disabled with --no-thinning, self.__thinner will be None.
if self.__thinner is not None:
return self.__thinner.thin(objects, keep_objects)
else:
return (keep_objects, [])
@CachedProperty
def supported_send_options(self):
"""list of supported options, for optimizing sends"""
# not every zfs implementation supports them all
ret = []
for option in ["-L", "-e", "-c"]:
if self.valid_command(["zfs", "send", option, "zfs_autobackup_option_test"]):
ret.append(option)
return ret
@CachedProperty
def supported_recv_options(self):
"""list of supported options"""
# not every zfs implementation supports them all
ret = []
for option in ["-s"]:
if self.valid_command(["zfs", "recv", option, "zfs_autobackup_option_test"]):
ret.append(option)
return ret
def valid_command(self, cmd):
"""test if a specified zfs options are valid exit code. use this to determine support options"""
try:
self.run(cmd, hide_errors=True, valid_exitcodes=[0, 1])
except ExecuteError:
return False
return True
def get_pool(self, dataset):
"""get a ZfsPool() object from dataset. stores objects internally to enable caching"""
if not isinstance(dataset, ZfsDataset):
raise (Exception("{} is not a ZfsDataset".format(dataset)))
zpool_name = dataset.name.split("/")[0]
return self.__pools.setdefault(zpool_name, ZfsPool(self, zpool_name))
def get_dataset(self, name, force_exists=None):
"""get a ZfsDataset() object from name. stores objects internally to enable caching"""
return self.__datasets.setdefault(name, ZfsDataset(self, name))
def reset_progress(self):
"""reset progress output counters"""
self._progress_total_bytes = 0
self._progress_start_time = time.time()
def parse_zfs_progress(self, line, hide_errors, prefix):
"""try to parse progress output of zfs recv -Pv, and don't show it as error to the user """
# is it progress output?
progress_fields = line.rstrip().split("\t")
if (line.find("nvlist version") == 0 or
line.find("resume token contents") == 0 or
len(progress_fields) != 1 or
line.find("skipping ") == 0 or
re.match("send from .*estimated size is ", line)):
# always output for debugging, of course
self.debug(prefix + line.rstrip())
# actual useful info
if len(progress_fields) >= 3:
if progress_fields[0] == 'full' or progress_fields[0] == 'size':
self._progress_total_bytes = int(progress_fields[2])
elif progress_fields[0] == 'incremental':
self._progress_total_bytes = int(progress_fields[3])
elif progress_fields[1].isnumeric():
bytes_ = int(progress_fields[1])
if self._progress_total_bytes:
percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
speed = int(bytes_ / (time.time() - self._progress_start_time) / (1024 * 1024))
bytes_left = self._progress_total_bytes - bytes_
minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)
self.logger.progress(
"Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left))
return
# still do the normal stderr output handling
if hide_errors:
self.debug(prefix + line.rstrip())
else:
self.error(prefix + line.rstrip())
# def _parse_stderr_pipe(self, line, hide_errors):
# self.parse_zfs_progress(line, hide_errors, "STDERR|> ")
def _parse_stderr(self, line, hide_errors):
self.parse_zfs_progress(line, hide_errors, "STDERR > ")
def verbose(self, txt):
self.logger.verbose("{} {}".format(self.description, txt))
def error(self, txt):
self.logger.error("{} {}".format(self.description, txt))
def warning(self, txt):
self.logger.warning("{} {}".format(self.description, txt))
def debug(self, txt):
self.logger.debug("{} {}".format(self.description, txt))
def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
post_snapshot_cmds=[]):
"""create a consistent (atomic) snapshot of specified datasets, per pool.
"""
pools = {}
# collect snapshots that we want to make, per pool
# self.debug(datasets)
for dataset in datasets:
if not dataset.is_changed_ours(min_changed_bytes):
dataset.verbose("No changes since {}".format(dataset.our_snapshots[-1].snapshot_name))
continue
# force_exist, since we're making it
snapshot = self.get_dataset(dataset.name + "@" + snapshot_name, force_exists=True)
pool = dataset.split_path()[0]
if pool not in pools:
pools[pool] = []
pools[pool].append(snapshot)
# update cache, but try to prevent an unneeded zfs list
if self.readonly or CachedProperty.is_cached(dataset, 'snapshots'):
dataset.snapshots.append(snapshot) # NOTE: this will trigger zfs list if its not cached
if not pools:
self.verbose("No changes anywhere: not creating snapshots.")
return
try:
for cmd in pre_snapshot_cmds:
self.verbose("Running pre-snapshot-cmd")
self.run(cmd=shlex.split(cmd), readonly=False)
# create consistent snapshot per pool
for (pool_name, snapshots) in pools.items():
cmd = ["zfs", "snapshot"]
cmd.extend(str(snapshot_) for snapshot_ in snapshots)
self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
self.run(cmd, readonly=False)
finally:
for cmd in post_snapshot_cmds:
self.verbose("Running post-snapshot-cmd")
try:
self.run(cmd=shlex.split(cmd), readonly=False)
except Exception as e:
self.warning("post-snapshot-cmd failed: {}".format(str(e)))
def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
"""determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
returns: list of ZfsDataset
"""
self.debug("Getting selected datasets")
# get all source filesystems that have the backup property
lines = self.run(tab_split=True, readonly=True, cmd=[
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
property_name
])
# The return list of selected ZfsDatasets:
selected_filesystems = []
# list of sources, used to resolve inherited sources
sources = {}
for line in lines:
(name, value, raw_source) = line
dataset = self.get_dataset(name, force_exists=True)
# "resolve" inherited sources
sources[name] = raw_source
if raw_source.find("inherited from ") == 0:
inherited = True
inherited_from = re.sub("^inherited from ", "", raw_source)
source = sources[inherited_from]
else:
inherited = False
source = raw_source
# let the dataset determine whether it is selected
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
min_change=min_change):
selected_filesystems.append(dataset)
return selected_filesystems
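
A construction sketch (names hypothetical), using the LogStub from above as logger and readonly to keep it side-effect free:

from zfs_autobackup.ZfsNode import ZfsNode
from zfs_autobackup.LogStub import LogStub

node = ZfsNode(snapshot_time_format="offsite1-%Y%m%d%H%M%S",
               hold_name="zfs_autobackup:offsite1",
               logger=LogStub(), readonly=True)
datasets = node.selected_datasets(property_name="autobackup:offsite1",
                                  exclude_received=False, exclude_paths=[],
                                  exclude_unchanged=False, min_change=1)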

63
zfs_autobackup/ZfsPool.py Normal file

@ -0,0 +1,63 @@
from .CachedProperty import CachedProperty
class ZfsPool():
"""a zfs pool"""
def __init__(self, zfs_node, name):
"""name: name of the pool
"""
self.zfs_node = zfs_node
self.name = name
def __repr__(self):
return "{}: {}".format(self.zfs_node, self.name)
def __str__(self):
return self.name
def __eq__(self, obj):
if not isinstance(obj, ZfsPool):
return False
return self.name == obj.name
def verbose(self, txt):
self.zfs_node.verbose("zpool {}: {}".format(self.name, txt))
def error(self, txt):
self.zfs_node.error("zpool {}: {}".format(self.name, txt))
def debug(self, txt):
self.zfs_node.debug("zpool {}: {}".format(self.name, txt))
@CachedProperty
def properties(self):
"""all zpool properties"""
self.debug("Getting zpool properties")
cmd = [
"zpool", "get", "-H", "-p", "all", self.name
]
ret = {}
for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
ret[pair[1]] = pair[2]
return ret
@property
def features(self):
"""get list of active zpool features"""
ret = []
for (key, value) in self.properties.items():
if key.startswith("feature@"):
feature = key.split("@")[1]
if value == 'enabled' or value == 'active':
ret.append(feature)
return ret
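
Reading features through the per-pool cache, continuing the ZfsNode sketch above (the feature name is just an example of what zpool get all reports):

dataset = node.get_dataset("rpool/data")   # `node` as in the ZfsNode sketch above
pool = node.get_pool(dataset)              # cached per zpool name ("rpool")
if "extensible_dataset" in pool.features:  # enabled/active feature@... properties
    print("{} supports extensible_dataset".format(pool))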

3
zfs_autobackup/__init__.py Executable file

@ -0,0 +1,3 @@

7
zfs_autobackup/__main__.py Executable file

@ -0,0 +1,7 @@
# (c)edwin@datux.nl - Released under GPL V3
#
# Greetings from eth0 2019 :)
import sys

75
zfs_autobackup/compressors.py Normal file

@ -0,0 +1,75 @@
# Adapted from Syncoid :)
# this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
COMPRESS_CMDS = {
'gzip': {
'cmd': 'gzip',
'args': [ '-3' ],
'dcmd': 'zcat',
'dargs': [],
},
'pigz-fast': {
'cmd': 'pigz',
'args': [ '-3' ],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
},
'pigz-slow': {
'cmd': 'pigz',
'args': [ '-9' ],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
},
'zstd-fast': {
'cmd': 'zstdmt',
'args': [ '-3' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'zstd-slow': {
'cmd': 'zstdmt',
'args': [ '-19' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'zstd-adapt': {
'cmd': 'zstdmt',
'args': [ '--adapt' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'xz': {
'cmd': 'xz',
'args': [],
'dcmd': 'xz',
'dargs': [ '-d' ],
},
'lzo': {
'cmd': 'lzop',
'args': [],
'dcmd': 'lzop',
'dargs': [ '-dfc' ],
},
'lz4': {
'cmd': 'lz4',
'args': [],
'dcmd': 'lz4',
'dargs': [ '-dc' ],
},
}
def compress_cmd(compressor):
ret=[ COMPRESS_CMDS[compressor]['cmd'] ]
ret.extend( COMPRESS_CMDS[compressor]['args'])
return ret
def decompress_cmd(compressor):
ret= [ COMPRESS_CMDS[compressor]['dcmd'] ]
ret.extend(COMPRESS_CMDS[compressor]['dargs'])
return ret
def choices():
return COMPRESS_CMDS.keys()
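
For example, the default zstd-fast entry expands to the pipe commands used by --compress:

from zfs_autobackup import compressors

print(compressors.compress_cmd('zstd-fast'))    # ['zstdmt', '-3']
print(compressors.decompress_cmd('zstd-fast'))  # ['zstdmt', '-dc']
print(sorted(compressors.choices()))            # all supported --compress TYPEs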