Compare commits

...

49 Commits

Author SHA1 Message Date
e4356cb516 Added -F (--force) to allow 1:1 replication. 2022-02-23 18:36:03 +01:00
cab2f98bb8 Better strip path handling and collision checking. Now also supports stripping so much it ends up on a pool-target.
Fixes #102, #117
2022-02-23 17:47:50 +01:00
07cb7cfad4 version bump 2021-12-19 18:23:09 +01:00
7b4a986f13 fix #103 2021-12-19 18:16:54 +01:00
be2474bb1c error doc 2021-11-02 20:18:57 +01:00
81e7cd940c forgot to remove debugging print 2021-10-04 00:59:50 +02:00
0b4448798e out of range for python 2 2021-10-04 00:34:07 +02:00
b1689f5066 added --...-format options. closes #87 2021-10-04 00:14:40 +02:00
dcb9cdac44 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-10-03 21:50:08 +02:00
9dc280abad Merge pull request #98 from sbidoul/relative-imports
Use relative imports
2021-09-21 16:44:47 +02:00
6b8c683315 Merge branch 'relative-imports' of https://github.com/sbidoul/zfs_autobackup 2021-09-21 14:31:50 +02:00
66e123849b preparations for #87 2021-09-21 14:30:59 +02:00
7325e1e351 ignore coveralls submission errors 2021-09-21 14:21:32 +02:00
9f4ea51622 ignore coveralls submission errors 2021-09-21 14:14:58 +02:00
8c1058a808 Use relative imports 2021-09-21 14:05:33 +02:00
d9e759a3eb fix 2021-09-20 21:14:26 +02:00
46457b3aca added some common short options and changes to fix #88 2021-09-20 15:11:23 +02:00
59f7ccc352 default compression is now zstd-fast, fixes #97 2021-09-20 14:54:38 +02:00
578fb1be4b renamed --ignore-replicated to --exclude-unchanged. tidied up and removed separate filter_replicated() step. #93, #95, #96 2021-09-20 14:52:20 +02:00
f9b16c050b Merge pull request #96 from xrobau/really-ignore-replicated
Fix #93, Fix #95 Re-Document --exclude-received
2021-09-16 12:14:01 +02:00
2ba6fe5235 Fix #93, Fix #95 Re-Document --exclude-received
If another zfs_autobackup session is running, there will be changes
on zvols that are replicated. This means that it is possible for
a pair of servers running replication between themselves to start
backing up the backups.  Turning on --exclude-received when backups
are running between an a <--> b pair of servers means that the vols
that are received will never be accidentally selected.
2021-09-16 09:38:26 +10:00
8e2c91735a Merge remote-tracking branch 'origin/master' 2021-08-24 11:14:24 +02:00
d57e3922a0 added note, #91 2021-08-24 11:14:13 +02:00
4b25dd76f1 Merge pull request #90 from bagbag/master
Support for zstd-adapt
2021-08-19 15:45:17 +02:00
2843781aa6 Support for zstd-adapt 2021-08-18 23:08:00 +02:00
ce987328d9 update doc 2021-08-18 10:32:41 +02:00
9a902f0f38 final 3.1 release 2021-08-18 10:28:10 +02:00
ee2c074539 it seems the amount of changed bytes has become bigger on my prox. (approx 230k) 2021-07-07 13:20:04 +02:00
77f1c16414 fix #84 2021-07-03 14:31:34 +02:00
c5363a1538 pre/post cmd tests 2021-06-22 12:43:52 +02:00
119225ba5b bump 2021-06-18 17:27:41 +02:00
84437ee1d0 pre/post snapshot polishing. still needs test-scripts 2021-06-18 17:16:18 +02:00
1286bfafd0 Merge pull request #80 from tuffnatty/pre-post-snapshot-cmd
Add support for pre- and post-snapshot scripts (#39)
2021-06-17 11:48:09 +02:00
9fc2703638 Always call post-snapshot-cmd to clean up faster on snapshot failure 2021-06-17 12:38:16 +03:00
01dc65af96 Merge pull request #81 from tuffnatty/another-typo
Another typo fix
2021-06-17 11:21:08 +02:00
082153e0ce A more robust MySQL example 2021-06-17 09:52:18 +03:00
77f5474447 Fix tests. 2021-06-16 23:37:38 +03:00
55ff14f1d8 Allow multiple --pre/post-snapshot-cmd options. Add a usage example. 2021-06-16 23:19:29 +03:00
2acd26b304 Fix a typo 2021-06-16 19:52:06 +03:00
ec9459c1d2 Use shlex.split() for --pre-snapshot-cmd and --post-snapshot-cmd 2021-06-16 19:35:41 +03:00
233fd83ded Merge pull request #79 from tuffnatty/typos
Fix a couple of typos
2021-06-16 16:46:10 +02:00
37c24e092c Merge pull request #78 from tuffnatty/patch-1
Typo fix in argument name in warning message
2021-06-16 16:45:42 +02:00
b2bf11382c Add --pre-snapshot-cmd and --post-snapshot-cmd options 2021-06-16 16:12:20 +03:00
19b918044e Fix a couple of typos 2021-06-16 13:19:47 +03:00
67d9240e7b Typo fix in argument name in warning message 2021-06-16 00:32:31 +03:00
1a5e4a9cdd doc 2021-05-31 22:33:44 +02:00
31f8c359ff update version number 2021-05-31 22:19:28 +02:00
b50b7b7563 test 2021-05-31 22:10:56 +02:00
37f91e1e08 no longer use zfs send --compressed as default. uses --zfs-compressed to reenable it. fixes #77 . 2021-05-31 22:02:31 +02:00
19 changed files with 644 additions and 340 deletions

View File

@ -27,7 +27,7 @@ jobs:
- name: Coveralls - name: Coveralls
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github run: coveralls --service=github || true
ubuntu18: ubuntu18:
runs-on: ubuntu-18.04 runs-on: ubuntu-18.04
@ -49,7 +49,7 @@ jobs:
- name: Coveralls - name: Coveralls
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github run: coveralls --service=github || true
ubuntu18_python2: ubuntu18_python2:
runs-on: ubuntu-18.04 runs-on: ubuntu-18.04
@ -73,4 +73,4 @@ jobs:
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github run: coveralls --service=github || true

View File

@ -279,6 +279,8 @@ root@ws1:~# zfs-autobackup test --verbose
This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy. This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy.
**Note**: In this mode it doesn't take a specified target-schedule into account when thinning; it only knows a snapshot is the common snapshot by looking at the holds. So make sure your source-schedule keeps the snapshots you still want to transfer at a later point.
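For example, a minimal sketch of this split workflow (the backup name, target path and hostname are just placeholders):

```sh
# During the day: create snapshots only, by omitting the target-path
zfs-autobackup data_offsite1 --verbose

# At night: transfer the accumulated snapshots without creating new ones
zfs-autobackup data_offsite1 backuppool/backups \
    --ssh-target backupserver --no-snapshot --verbose
```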
## Thinning out obsolete snapshots ## Thinning out obsolete snapshots
The thinner is the thing that destroys old snapshots on the source and target. The thinner is the thing that destroys old snapshots on the source and target.
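As an illustrative sketch of tuning the thinner, the schedule strings below are the defaults shown in the usage section further down; the interpretation in the comments is our reading of that format, and the dataset names are placeholders:

```sh
# Keep the last 10 snapshots, plus (roughly) one per day for a week,
# one per week for a month and one per month for a year, on both sides.
zfs-autobackup data_offsite1 backuppool/backups \
    --keep-source=10,1d1w,1w1m,1m1y \
    --keep-target=10,1d1w,1w1m,1m1y \
    --verbose
```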
@ -396,11 +398,18 @@ Note 1: The --encrypt option will rely on inheriting encryption parameters from
Note 2: Decide what you want at an early stage: If you change the --encrypt or --decrypt parameter after the inital sync you might get weird and wonderfull errors. (nothing dangerous) Note 2: Decide what you want at an early stage: If you change the --encrypt or --decrypt parameter after the inital sync you might get weird and wonderfull errors. (nothing dangerous)
I'll add some tips when the issues start to get in on github. :) **Some common errors while using zfs encryption:**
```
cannot receive incremental stream: kernel modules must be upgraded to receive this stream.
```
This happens if you forget to use --encrypt while the target datasets are already encrypted. (A very strange error message indeed.)
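A hedged sketch of using --encrypt, based on Note 1 above (encryption parameters are inherited from the target's parent dataset; all names here are placeholders):

```sh
# One-time: create an encrypted parent dataset on the target side
zfs create -o encryption=on -o keyformat=passphrase backuppool/encryptedbackups

# Receive the backups below it; --encrypt asks zfs-autobackup to encrypt the
# data on the receiving side, inheriting the parent's encryption parameters
zfs-autobackup data_offsite1 backuppool/encryptedbackups --encrypt --verbose
```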
## Transfer buffering, compression and rate limiting. ## Transfer buffering, compression and rate limiting.
If you're transferring over a slow link it might be useful to use `--compress=zstd-fast`. This will compres the data before sending, so it uses less bandwidth. If you're transferring over a slow link it might be useful to use `--compress=zstd-fast`. This will compress the data before sending, so it uses less bandwidth. An alternative to this is to use --zfs-compressed: This will transfer blocks that already have compression intact. (--compress will usually compress much better but uses much more resources. --zfs-compressed uses the least resources, but can be a disadvantage if you want to use a different compression method on the target.)
You can also limit the datarate by using the `--rate` option. You can also limit the datarate by using the `--rate` option.
@ -417,6 +426,28 @@ zfs send -> send buffer -> custom send pipes -> compression -> transfer rate lim
#### On the receiving side: #### On the receiving side:
decompression -> custom recv pipes -> buffer -> zfs recv decompression -> custom recv pipes -> buffer -> zfs recv
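Putting those options together, a hedged example (host and dataset names are placeholders; --buffer and --rate need mbuffer installed, as noted in the usage section):

```sh
zfs-autobackup data_offsite1 backuppool/backups \
    --ssh-target backupserver \
    --compress=zstd-fast \
    --buffer=128M \
    --rate=1M \
    --verbose
```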
## Running custom commands before and after snapshotting
You can run commands before and after the snapshot, for example to freeze databases so that the on-disk data is consistent before snapshotting.
The commands will be executed on the source side. Use the `--pre-snapshot-cmd` and `--post-snapshot-cmd` options for this.
For example:
```sh
zfs-autobackup \
--pre-snapshot-cmd 'daemon -f jexec mysqljail1 mysql -s -e "set autocommit=0;flush logs;flush tables with read lock;\\! echo \$\$ > /tmp/mysql_lock.pid && sleep 60"' \
--pre-snapshot-cmd 'daemon -f jexec mysqljail2 mysql -s -e "set autocommit=0;flush logs;flush tables with read lock;\\! echo \$\$ > /tmp/mysql_lock.pid && sleep 60"' \
--post-snapshot-cmd 'pkill -F /jails/mysqljail1/tmp/mysql_lock.pid' \
--post-snapshot-cmd 'pkill -F /jails/mysqljail2/tmp/mysql_lock.pid' \
backupfs1
```
Failure handling during pre/post commands:
* If a pre-command fails, zfs-autobackup will exit with an error (after executing the post-commands).
* All post-commands are always executed, even if the pre-commands or the actual snapshot failed. This way you can be sure everything is always cleaned up and unfrozen.
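Because the post-commands also run when the pre-command or the snapshot failed, it helps to keep them idempotent. A hedged sketch (the scripts are placeholders, not part of zfs-autobackup):

```sh
zfs-autobackup \
    --pre-snapshot-cmd '/usr/local/bin/freeze-app.sh' \
    --post-snapshot-cmd '/usr/local/bin/thaw-app.sh || true' \
    backupfs1
```

Here the `|| true` keeps a thaw attempt on an application that was never frozen from registering as a failure of its own.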
## Tips ## Tips
* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error. * Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
@ -488,26 +519,25 @@ Look in man ssh_config for many more options.
## Usage ## Usage
(NOTE: Quite a lot has changed since the current stable version 3.0. The page you are viewing is for upcoming version 3.1 which is still in beta.)
```console ```console
usage: zfs-autobackup [-h] [--ssh-config CONFIG-FILE] [--ssh-source USER@HOST] usage: zfs-autobackup [-h] [--ssh-config CONFIG-FILE] [--ssh-source USER@HOST]
[--ssh-target USER@HOST] [--keep-source SCHEDULE] [--ssh-target USER@HOST] [--keep-source SCHEDULE]
[--keep-target SCHEDULE] [--other-snapshots] [--keep-target SCHEDULE] [--pre-snapshot-cmd COMMAND]
[--no-snapshot] [--no-send] [--no-thinning] [--no-holds] [--post-snapshot-cmd COMMAND] [--other-snapshots]
[--min-change BYTES] [--allow-empty] [--no-snapshot] [--no-send] [--no-thinning] [--no-holds]
[--ignore-replicated] [--strip-path N] [--min-change BYTES] [--allow-empty] [--ignore-replicated]
[--clear-refreservation] [--clear-mountpoint] [--strip-path N] [--clear-refreservation]
[--filter-properties PROPERY,...] [--clear-mountpoint] [--filter-properties PROPERTY,...]
[--set-properties PROPERTY=VALUE,...] [--rollback] [--set-properties PROPERTY=VALUE,...] [--rollback]
[--destroy-incompatible] [--destroy-missing SCHEDULE] [--destroy-incompatible] [--destroy-missing SCHEDULE]
[--ignore-transfer-errors] [--decrypt] [--encrypt] [--ignore-transfer-errors] [--decrypt] [--encrypt]
[--test] [--verbose] [--debug] [--debug-output] [--zfs-compressed] [--test] [--verbose] [--debug]
[--progress] [--send-pipe COMMAND] [--recv-pipe COMMAND] [--debug-output] [--progress] [--send-pipe COMMAND]
[--compress TYPE] [--rate DATARATE] [--buffer SIZE] [--recv-pipe COMMAND] [--compress TYPE] [--rate DATARATE]
backup-name [target-path] [--buffer SIZE]
backup-name [target-path]
zfs-autobackup v3.1-beta6 - (c)2021 E.H.Eefting (edwin@datux.nl) zfs-autobackup v3.1 - (c)2021 E.H.Eefting (edwin@datux.nl)
positional arguments: positional arguments:
backup-name Name of the backup (you should set the zfs property backup-name Name of the backup (you should set the zfs property
@ -531,6 +561,12 @@ optional arguments:
--keep-target SCHEDULE --keep-target SCHEDULE
Thinning schedule for old target snapshots. Default: Thinning schedule for old target snapshots. Default:
10,1d1w,1w1m,1m1y 10,1d1w,1w1m,1m1y
--pre-snapshot-cmd COMMAND
Run COMMAND before snapshotting (can be used multiple
times.
--post-snapshot-cmd COMMAND
Run COMMAND after snapshotting (can be used multiple
times.
--other-snapshots Send over other snapshots as well, not just the ones --other-snapshots Send over other snapshots as well, not just the ones
created by this tool. created by this tool.
--no-snapshot Don't create new snapshots (useful for finishing --no-snapshot Don't create new snapshots (useful for finishing
@ -547,7 +583,6 @@ optional arguments:
--ignore-replicated Ignore datasets that seem to be replicated some other --ignore-replicated Ignore datasets that seem to be replicated some other
way. (No changes since lastest snapshot. Useful for way. (No changes since lastest snapshot. Useful for
proxmox HA replication) proxmox HA replication)
--strip-path N Number of directories to strip from target path (use 1 --strip-path N Number of directories to strip from target path (use 1
when cloning zones between 2 SmartOS machines) when cloning zones between 2 SmartOS machines)
--clear-refreservation --clear-refreservation
@ -556,7 +591,7 @@ optional arguments:
--clear-mountpoint Set property canmount=noauto for new datasets. --clear-mountpoint Set property canmount=noauto for new datasets.
(recommended, prevents mount conflicts. same as --set- (recommended, prevents mount conflicts. same as --set-
properties canmount=noauto) properties canmount=noauto)
--filter-properties PROPERY,... --filter-properties PROPERTY,...
List of properties to "filter" when receiving List of properties to "filter" when receiving
filesystems. (you can still restore them with zfs filesystems. (you can still restore them with zfs
inherit -S) inherit -S)
@ -579,6 +614,8 @@ optional arguments:
filesystem exists. useful for acltype errors) filesystem exists. useful for acltype errors)
--decrypt Decrypt data before sending it over. --decrypt Decrypt data before sending it over.
--encrypt Encrypt data after receiving it. --encrypt Encrypt data after receiving it.
--zfs-compressed Transfer blocks that already have zfs-compression as-
is.
--test dont change anything, just show what would be done --test dont change anything, just show what would be done
(still does all read-only operations) (still does all read-only operations)
--verbose verbose output --verbose verbose output
@ -591,15 +628,16 @@ optional arguments:
multiple times) multiple times)
--recv-pipe COMMAND pipe zfs recv input through COMMAND (can be used --recv-pipe COMMAND pipe zfs recv input through COMMAND (can be used
multiple times) multiple times)
--compress TYPE Use compression during transfer, zstd-fast --compress TYPE Use compression during transfer, defaults to zstd-adapt
recommended. (zstd-slow, xz, pigz-fast, lz4, pigz- if TYPE is not specified. (gzip, pigz-fast, pigz-slow,
slow, zstd-fast, gzip, lzo) zstd-fast, zstd-slow, zstd-adapt, xz, lzo, lz4)
--rate DATARATE Limit data transfer rate (e.g. 128K. requires --rate DATARATE Limit data transfer rate (e.g. 128K. requires
mbuffer.) mbuffer.)
--buffer SIZE Add zfs send and recv buffers to smooth out IO bursts. --buffer SIZE Add zfs send and recv buffers to smooth out IO bursts.
(e.g. 128M. requires mbuffer) (e.g. 128M. requires mbuffer)
Full manual at: https://github.com/psy0rz/zfs_autobackup Full manual at: https://github.com/psy0rz/zfs_autobackup
``` ```
## Troubleshooting ## Troubleshooting
@ -725,7 +763,7 @@ for HOST in $HOSTS; do
ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data" ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data"
#backup data filesystems to a common directory #backup data filesystems to a common directory
zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 200000 --no-holds $@ zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 300000 --no-holds $@
zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null

View File

@ -13,10 +13,10 @@ class TestZfsNode(unittest2.TestCase):
def test_destroymissing(self): def test_destroymissing(self):
#initial backup #initial backup
with patch('time.strftime', return_value="10101111000000"): #1000 years in past with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="20101111000000"): #far in past with patch('time.strftime', return_value="test-20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
@ -40,7 +40,7 @@ class TestZfsNode(unittest2.TestCase):
print(buf.getvalue()) print(buf.getvalue())
#should have done the snapshot cleanup for destoy missing: #should have done the snapshot cleanup for destoy missing:
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue()) self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue()) self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
@ -130,6 +130,6 @@ test_target1/test_source1
test_target1/test_source2 test_target1/test_source2
test_target1/test_source2/fs2 test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-10101111000000 test_target1/test_source2/fs2/sub@test-19101111000000
test_target1/test_source2/fs2/sub@test-20101111000000 test_target1/test_source2/fs2/sub@test-20101111000000
""") """)

View File

@ -48,11 +48,11 @@ class TestZfsEncryption(unittest2.TestCase):
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
@ -85,11 +85,11 @@ test_target1/test_source2/fs2/sub encryption
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource") self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
@ -120,11 +120,11 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource") self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
@ -155,14 +155,14 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource") self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run()) "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split( "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run()) " ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run()) "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(

View File

@ -32,7 +32,7 @@ class TestExternalFailures(unittest2.TestCase):
def test_initial_resume(self): def test_initial_resume(self):
# inital backup, leaves resume token # inital backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume() self.generate_resume()
# --test should resume and succeed # --test should resume and succeed
@ -81,11 +81,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_incremental_resume(self): def test_incremental_resume(self):
# initial backup # initial backup
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup leaves resume token # incremental backup leaves resume token
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume() self.generate_resume()
# --test should resume and succeed # --test should resume and succeed
@ -138,7 +138,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version") self.skipTest("Resume not supported in this ZFS userspace version")
# inital backup, leaves resume token # inital backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume() self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid # remove corresponding source snapshot, so it becomes invalid
@ -148,11 +148,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true") shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
# --test try again, should abort old resume # --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume # try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1") r = shelltest("zfs list -H -o name -r -t all test_target1")
@ -176,22 +176,22 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version") self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup # initial backup
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# icremental backup, leaves resume token # icremental backup, leaves resume token
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume() self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid # remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001") shelltest("zfs destroy test_source1/fs1@test-20101111000001")
# --test try again, should abort old resume # --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume # try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1") r = shelltest("zfs list -H -o name -r -t all test_target1")
@ -215,17 +215,17 @@ test_target1/test_source2/fs2/sub@test-20101111000000
if "0.6.5" in ZFS_USERSPACE: if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version") self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume # generate resume
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume() self.generate_resume()
with OutputIO() as buf: with OutputIO() as buf:
with redirect_stdout(buf): with redirect_stdout(buf):
# incremental, doesnt want previous anymore # incremental, doesnt want previous anymore
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run()) "test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
@ -253,11 +253,11 @@ test_target1/test_source2/fs2/sub@test-20101111000002
if "0.6.5" in ZFS_USERSPACE: if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version") self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume # generate resume
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume() self.generate_resume()
shelltest("zfs destroy test_source1/fs1@test-20101111000001") shelltest("zfs destroy test_source1/fs1@test-20101111000001")
@ -265,7 +265,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
with OutputIO() as buf: with OutputIO() as buf:
with redirect_stdout(buf): with redirect_stdout(buf):
# incremental, doesnt want previous anymore # incremental, doesnt want previous anymore
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split( "test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run()) " ")).run())
@ -277,14 +277,14 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_missing_common(self): def test_missing_common(self):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# remove common snapshot and leave nothing # remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000") shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000") shelltest("zfs destroy test_source1/fs1@test-20101111000000")
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#UPDATE: offcourse the one thing that wasn't tested had a bug :( (in ExecuteNode.run()). #UPDATE: offcourse the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
@ -295,7 +295,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
# #recreate target pool without any features # #recreate target pool without any features
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2") # # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
# #
# with patch('time.strftime', return_value="20101111000000"): # with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run()) # self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
# #
# r = shelltest("zfs list -H -o name -r -t all test_target1") # r = shelltest("zfs list -H -o name -r -t all test_target1")

View File

@ -8,12 +8,98 @@ class TestZfsNode(unittest2.TestCase):
prepare_zpools() prepare_zpools()
self.longMessage=True self.longMessage=True
# #resume initial backup def test_keepsource0target10queuedsend(self):
# def test_keepsource0(self): """Test if thinner doesnt destroy too much early on if there are no common snapshots YET. Issue #84"""
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")
def test_excludepaths(self):
"""Test issue #103"""
shelltest("zfs create test_target1/target_shouldnotbeexcluded")
shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
shelltest("zfs create test_target1/target")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1/target --no-progress --verbose --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/target
test_target1/target/test_source1
test_target1/target/test_source1/fs1
test_target1/target/test_source1/fs1@test-20101111000000
test_target1/target/test_source1/fs1/sub
test_target1/target/test_source1/fs1/sub@test-20101111000000
test_target1/target/test_source2
test_target1/target/test_source2/fs2
test_target1/target/test_source2/fs2/sub
test_target1/target/test_source2/fs2/sub@test-20101111000000
test_target1/target/test_target1
test_target1/target/test_target1/target_shouldnotbeexcluded
test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
test_target1/target_shouldnotbeexcluded
test_target1/target_shouldnotbeexcluded@test-20101111000000
""")
# #somehow only specifying --allow-empty --keep-source 0 failed:
# with patch('time.strftime', return_value="20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
# with patch('time.strftime', return_value="20101111000001"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())

View File

@ -33,7 +33,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0 run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p: with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"): with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
@ -46,7 +46,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0 run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p: with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"): with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
@ -73,7 +73,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0 run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p: with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"): with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
@ -87,7 +87,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0 run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p: with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"): with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

View File

@ -16,25 +16,25 @@ class TestSendRecvPipes(unittest2.TestCase):
with self.subTest("local local pipe"): with self.subTest("local local pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"): with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"): with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"): with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
def test_compress(self): def test_compress(self):
@ -43,7 +43,7 @@ class TestSendRecvPipes(unittest2.TestCase):
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys(): for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
with self.subTest("compress "+compress): with self.subTest("compress "+compress):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
@ -53,25 +53,25 @@ class TestSendRecvPipes(unittest2.TestCase):
with self.subTest("local local pipe"): with self.subTest("local local pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"): with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"): with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub") shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"): with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
def test_rate(self): def test_rate(self):
@ -79,7 +79,7 @@ class TestSendRecvPipes(unittest2.TestCase):
start=time.time() start=time.time()
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run()) self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
#not a great way of verifying but it works. #not a great way of verifying but it works.

View File

@ -33,7 +33,7 @@ class TestZfsAutobackup(unittest2.TestCase):
def test_snapshotmode(self): def test_snapshotmode(self):
"""test snapshot tool mode""" """test snapshot tool mode"""
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -57,7 +57,7 @@ test_target1
with self.subTest("no datasets selected"): with self.subTest("no datasets selected"):
with OutputIO() as buf: with OutputIO() as buf:
with redirect_stderr(buf): with redirect_stderr(buf):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run()) self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())
print(buf.getvalue()) print(buf.getvalue())
@ -67,7 +67,7 @@ test_target1
with self.subTest("defaults with full verbose and debug"): with self.subTest("defaults with full verbose and debug"):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -96,7 +96,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""") """)
with self.subTest("bare defaults, allow empty"): with self.subTest("bare defaults, allow empty"):
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())
@ -167,14 +167,14 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
#make sure time handling is correctly. try to make snapshots a year appart and verify that only snapshots mostly 1y old are kept #make sure time handling is correctly. try to make snapshots a year appart and verify that only snapshots mostly 1y old are kept
with self.subTest("test time checking"): with self.subTest("test time checking"):
with patch('time.strftime', return_value="20111111000000"): with patch('time.strftime', return_value="test-20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
time_str="20111112000000" #month in the "future" time_str="20111112000000" #month in the "future"
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S")) future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
with patch('time.time', return_value=future_timestamp): with patch('time.time', return_value=future_timestamp):
with patch('time.strftime', return_value="20111111000001"): with patch('time.strftime', return_value="test-20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())
@ -214,7 +214,7 @@ test_target1/test_source2/fs2/sub@test-20111111000001
r=shelltest("zfs snapshot test_source1/fs1@othersimple") r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000") r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -249,7 +249,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
r=shelltest("zfs snapshot test_source1/fs1@othersimple") r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000") r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -284,7 +284,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_nosnapshot(self): def test_nosnapshot(self):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -308,12 +308,10 @@ test_target1/test_source2/fs2
def test_nosend(self): def test_nosend(self):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created )
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,""" self.assertMultiLineEqual(r,"""
test_source1 test_source1
test_source1/fs1 test_source1/fs1
@ -333,12 +331,10 @@ test_target1
def test_ignorereplicated(self): def test_ignorereplicated(self):
r=shelltest("zfs snapshot test_source1/fs1@otherreplication") r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
#(only parents are created )
#TODO: it probably shouldn't create these
self.assertMultiLineEqual(r,""" self.assertMultiLineEqual(r,"""
test_source1 test_source1
test_source1/fs1 test_source1/fs1
@ -364,7 +360,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_noholds(self): def test_noholds(self):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1") r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
@ -396,7 +392,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
def test_strippath(self): def test_strippath(self):
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -422,6 +418,13 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000 test_target1/fs2/sub@test-20101111000000
""") """)
def test_strippath_collision(self):
with self.assertRaisesRegexp(Exception,"collision"):
ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()
def test_strippath_toomuch(self):
with self.assertRaisesRegexp(Exception,"too much"):
ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()
def test_clearrefres(self): def test_clearrefres(self):
@ -432,7 +435,7 @@ test_target1/fs2/sub@test-20101111000000
r=shelltest("zfs set refreservation=1M test_source1/fs1") r=shelltest("zfs set refreservation=1M test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1") r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
@ -470,7 +473,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
self.skipTest("This zfs-userspace version doesnt support -o") self.skipTest("This zfs-userspace version doesnt support -o")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1") r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
@ -503,18 +506,18 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_rollback(self): def test_rollback(self):
#initial backup #initial backup
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#make change #make change
r=shelltest("zfs mount test_target1/test_source1/fs1") r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt") r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
#should fail (busy) #should fail (busy)
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
#rollback, should succeed #rollback, should succeed
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
@ -522,14 +525,14 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_destroyincompat(self): def test_destroyincompat(self):
#initial backup #initial backup
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#add multiple compatible snapshot (written is still 0) #add multiple compatible snapshot (written is still 0)
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1") r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2") r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
#should be ok, is compatible #should be ok, is compatible
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
@ -539,19 +542,19 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1") r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
#--test should fail, now incompatible #--test should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run()) self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
#should fail, now incompatible #should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run()) self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"): with patch('time.strftime', return_value="test-20101111000003"):
#--test should succeed by destroying incompatibles #--test should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"): with patch('time.strftime', return_value="test-20101111000003"):
#should succeed by destroying incompatibles #should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
@ -589,13 +592,13 @@ test_target1/test_source2/fs2/sub@test-20101111000003
#test all ssh directions #test all ssh directions
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
@ -640,7 +643,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_minchange(self): def test_minchange(self):
#initial #initial
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make small change, use umount to reflect the changes immediately #make small change, use umount to reflect the changes immediately
@ -650,7 +653,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
#too small change, takes no snapshots #too small change, takes no snapshots
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make big change #make big change
@ -658,7 +661,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1") r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
#bigger change, should take snapshot #bigger change, should take snapshot
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -691,7 +694,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_test(self): def test_test(self):
#initial #initial
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -708,12 +711,12 @@ test_target1
""") """)
#actual make initial backup #actual make initial backup
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#test incremental #test incremental
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -749,7 +752,7 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/test_source1") shelltest("zfs create test_target1/test_source1")
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1") shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -782,15 +785,15 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_keep0(self): def test_keep0(self):
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup""" """test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())
#make snapshot, shouldnt delete 0 #make snapshot, shouldnt delete 0
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
#make snapshot 2, shouldnt delete 0 since it has holds, but will delete 1 since it has no holds #make snapshot 2, shouldnt delete 0 since it has holds, but will delete 1 since it has no holds
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS) r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -822,7 +825,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""") """)
#make another backup but with no-holds. we should naturally endup with only number 3 #make another backup but with no-holds. we should naturally endup with only number 3
with patch('time.strftime', return_value="20101111000003"): with patch('time.strftime', return_value="test-20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS) r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -851,8 +854,8 @@ test_target1/test_source2/fs2/sub@test-20101111000003
""") """)
# make snapshot 4, since we used no-holds, it will delete 3 on the source, breaking the backup # run with snapshot-only for 4, since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="20101111000004"): with patch('time.strftime', return_value="test-20101111000004"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS) r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -887,10 +890,10 @@ test_target1/test_source2/fs2/sub@test-20101111000003
r = shelltest("zfs snapshot test_source1@test") r = shelltest("zfs snapshot test_source1@test")
l=LogConsole(show_verbose=True, show_debug=False, color=False) l=LogConsole(show_verbose=True, show_debug=False, color=False)
n=ZfsNode("test",l) n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
d=ZfsDataset(n,"test_source1@test") d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True) sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
with OutputIO() as buf: with OutputIO() as buf:


@@ -10,10 +10,10 @@ class TestZfsAutobackup31(unittest2.TestCase):
def test_no_thinning(self):
-with patch('time.strftime', return_value="20101111000000"):
+with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
-with patch('time.strftime', return_value="20101111000001"):
+with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -54,10 +54,10 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")
-with patch('time.strftime', return_value="20101111000000"):
+with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
-with patch('time.strftime', return_value="20101111000001"):
+with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
@@ -71,4 +71,11 @@ test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")
+def test_zfs_compressed(self):
+with patch('time.strftime', return_value="test-20101111000000"):
+self.assertFalse(
+ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())


@@ -1,6 +1,6 @@
from basetest import *
from zfs_autobackup.LogStub import LogStub
+from zfs_autobackup.ExecuteNode import ExecuteError
class TestZfsNode(unittest2.TestCase):
@@ -9,95 +9,148 @@ class TestZfsNode(unittest2.TestCase):
prepare_zpools()
# return super().setUp()
def test_consistent_snapshot(self):
-logger=LogStub()
+logger = LogStub()
-description="[Source]"
+description = "[Source]"
-node=ZfsNode("test", logger, description=description)
+node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("first snapshot"):
-node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",100000)
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
-r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
+r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
-self.assertEqual(r,"""
+self.assertEqual(r, """
test_source1
test_source1/fs1
-test_source1/fs1@test-1
+test_source1/fs1@test-20101111000001
test_source1/fs1/sub
-test_source1/fs1/sub@test-1
+test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
-test_source2/fs2/sub@test-1
+test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, no snapshot"):
-node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2",1)
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
-r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
+r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
-self.assertEqual(r,"""
+self.assertEqual(r, """
test_source1
test_source1/fs1
-test_source1/fs1@test-1
+test_source1/fs1@test-20101111000001
test_source1/fs1/sub
-test_source1/fs1/sub@test-1
+test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
-test_source2/fs2/sub@test-1
+test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, empty snapshot"):
-node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2",0)
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
-r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
+r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
-self.assertEqual(r,"""
+self.assertEqual(r, """
test_source1
test_source1/fs1
-test_source1/fs1@test-1
+test_source1/fs1@test-20101111000001
-test_source1/fs1@test-2
+test_source1/fs1@test-20101111000002
test_source1/fs1/sub
-test_source1/fs1/sub@test-1
+test_source1/fs1/sub@test-20101111000001
-test_source1/fs1/sub@test-2
+test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
-test_source2/fs2/sub@test-1
+test_source2/fs2/sub@test-20101111000001
-test_source2/fs2/sub@test-2
+test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
+def test_consistent_snapshot_prepostcmds(self):
+logger = LogStub()
+description = "[Source]"
+node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
+with self.subTest("Test if all cmds are executed correctly (no failures)"):
+with OutputIO() as buf:
+with redirect_stdout(buf):
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+0,
+pre_snapshot_cmds=["echo pre1", "echo pre2"],
+post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
+)
+self.assertIn("STDOUT > pre1", buf.getvalue())
+self.assertIn("STDOUT > pre2", buf.getvalue())
+self.assertIn("STDOUT > post1", buf.getvalue())
+self.assertIn("STDOUT > post2", buf.getvalue())
+with self.subTest("Failure in the middle, only pre1 and both post1 and post2 should be executed, no snapshot should be attempted"):
+with OutputIO() as buf:
+with redirect_stdout(buf):
+with self.assertRaises(ExecuteError):
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+0,
+pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
+post_snapshot_cmds=["echo post1", "false", "echo post2"]
+)
+print(buf.getvalue())
+self.assertIn("STDOUT > pre1", buf.getvalue())
+self.assertNotIn("STDOUT > pre2", buf.getvalue())
+self.assertIn("STDOUT > post1", buf.getvalue())
+self.assertIn("STDOUT > post2", buf.getvalue())
+with self.subTest("Snapshot fails"):
+with OutputIO() as buf:
+with redirect_stdout(buf):
+with self.assertRaises(ExecuteError):
+#same snapshot name as before so it fails
+node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+0,
+pre_snapshot_cmds=["echo pre1", "echo pre2"],
+post_snapshot_cmds=["echo post1", "echo post2"]
+)
+print(buf.getvalue())
+self.assertIn("STDOUT > pre1", buf.getvalue())
+self.assertIn("STDOUT > pre2", buf.getvalue())
+self.assertIn("STDOUT > post1", buf.getvalue())
+self.assertIn("STDOUT > post2", buf.getvalue())
def test_getselected(self):
-logger=LogStub()
-description="[Source]"
-node=ZfsNode("test", logger, description=description)
-s=pformat(node.selected_datasets(exclude_paths=[], exclude_received=False))
+# should be excluded by property
+shelltest("zfs create test_source1/fs1/subexcluded")
+shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
+# should be excluded by being unchanged
+shelltest("zfs create test_source1/fs1/unchanged")
+shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
+logger = LogStub()
+description = "[Source]"
+node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
+s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
print(s)
-#basics
-self.assertEqual (s, """[(local): test_source1/fs1,
-(local): test_source1/fs1/sub,
-(local): test_source2/fs2/sub]""")
-#caching, so expect same result after changing it
-subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
-self.assertEqual (s, """[(local): test_source1/fs1,
+# basics
+self.assertEqual(s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
def test_validcommand(self):
-logger=LogStub()
+logger = LogStub()
-description="[Source]"
+description = "[Source]"
-node=ZfsNode("test", logger, description=description)
+node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
@@ -105,21 +158,19 @@ test_target1
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
def test_supportedsendoptions(self):
-logger=LogStub()
+logger = LogStub()
-description="[Source]"
+description = "[Source]"
-node=ZfsNode("test", logger, description=description)
+node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
# -D propably always supported
-self.assertGreater(len(node.supported_send_options),0)
+self.assertGreater(len(node.supported_send_options), 0)
def test_supportedrecvoptions(self):
-logger=LogStub()
+logger = LogStub()
-description="[Source]"
+description = "[Source]"
-#NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
+# NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
-node=ZfsNode("test", logger, description=description, ssh_to='localhost')
+node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)
if __name__ == '__main__':
unittest.main()


@@ -1,8 +1,8 @@
import os
import select
import subprocess
-from zfs_autobackup.CmdPipe import CmdPipe, CmdItem
+from .CmdPipe import CmdPipe, CmdItem
-from zfs_autobackup.LogStub import LogStub
+from .LogStub import LogStub
try:
from shlex import quote as cmd_quote


@@ -1,6 +1,6 @@
import time
-from zfs_autobackup.ThinnerRule import ThinnerRule
+from .ThinnerRule import ThinnerRule
class Thinner:


@@ -2,21 +2,19 @@ import argparse
import sys
import time
-from zfs_autobackup import compressors
+from . import compressors
-from zfs_autobackup.ExecuteNode import ExecuteNode
+from .ExecuteNode import ExecuteNode
-from zfs_autobackup.Thinner import Thinner
+from .Thinner import Thinner
-from zfs_autobackup.ZfsDataset import ZfsDataset
+from .ZfsDataset import ZfsDataset
-from zfs_autobackup.LogConsole import LogConsole
+from .LogConsole import LogConsole
-from zfs_autobackup.ZfsNode import ZfsNode
+from .ZfsNode import ZfsNode
-from zfs_autobackup.ThinnerRule import ThinnerRule
+from .ThinnerRule import ThinnerRule
-import os.path
class ZfsAutobackup:
"""main class"""
-VERSION = "3.1-rc2"
+VERSION = "3.1.2-rc2"
HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
def __init__(self, argv, print_arguments=True):
@@ -38,13 +36,17 @@ class ZfsAutobackup:
parser.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old target snapshots. Default: %(default)s')
-parser.add_argument('backup_name', metavar='backup-name',
+parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
help='Name of the backup (you should set the zfs property "autobackup:backup-name" to '
'true on filesystems you want to backup')
-parser.add_argument('target_path', metavar='target-path', default=None, nargs='?',
+parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional: if not specified, zfs-autobackup will only operate '
'as snapshot-tool on source)')
+parser.add_argument('--pre-snapshot-cmd', metavar="COMMAND", default=[], action='append',
+help='Run COMMAND before snapshotting (can be used multiple times.')
+parser.add_argument('--post-snapshot-cmd', metavar="COMMAND", default=[], action='append',
+help='Run COMMAND after snapshotting (can be used multiple times.')
parser.add_argument('--other-snapshots', action='store_true',
help='Send over other snapshots as well, not just the ones created by this tool.')
parser.add_argument('--no-snapshot', action='store_true',
@@ -59,15 +61,16 @@ class ZfsAutobackup:
'default)s)')
parser.add_argument('--allow-empty', action='store_true',
help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
-parser.add_argument('--ignore-replicated', action='store_true',
-help='Ignore datasets that seem to be replicated some other way. (No changes since '
-'lastest snapshot. Useful for proxmox HA replication)')
+parser.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
+parser.add_argument('--exclude-unchanged', action='store_true',
+help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
+parser.add_argument('--exclude-received', action='store_true',
+help='Exclude datasets that have the origin of their autobackup: property as "received". '
+'This can avoid recursive replication between two backup partners.')
parser.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)')
-# parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer.
-# (e.g. --buffer 1G) Will also show nice progress output.')
parser.add_argument('--clear-refreservation', action='store_true',
help='Filter "refreservation" property. (recommended, safes space. same as '
@@ -75,7 +78,7 @@ class ZfsAutobackup:
parser.add_argument('--clear-mountpoint', action='store_true',
help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
'conflicts. same as --set-properties canmount=noauto)')
-parser.add_argument('--filter-properties', metavar='PROPERY,...', type=str,
+parser.add_argument('--filter-properties', metavar='PROPERTY,...', type=str,
help='List of properties to "filter" when receiving filesystems. (you can still restore '
'them with zfs inherit -S)')
parser.add_argument('--set-properties', metavar='PROPERTY=VALUE,...', type=str,
@@ -84,6 +87,8 @@ class ZfsAutobackup:
parser.add_argument('--rollback', action='store_true',
help='Rollback changes to the latest target snapshot before starting. (normally you can '
'prevent changes by setting the readonly property on the target_path to on)')
+parser.add_argument('--force', '-F', action='store_true',
+help='Use zfs -F option to force overwrite/rollback. (Usefull with --strip-path=1, but use with care)')
parser.add_argument('--destroy-incompatible', action='store_true',
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
parser.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
@@ -99,11 +104,14 @@ class ZfsAutobackup:
parser.add_argument('--encrypt', action='store_true',
help='Encrypt data after receiving it.')
-parser.add_argument('--test', action='store_true',
-help='dont change anything, just show what would be done (still does all read-only '
+parser.add_argument('--zfs-compressed', action='store_true',
+help='Transfer blocks that already have zfs-compression as-is.')
+parser.add_argument('--test','--dry-run', '-n', action='store_true',
+help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
-parser.add_argument('--verbose', action='store_true', help='verbose output')
+parser.add_argument('--verbose','-v', action='store_true', help='verbose output')
-parser.add_argument('--debug', action='store_true',
+parser.add_argument('--debug','-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
parser.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
@@ -114,25 +122,40 @@ class ZfsAutobackup:
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)
-parser.add_argument('--exclude-received', action='store_true',
-help=argparse.SUPPRESS) # probably never needed anymore
-#these things all do stuff by piping zfs send/recv IO
+# these things all do stuff by piping zfs send/recv IO
parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND (can be used multiple times)')
parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs recv input through COMMAND (can be used multiple times)')
-parser.add_argument('--compress', metavar='TYPE', default=None, choices=compressors.choices(), help='Use compression during transfer, zstd-fast recommended. ({})'.format(", ".join(compressors.choices())))
-parser.add_argument('--rate', metavar='DATARATE', default=None, help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
-parser.add_argument('--buffer', metavar='SIZE', default=None, help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
+parser.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
+choices=compressors.choices(),
+help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
+", ".join(compressors.choices())))
+parser.add_argument('--rate', metavar='DATARATE', default=None,
+help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
+parser.add_argument('--buffer', metavar='SIZE', default=None,
+help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
+parser.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
+help='Snapshot naming format. Default: %(default)s')
+parser.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
+help='Select property naming format. Default: %(default)s')
+parser.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
+help='Hold naming format. Default: %(default)s')
+parser.add_argument('--version', action='store_true',
+help='Show version.')
# note args is the only global variable we use, since its a global readonly setting anyway
args = parser.parse_args(argv)
self.args = args
+if args.version:
+print(self.HEADER)
+sys.exit(255)
# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True
@@ -152,6 +175,11 @@ class ZfsAutobackup:
self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose, color=sys.stdout.isatty())
self.verbose(self.HEADER)
+if args.backup_name==None:
+parser.print_usage()
+self.log.error("Please specify BACKUP-NAME")
+sys.exit(255)
if args.resume:
self.warning("The --resume option isn't needed anymore (its autodetected now)")
@@ -166,6 +194,13 @@ class ZfsAutobackup:
if args.compress and args.ssh_source is None and args.ssh_target is None:
self.warning("Using compression, but transfer is local.")
+if args.compress and args.zfs_compressed:
+self.warning("Using --compress with --zfs-compressed, might be inefficient.")
+if args.ignore_replicated:
+self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
+args.exclude_unchanged = True
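
With this shim in place the old spelling keeps working: --ignore-replicated is hidden from the help output, prints the rename warning and simply switches on exclude_unchanged, so the two invocations below should select the same datasets (example in the test suite's style):

    ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run()
    ZfsAutobackup("test test_target1 --no-progress --verbose --exclude-unchanged".split(" ")).run()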
def verbose(self, txt):
self.log.verbose(txt)
@@ -278,12 +313,12 @@ class ZfsAutobackup:
def get_send_pipes(self, logger):
"""determine the zfs send pipe"""
-ret=[]
+ret = []
# IO buffer
if self.args.buffer:
logger("zfs send buffer : {}".format(self.args.buffer))
-ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m"+self.args.buffer ])
+ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])
# custom pipes
for send_pipe in self.args.send_pipe:
@@ -292,27 +327,26 @@ class ZfsAutobackup:
logger("zfs send custom pipe : {}".format(send_pipe))
# compression
-if self.args.compress!=None:
+if self.args.compress != None:
ret.append(ExecuteNode.PIPE)
-cmd=compressors.compress_cmd(self.args.compress)
+cmd = compressors.compress_cmd(self.args.compress)
ret.extend(cmd)
logger("zfs send compression : {}".format(" ".join(cmd)))
# transfer rate
if self.args.rate:
logger("zfs send transfer rate : {}".format(self.args.rate))
-ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R"+self.args.rate ])
+ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])
return ret
def get_recv_pipes(self, logger):
-ret=[]
+ret = []
# decompression
-if self.args.compress!=None:
+if self.args.compress != None:
-cmd=compressors.decompress_cmd(self.args.compress)
+cmd = compressors.decompress_cmd(self.args.compress)
ret.extend(cmd)
ret.append(ExecuteNode.PIPE)
logger("zfs recv decompression : {}".format(" ".join(cmd)))
@@ -325,13 +359,36 @@ class ZfsAutobackup:
# IO buffer
if self.args.buffer:
-#only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
+# only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
-if self.args.ssh_source!=None or self.args.ssh_target!=None or self.args.recv_pipe or self.args.send_pipe or self.args.compress!=None:
+if self.args.ssh_source != None or self.args.ssh_target != None or self.args.recv_pipe or self.args.send_pipe or self.args.compress != None:
logger("zfs recv buffer : {}".format(self.args.buffer))
-ret.extend(["mbuffer", "-q", "-s128k", "-m"+self.args.buffer, ExecuteNode.PIPE ])
+ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])
return ret
+def make_target_name(self, source_dataset):
+"""make target_name from a source_dataset"""
+stripped=source_dataset.lstrip_path(self.args.strip_path)
+if stripped!="":
+return self.args.target_path + "/" + stripped
+else:
+return self.args.target_path
+def check_target_names(self, source_node, source_datasets, target_node):
+"""check all target names for collesions etc due to strip-options"""
+self.debug("Checking target names:")
+target_datasets={}
+for source_dataset in source_datasets:
+target_name = self.make_target_name(source_dataset)
+source_dataset.debug("-> {}".format(target_name))
+if target_name in target_datasets:
+raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
+target_datasets[target_name]=source_dataset
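
To make the collision check concrete with the test pools used earlier: with --strip-path=2 both test_source1/fs1/sub and test_source2/fs2/sub lose their first two path components and map onto the same target, which is exactly what test_strippath_collision expects to blow up. A plain-string sketch of the mapping (not the real ZfsDataset objects):

    target_path = "test_target1"
    strip = 2
    for name in ["test_source1/fs1/sub", "test_source2/fs2/sub"]:
        stripped = "/".join(name.split("/")[strip:])
        print(name, "->", target_path + "/" + stripped if stripped else target_path)
    # both datasets print "-> test_target1/sub", so check_target_names() raises "Target collision: ..."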
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides
@@ -340,8 +397,8 @@ class ZfsAutobackup:
:type source_node: ZfsNode
"""
-send_pipes=self.get_send_pipes(source_node.verbose)
+send_pipes = self.get_send_pipes(source_node.verbose)
-recv_pipes=self.get_recv_pipes(target_node.verbose)
+recv_pipes = self.get_recv_pipes(target_node.verbose)
fail_count = 0
count = 0
@@ -355,13 +412,14 @@ class ZfsAutobackup:
try:
# determine corresponding target_dataset
-target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+target_name = self.make_target_name(source_dataset)
target_dataset = ZfsDataset(target_node, target_name)
target_datasets.append(target_dataset)
# ensure parents exists
# TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \
+and target_dataset.parent \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True)
@@ -381,7 +439,8 @@ class ZfsAutobackup:
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
-decrypt=self.args.decrypt, encrypt=self.args.encrypt, )
+decrypt=self.args.decrypt, encrypt=self.args.encrypt,
+zfs_compressed=self.args.zfs_compressed, force=self.args.force)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
@@ -407,20 +466,6 @@ class ZfsAutobackup:
for source_dataset in source_datasets:
source_dataset.thin(skip_holds=True)
-def filter_replicated(self, datasets):
-if not self.args.ignore_replicated:
-return datasets
-else:
-self.set_title("Filtering already replicated filesystems")
-ret = []
-for dataset in datasets:
-if dataset.is_changed(self.args.min_change):
-ret.append(dataset)
-else:
-dataset.verbose("Ignoring, already replicated")
-return ret
def filter_properties_list(self):
if self.args.filter_properties:
@@ -452,6 +497,19 @@ class ZfsAutobackup:
if self.args.test:
self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
+#format all the names
+property_name = self.args.property_format.format(self.args.backup_name)
+snapshot_time_format = self.args.snapshot_format.format(self.args.backup_name)
+hold_name = self.args.hold_format.format(self.args.backup_name)
+self.verbose("")
+self.verbose("Selecting dataset property : {}".format(property_name))
+self.verbose("Snapshot format : {}".format(snapshot_time_format))
+if not self.args.no_holds:
+self.verbose("Hold name : {}".format(hold_name))
################ create source zfsNode
self.set_title("Source settings")
@@ -460,20 +518,20 @@ class ZfsAutobackup:
source_thinner = None
else:
source_thinner = Thinner(self.args.keep_source)
-source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
+source_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description, thinner=source_thinner)
-source_node.verbose(
-"Selects all datasets that have property 'autobackup:{}=true' (or childs of datasets that have "
-"'autobackup:{}=child')".format(
-self.args.backup_name, self.args.backup_name))
################# select source datasets
self.set_title("Selecting")
-#Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanto to replicate an existing backup to another host and use the same backupname/snapshots.
+# Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanted to
+# replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
+# may still need to be used to explicitly exclude a backup with the 'received' source to avoid accidental
+# recursive replication of a zvol that is currently being received in another session (as it will have changes).
exclude_paths = []
-exclude_received=self.args.exclude_received
+exclude_received = self.args.exclude_received
if self.args.ssh_source == self.args.ssh_target:
if self.args.target_path:
# target and source are the same, make sure to exclude target_path
@@ -481,26 +539,27 @@ class ZfsAutobackup:
exclude_paths.append(self.args.target_path)
else:
self.warning("Source and target are on the same host, excluding received datasets from selection.")
-exclude_received=True
+exclude_received = True
-selected_source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
-exclude_paths=exclude_paths)
-if not selected_source_datasets:
+source_datasets = source_node.selected_datasets(property_name=property_name,exclude_received=exclude_received,
+exclude_paths=exclude_paths,
+exclude_unchanged=self.args.exclude_unchanged,
+min_change=self.args.min_change)
+if not source_datasets:
self.error(
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
"you want to select.".format(
self.args.backup_name))
return 255
-# filter out already replicated stuff?
-source_datasets = self.filter_replicated(selected_source_datasets)
################# snapshotting
if not self.args.no_snapshot:
self.set_title("Snapshotting")
-source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(),
-min_changed_bytes=self.args.min_change)
+snapshot_name=time.strftime(snapshot_time_format)
+source_node.consistent_snapshot(source_datasets, snapshot_name,
+min_changed_bytes=self.args.min_change,
+pre_snapshot_cmds=self.args.pre_snapshot_cmd,
+post_snapshot_cmds=self.args.post_snapshot_cmd)
################# sync
# if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
@@ -512,7 +571,7 @@ class ZfsAutobackup:
target_thinner = None
else:
target_thinner = Thinner(self.args.keep_target)
-target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
+target_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
@@ -527,6 +586,9 @@ class ZfsAutobackup:
raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
+# check for collisions due to strip-path
+self.check_target_names(source_node, source_datasets, target_node)
# do the actual sync
# NOTE: even with no_send, no_thinning and no_snapshot it does a usefull thing because it checks if the common snapshots and shows incompatible snapshots
fail_count = self.sync_datasets(


@@ -1,8 +1,8 @@
import re
import time
-from zfs_autobackup.CachedProperty import CachedProperty
+from .CachedProperty import CachedProperty
-from zfs_autobackup.ExecuteNode import ExecuteError
+from .ExecuteNode import ExecuteError
class ZfsDataset:
@@ -79,7 +79,11 @@ class ZfsDataset:
Args:
:type count: int
"""
-return "/".join(self.split_path()[count:])
+components=self.split_path()
+if count>len(components):
+raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))
+return "/".join(components[count:])
def rstrip_path(self, count): def rstrip_path(self, count):
"""return name with last count components stripped """return name with last count components stripped
@@ -112,7 +116,7 @@ class ZfsDataset:
         """true if this dataset is a snapshot"""
         return self.name.find("@") != -1

-    def is_selected(self, value, source, inherited, exclude_received, exclude_paths):
+    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
         """determine if dataset should be selected for backup (called from
         ZfsNode)

@@ -122,6 +126,12 @@ class ZfsDataset:
             :type source: str
             :type inherited: bool
             :type exclude_received: bool
+            :type exclude_unchanged: bool
+            :type min_change: bool
+
+            :param value: Value of the zfs property ("false"/"true"/"child"/"-")
+            :param source: Source of the zfs property ("local"/"received", "-")
+            :param inherited: True of the value/source was inherited from a higher dataset.
         """

         # sanity checks
@@ -135,28 +145,41 @@ class ZfsDataset:
             raise (Exception(
                 "{} autobackup-property has illegal value: '{}'".format(self.name, value)))

+        # non specified, ignore
+        if value == "-":
+            return False
+
+        # only select childs of this dataset, ignore
+        if value == "child" and not inherited:
+            return False
+
+        # manually excluded by property
+        if value == "false":
+            self.verbose("Excluded")
+            return False
+
+        # from here on the dataset is selected by property, now do additional exclusion checks
+
         # our path starts with one of the excluded paths?
         for exclude_path in exclude_paths:
-            if self.name.startswith(exclude_path):
+            # if self.name.startswith(exclude_path):
+            if (self.name + "/").startswith(exclude_path + "/"):
                 # too noisy for verbose
-                self.debug("Excluded (in exclude list)")
+                self.debug("Excluded (path in exclude list)")
                 return False

-        # now determine if its actually selected
-        if value == "false":
-            self.verbose("Excluded (disabled)")
-            return False
-        elif value == "true" or (value == "child" and inherited):
-            if source == "local":
-                self.verbose("Selected")
-                return True
-            elif source == "received":
-                if exclude_received:
-                    self.verbose("Excluded (dataset already received)")
-                    return False
-                else:
-                    self.verbose("Selected")
-                    return True
+        if source == "received":
+            if exclude_received:
+                self.verbose("Excluded (dataset already received)")
+                return False
+
+        if exclude_unchanged and not self.is_changed(min_change):
+            self.verbose("Excluded (unchanged since last snapshot)")
+            return False
+
+        self.verbose("Selected")
+        return True
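
The rewrite flattens the old nested if/elif tree: first the property value decides whether the dataset is selectable at all, then the received and unchanged exclusions are applied in turn. A condensed, ZFS-free sketch of that decision order (the path-exclusion step is omitted for brevity):

    # Condensed sketch of the new selection order; purely illustrative.
    def is_selected(value, source, inherited, exclude_received, exclude_unchanged, changed):
        if value == "-":
            return False                              # property not set
        if value == "child" and not inherited:
            return False                              # only children are wanted
        if value == "false":
            return False                              # explicitly excluded
        if source == "received" and exclude_received:
            return False                              # --exclude-received
        if exclude_unchanged and not changed:
            return False                              # --exclude-unchanged
        return True

    print(is_selected("true", "local", False, True, True, changed=True))      # True
    print(is_selected("true", "received", False, True, True, changed=True))   # False
    print(is_selected("child", "local", False, False, False, changed=True))   # False
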
     @CachedProperty
     def parent(self):

@@ -169,7 +192,11 @@ class ZfsDataset:
         if self.is_snapshot:
             return ZfsDataset(self.zfs_node, self.filesystem_name)
         else:
-            return ZfsDataset(self.zfs_node, self.rstrip_path(1))
+            stripped = self.rstrip_path(1)
+            if stripped:
+                return ZfsDataset(self.zfs_node, stripped)
+            else:
+                return None

     # NOTE: unused for now
     # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
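
parent now returns None once the stripped name is empty, i.e. when the dataset already sits at pool level, instead of constructing a dataset with an empty name. A tiny illustration of why that happens:

    # Why a pool-level dataset now has no parent: stripping the last component
    # of a single-component name leaves an empty string.
    def rstrip_path(name, count):
        return "/".join(name.split("/")[:-count])

    print(rstrip_path("rpool/data/set1", 1))   # rpool/data
    print(rstrip_path("rpool", 1))             # "" -> the caller returns None
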
@@ -296,21 +323,20 @@ class ZfsDataset:
         if min_changed_bytes == 0:
             return True

         if int(self.properties['written']) < min_changed_bytes:
             return False
         else:
             return True

     def is_ours(self):
-        """return true if this snapshot is created by this backup_name"""
-        if re.match("^" + self.zfs_node.backup_name + "-[0-9]*$", self.snapshot_name):
-            return True
-        else:
-            return False
-
-    @property
-    def _hold_name(self):
-        return "zfs_autobackup:" + self.zfs_node.backup_name
+        """return true if this snapshot name has format"""
+        try:
+            test = self.timestamp
+        except ValueError as e:
+            return False
+
+        return True

     @property
     def holds(self):
@@ -322,30 +348,26 @@ class ZfsDataset:
     def is_hold(self):
         """did we hold this snapshot?"""
-        return self._hold_name in self.holds
+        return self.zfs_node.hold_name in self.holds

     def hold(self):
         """hold dataset"""
         self.debug("holding")
-        self.zfs_node.run(["zfs", "hold", self._hold_name, self.name], valid_exitcodes=[0, 1])
+        self.zfs_node.run(["zfs", "hold", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])

     def release(self):
         """release dataset"""
         if self.zfs_node.readonly or self.is_hold():
             self.debug("releasing")
-            self.zfs_node.run(["zfs", "release", self._hold_name, self.name], valid_exitcodes=[0, 1])
+            self.zfs_node.run(["zfs", "release", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])

     @property
     def timestamp(self):
         """get timestamp from snapshot name. Only works for our own snapshots
         with the correct format.
         """
-        time_str = re.findall("^.*-([0-9]*)$", self.snapshot_name)[0]
-        if len(time_str) != 14:
-            raise (Exception("Snapshot has invalid timestamp in name: {}".format(self.snapshot_name)))
-
-        # new format:
-        time_secs = time.mktime(time.strptime(time_str, "%Y%m%d%H%M%S"))
+        time_secs = time.mktime(time.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format))
         return time_secs

     def from_names(self, names):
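
Ownership of a snapshot is no longer decided by matching backup_name plus digits with a regex; a snapshot is "ours" if its name parses against the node's snapshot_time_format (strptime raises ValueError otherwise), which is also how its timestamp is obtained. A self-contained sketch, with an assumed example pattern:

    # Sketch: ownership test by parsing the snapshot name against the configured format.
    import time

    snapshot_time_format = "offsite1-%Y%m%d%H%M%S"   # assumed example pattern

    def is_ours(snapshot_name):
        try:
            time.strptime(snapshot_name, snapshot_time_format)
        except ValueError:
            return False
        return True

    print(is_ours("offsite1-20211004001440"))   # True
    print(is_ours("manual-before-upgrade"))     # False
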
@@ -503,7 +525,7 @@ class ZfsDataset:
         return self.from_names(names[1:])

-    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes):
+    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
         """returns a pipe with zfs send output for this snapshot

         resume_token: resume sending from this token. (in that case we don't
@@ -530,7 +552,7 @@ class ZfsDataset:
         if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
             cmd.append("--embed")  # WRITE_EMBEDDED, more compact stream

-        if "-c" in self.zfs_node.supported_send_options:
+        if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
             cmd.append("--compressed")  # use compressed WRITE records

         # raw? (send over encrypted data in its original encrypted form without decrypting)
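
Compressed WRITE records (-c/--compressed) used to be added whenever the local zfs supported them; the new zfs_compressed argument makes that optional. A simplified sketch of how such a send command line is assembled (the real code additionally consults pool features and the options the zfs binary reports):

    # Simplified sketch of building a zfs send command with optional flags.
    def build_send_cmd(snapshot, write_embedded=True, zfs_compressed=True, raw=False):
        cmd = ["zfs", "send", "-v"]
        if write_embedded:
            cmd.append("--embed")        # -e: more compact stream
        if zfs_compressed:
            cmd.append("--compressed")   # -c: keep already-compressed records as-is
        if raw:
            cmd.append("--raw")          # -w: send encrypted data without decrypting
        cmd.append(snapshot)
        return cmd

    print(" ".join(build_send_cmd("rpool/data/set1@offsite1-20220223183603")))
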
@@ -563,7 +585,7 @@ class ZfsDataset:
         return output_pipe

-    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
+    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
         """starts a zfs recv for this snapshot and uses pipe as input

         note: you can it both on a snapshot or filesystem object. The
@@ -604,6 +626,9 @@ class ZfsDataset:
         # verbose output
         cmd.append("-v")

+        if force:
+            cmd.append("-F")
+
         if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
             # support resuming
             self.debug("Enabled resume support")
@@ -634,7 +659,7 @@ class ZfsDataset:

     def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                           filter_properties, set_properties, ignore_recv_exit_code, resume_token,
-                          raw, send_properties, write_embedded, send_pipes, recv_pipes):
+                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
         """transfer this snapshot to target_snapshot. specify prev_snapshot for
         incremental transfer
@@ -673,9 +698,9 @@ class ZfsDataset:

         # do it
         pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
-                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes)
+                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
         target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
-                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
+                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)

     def abort_resume(self):
         """abort current resume state"""
@@ -874,9 +899,13 @@ class ZfsDataset:
             :type target_keeps: list of ZfsDataset
         """

-        # on source: destroy all obsoletes before common.
+        # on source: destroy all obsoletes before common. (since we cant send them anyways)
         # But after common, only delete snapshots that target also doesn't want
-        before_common = True
+        if common_snapshot:
+            before_common = True
+        else:
+            before_common = False
+
         for source_snapshot in self.snapshots:
             if common_snapshot and source_snapshot.snapshot_name == common_snapshot.snapshot_name:
                 before_common = False
@@ -888,8 +917,8 @@ class ZfsDataset:

         # on target: destroy everything thats obsolete, except common_snapshot
         for target_snapshot in target_dataset.snapshots:
-            if (target_snapshot in target_obsoletes) and (
-                    not common_snapshot or target_snapshot.snapshot_name != common_snapshot.snapshot_name):
+            if (target_snapshot in target_obsoletes) \
+                    and ( not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
                 if target_snapshot.exists:
                     target_snapshot.destroy()
@@ -968,7 +997,7 @@ class ZfsDataset:

     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
@@ -989,6 +1018,8 @@ class ZfsDataset:
             :type destroy_incompatible: bool
         """

+        self.verbose("sending to {}".format(target_dataset))
+
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
          incompatible_target_snapshots) = \
             self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
@@ -1051,7 +1082,9 @@ class ZfsDataset:
                                                   filter_properties=active_filter_properties,
                                                   set_properties=active_set_properties,
                                                   ignore_recv_exit_code=ignore_recv_exit_code,
-                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes)
+                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw,
+                                                  send_properties=send_properties, send_pipes=send_pipes,
+                                                  recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)

                 resume_token = None

zfs_autobackup/ZfsNode.py

@@ -1,24 +1,29 @@
 # python 2 compatibility
 from __future__ import print_function

 import re
+import shlex
 import subprocess
 import sys
 import time

-from zfs_autobackup.ExecuteNode import ExecuteNode
-from zfs_autobackup.Thinner import Thinner
-from zfs_autobackup.CachedProperty import CachedProperty
-from zfs_autobackup.ZfsPool import ZfsPool
-from zfs_autobackup.ZfsDataset import ZfsDataset
-from zfs_autobackup.ExecuteNode import ExecuteError
+from .ExecuteNode import ExecuteNode
+from .Thinner import Thinner
+from .CachedProperty import CachedProperty
+from .ZfsPool import ZfsPool
+from .ZfsDataset import ZfsDataset
+from .ExecuteNode import ExecuteError


 class ZfsNode(ExecuteNode):
     """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

-    def __init__(self, backup_name, logger, ssh_config=None, ssh_to=None, readonly=False, description="",
+    def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
+                 description="",
                  debug_output=False, thinner=None):
-        self.backup_name = backup_name
+
+        self.snapshot_time_format = snapshot_time_format
+        self.hold_name = hold_name
+
         self.description = description
         self.logger = logger
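
ZfsNode no longer receives a backup_name; callers now pass the snapshot naming pattern and the hold tag explicitly, which is what makes the new --...-format options possible. A sketch of the defaults those two parameters replace (both derivations appear as removed lines elsewhere in this diff):

    # Sketch: values the caller now builds instead of ZfsNode deriving them itself.
    backup_name = "offsite1"                               # example backup name

    snapshot_time_format = backup_name + "-%Y%m%d%H%M%S"   # matches the removed new_snapshotname()
    hold_name = "zfs_autobackup:" + backup_name            # matches the removed _hold_name property

    print(snapshot_time_format)   # offsite1-%Y%m%d%H%M%S
    print(hold_name)              # zfs_autobackup:offsite1
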
@@ -53,7 +58,7 @@ class ZfsNode(ExecuteNode):
         if self.__thinner is not None:
             return self.__thinner.thin(objects, keep_objects)
         else:
-            return ( keep_objects, [] )
+            return (keep_objects, [])

     @CachedProperty
     def supported_send_options(self):
@@ -128,8 +133,9 @@ class ZfsNode(ExecuteNode):
             bytes_left = self._progress_total_bytes - bytes_
             minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)

-            self.logger.progress("Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
-                self._progress_total_bytes / (1024 * 1024)), minutes_left))
+            self.logger.progress(
+                "Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
+                    self._progress_total_bytes / (1024 * 1024)), minutes_left))

             return
@@ -157,11 +163,8 @@ class ZfsNode(ExecuteNode):
     def debug(self, txt):
         self.logger.debug("{} {}".format(self.description, txt))

-    def new_snapshotname(self):
-        """determine uniq new snapshotname"""
-        return self.backup_name + "-" + time.strftime("%Y%m%d%H%M%S")
-
-    def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes):
+    def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
+                            post_snapshot_cmds=[]):
         """create a consistent (atomic) snapshot of specified datasets, per pool.
         """
@@ -191,17 +194,30 @@ class ZfsNode(ExecuteNode):
             self.verbose("No changes anywhere: not creating snapshots.")
             return

-        # create consistent snapshot per pool
-        for (pool_name, snapshots) in pools.items():
-            cmd = ["zfs", "snapshot"]
-
-            cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))
-
-            self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
-            self.run(cmd, readonly=False)
+        try:
+            for cmd in pre_snapshot_cmds:
+                self.verbose("Running pre-snapshot-cmd")
+                self.run(cmd=shlex.split(cmd), readonly=False)
+
+            # create consistent snapshot per pool
+            for (pool_name, snapshots) in pools.items():
+                cmd = ["zfs", "snapshot"]
+
+                cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))
+
+                self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
+                self.run(cmd, readonly=False)
+
+        finally:
+            for cmd in post_snapshot_cmds:
+                self.verbose("Running post-snapshot-cmd")
+                try:
+                    self.run(cmd=shlex.split(cmd), readonly=False)
+                except Exception as e:
+                    pass

-    def selected_datasets(self, exclude_received, exclude_paths):
-        """determine filesystems that should be backupped by looking at the special autobackup-property, systemwide
+    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
+        """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide

         returns: list of ZfsDataset
         """
@@ -211,7 +227,7 @@ class ZfsNode(ExecuteNode):

         # get all source filesystems that have the backup property
         lines = self.run(tab_split=True, readonly=True, cmd=[
             "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
-            "autobackup:" + self.backup_name
+            property_name
         ])

         # The returnlist of selected ZfsDataset's:

@@ -235,7 +251,9 @@ class ZfsNode(ExecuteNode):
                 source = raw_source

             # determine it
-            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
+            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
+                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
+                                   min_change=min_change):
                 selected_filesystems.append(dataset)

         return selected_filesystems
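
Selection is still driven by a single zfs get over all filesystems and volumes; only the property name is now passed in as a parameter instead of being hard-coded as "autobackup:" + backup_name. For reference, the command built above is equivalent to the following (the backup name is an example):

    # The selection command as a plain list, using the old default property shape.
    property_name = "autobackup:offsite1"   # previously hard-coded as "autobackup:" + backup_name

    cmd = ["zfs", "get", "-t", "volume,filesystem",
           "-o", "name,value,source", "-H", property_name]
    print(" ".join(cmd))
    # -> zfs get -t volume,filesystem -o name,value,source -H autobackup:offsite1
    # Each tab-separated output line (name, value, source) is what is_selected() receives.
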

zfs_autobackup/ZfsPool.py

@@ -1,4 +1,4 @@
-from zfs_autobackup.CachedProperty import CachedProperty
+from .CachedProperty import CachedProperty


 class ZfsPool():

zfs_autobackup/__main__.py

@@ -3,7 +3,7 @@

 def cli():
     import sys
-    from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
+    from .ZfsAutobackup import ZfsAutobackup

     zfs_autobackup = ZfsAutobackup(sys.argv[1:], False)
     sys.exit(zfs_autobackup.run())

zfs_autobackup/compressors.py

@@ -35,6 +35,12 @@ COMPRESS_CMDS = {
         'dcmd': 'zstdmt',
         'dargs': [ '-dc' ],
     },
+    'zstd-adapt': {
+        'cmd': 'zstdmt',
+        'args': [ '--adapt' ],
+        'dcmd': 'zstdmt',
+        'dargs': [ '-dc' ],
+    },
     'xz': {
         'cmd': 'xz',
         'args': [],
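
The new 'zstd-adapt' entry reuses zstdmt with --adapt, which lets zstd adjust its compression level to the throughput it is actually achieving. A sketch of how an entry like this is typically turned into the compress and decompress ends of the transfer pipe (the helper names here are illustrative, not necessarily this module's API):

    # Illustrative: deriving pipe command lines from a COMPRESS_CMDS entry.
    COMPRESS_CMDS = {
        'zstd-adapt': {
            'cmd': 'zstdmt',
            'args': ['--adapt'],
            'dcmd': 'zstdmt',
            'dargs': ['-dc'],
        },
    }

    def compress_cmd(name):
        entry = COMPRESS_CMDS[name]
        return [entry['cmd']] + entry['args']     # used on the sending side

    def decompress_cmd(name):
        entry = COMPRESS_CMDS[name]
        return [entry['dcmd']] + entry['dargs']   # used on the receiving side

    print(compress_cmd('zstd-adapt'))     # ['zstdmt', '--adapt']
    print(decompress_cmd('zstd-adapt'))   # ['zstdmt', '-dc']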